././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/0000775000175000017500000000000000000000000013463 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.coveragerc0000664000175000017500000000013500000000000015603 0ustar00zuulzuul00000000000000[run] branch = True source = keystone omit = keystone/tests/* [report] ignore_errors = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.git-blame-ignore-revs0000664000175000017500000000034000000000000017560 0ustar00zuulzuul00000000000000# You can configure git to automatically use this file with the following config: # git config --global blame.ignoreRevsFile .git-blame-ignore-revs a00839ca028304946e500cd14ddde322b6303ec8 # Blackify the keystone code base ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.mailmap0000664000175000017500000000264300000000000015111 0ustar00zuulzuul00000000000000# Format is: # # Joe Gordon Sirish Bitra sirish.bitra Sirish Bitra sirishbitra Sirish Bitra root Zhongyue Luo Chmouel Boudjnah Zhenguo Niu ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.pre-commit-config.yaml0000664000175000017500000000313100000000000017742 0ustar00zuulzuul00000000000000--- default_language_version: # force all unspecified python hooks to run python3 python: python3 repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.6.0 hooks: - id: trailing-whitespace - id: mixed-line-ending args: ['--fix', 'lf'] exclude: '.*\.(svg)$' - id: check-byte-order-marker - id: check-executables-have-shebangs - id: check-merge-conflict - id: debug-statements - id: check-yaml files: .*\.(yaml|yml)$ exclude: '^zuul.d/.*$' # 
TODO(gtema): Uncomment once content is fixed # - repo: https://github.com/PyCQA/doc8 # rev: v1.1.1 # hooks: # - id: doc8 - repo: https://github.com/asottile/pyupgrade rev: v3.15.2 hooks: - id: pyupgrade args: ['--py38-plus'] - repo: https://github.com/psf/black rev: 24.4.0 hooks: - id: black args: ['-S', '-l', '79'] - repo: https://github.com/PyCQA/bandit rev: '1.7.9' hooks: - id: bandit args: ["-x", "keystone/tests/*,devstack/*"] - repo: https://opendev.org/openstack/hacking rev: 7.0.0 hooks: - id: hacking additional_dependencies: - flake8-import-order~=0.18.2 exclude: '^(doc|releasenotes|tools|devstack)/.*$' - repo: https://github.com/pre-commit/mirrors-mypy rev: v1.9.0 hooks: - id: mypy additional_dependencies: - types-decorator - types-requests - types-setuptools exclude: | (?x)( api-ref/.* | doc/.* | releasenotes/.* ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.stestr.conf0000664000175000017500000000014600000000000015735 0ustar00zuulzuul00000000000000[DEFAULT] test_path=${OS_TEST_PATH:-./keystone/tests/unit} top_dir=./ group_regex=.*(test_cert_setup) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/.zuul.yaml0000664000175000017500000001464700000000000015440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
- job: name: keystone-dsvm-functional parent: devstack-tempest timeout: 4200 required-projects: - openstack/keystone - openstack/keystone-tempest-plugin vars: tox_envlist: all tempest_test_regex: 'keystone_tempest_plugin' devstack_localrc: TEMPEST_PLUGINS: '/opt/stack/keystone-tempest-plugin' - job: name: keystone-dsvm-py3-functional parent: keystone-dsvm-functional vars: devstack_localrc: TEMPEST_PLUGINS: '/opt/stack/keystone-tempest-plugin' USE_PYTHON3: True - job: name: keystone-dsvm-py3-functional-fips parent: keystone-dsvm-py3-functional nodeset: devstack-single-node-centos-9-stream description: | Functional testing for a FIPS enabled Centos 9 system pre-run: playbooks/enable-fips.yaml vars: nslookup_target: 'opendev.org' - job: name: keystone-dsvm-py3-functional-federation-ubuntu-jammy parent: keystone-dsvm-functional nodeset: openstack-single-node-jammy vars: devstack_localrc: TEMPEST_PLUGINS: '/opt/stack/keystone-tempest-plugin' USE_PYTHON3: True devstack_services: keystone-saml2-federation: true tls-proxy: false devstack_plugins: keystone: https://opendev.org/openstack/keystone zuul_copy_output: /etc/shibboleth: logs /var/log/shibboleth: logs - job: name: keystone-dsvm-py3-functional-federation-ubuntu-jammy-k2k parent: keystone-dsvm-py3-functional-federation-ubuntu-jammy vars: devstack_localrc: IDP_ID: k2k # This job will execute 'tox -e upgrade' from the OSA # repo specified in 'osa_test_repo'. 
- job: name: openstack-ansible-keystone-rolling-upgrade parent: openstack-ansible-cross-repo-functional required-projects: - name: openstack/openstack-ansible-os_keystone vars: tox_env: upgrade osa_test_repo: openstack/openstack-ansible-os_keystone - job: name: keystone-dsvm-ldap-domain-specific-driver parent: devstack-tempest vars: devstack_localrc: KEYSTONE_CLEAR_LDAP: 'yes' LDAP_PASSWORD: 'nomoresecret' USE_PYTHON3: True devstack_services: ldap: true # Experimental - job: name: keystone-tox-patch_cover parent: openstack-tox description: | Run test for keystone project. Uses tox with the ``patch_cover`` environment. vars: tox_envlist: patch_cover # Experimental - job: name: keystone-grenade-multinode parent: grenade-multinode required-projects: - openstack/grenade - openstack/keystone vars: devstack_plugins: keystone: https://opendev.org/openstack/keystone grenade_devstack_localrc: shared: MULTI_KEYSTONE: True # Experimental - job: name: keystone-dsvm-functional-oidc-federation parent: keystone-dsvm-functional vars: devstack_localrc: TEMPEST_PLUGINS: '/opt/stack/keystone-tempest-plugin' USE_PYTHON3: True OS_CACERT: '/opt/stack/data/ca_bundle.pem' devstack_services: tls-proxy: true keystone-oidc-federation: true devstack_plugins: keystone: https://opendev.org/openstack/keystone - project: templates: - openstack-cover-jobs - openstack-python3-jobs - publish-openstack-docs-pti - periodic-stable-jobs - check-requirements - integrated-gate-py3 - release-notes-jobs-python3 - openstack-python3-jobs-arm64 check: jobs: - keystone-dsvm-py3-functional: irrelevant-files: &irrelevant-files - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^etc/.*$ - ^keystone/tests/unit/.*$ - ^releasenotes/.*$ - keystone-dsvm-py3-functional-fips: voting: false irrelevant-files: *irrelevant-files - keystone-dsvm-py3-functional-federation-ubuntu-jammy: voting: false irrelevant-files: *irrelevant-files - keystone-dsvm-py3-functional-federation-ubuntu-jammy-k2k: irrelevant-files: *irrelevant-files - 
keystoneclient-devstack-functional: voting: false irrelevant-files: *irrelevant-files - keystone-dsvm-ldap-domain-specific-driver: voting: false irrelevant-files: &tempest-irrelevant-files - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^etc/.*$ - ^keystone/tests/unit/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - tempest-full-py3: irrelevant-files: *tempest-irrelevant-files - grenade: irrelevant-files: *tempest-irrelevant-files - tempest-ipv6-only: irrelevant-files: *tempest-irrelevant-files - keystone-protection-functional - codegenerator-openapi-identity-tips-with-api-ref: voting: false gate: jobs: - keystone-dsvm-py3-functional: irrelevant-files: *irrelevant-files - keystone-dsvm-py3-functional-federation-ubuntu-jammy-k2k: irrelevant-files: *irrelevant-files - tempest-full-py3: irrelevant-files: *tempest-irrelevant-files - grenade: irrelevant-files: *tempest-irrelevant-files - tempest-ipv6-only: irrelevant-files: *tempest-irrelevant-files - keystone-protection-functional experimental: jobs: - keystone-tox-patch_cover - keystone-grenade-multinode: irrelevant-files: *irrelevant-files - openstack-ansible-keystone-rolling-upgrade: irrelevant-files: *irrelevant-files - tempest-pg-full: irrelevant-files: *tempest-irrelevant-files - keystone-dsvm-functional-oidc-federation: irrelevant-files: *irrelevant-files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/AUTHORS0000664000175000017500000007070200000000000014541 0ustar00zuulzuul00000000000000Abhishek Kekane Adam Gandelman Adam Young Adam Young Ade Lee Adipudi Praveena Adrian Turjak Ajaya Agrawal Akira YOSHIYAMA Alan Pevec Alan Pevec Alberto Planas Alessio Ababilov Alessio Ababilov Alex Gaynor Alex Silva Alexander Ignatyev Alexander Makarov Alexander Maretskiy Alexandre Arents Alexey Miroshkin Alexey Stupnikov Alfredo Moralejo Allan Feid Alvaro Lopez Garcia Amy Marrich Andreas Jaeger Andreas Jaeger Andrew Bogott Andrey Andrey 
Kurilin Andrey Pavlov Andy Smith Angus Lees Anh Tran Ankit Agrawal Anna Sortland Annapoornima Koppad Anne Gentle Anthony Dodd Anthony Washington Anthony Young Aqsa Arica Chakraborty Arnaud Morin Artem Goncharov Artem Vasilyev Arthur Dayne Arun Kant Arvid Requate Arvind Tiwari Atsushi SAKAI Ben Nemec Ben Nemec Bence Romsics Bernhard M. Wiedemann Bhuvan Arumugam Bin Zhou Bo-Chun Lin Bob Thyne Boris Bobrov Boris Bobrov Boris Bobrov Brad Pokorny Brad Topol Brant Knudson Brian Lamar Brian Waldon Bruno Semperlotti BubaVV Béla Vancsics Cady_Chen Cao Xuan Hoang Carlos D. Garza Carlos Marin Chandan Kumar Chandan kumar Chang Bo Guo ChangBo Guo(gcb) Chaozhe.Chen Chen Li Chengwei Yang Chetna Khullar Chloe Jensen Chmouel Boudjnah Chris Jones Chris Spencer Chris Yeoh Christian Berendt Christian Berendt Christian Rohmann Christina Darretta Christoph Gysin Chuck Short Clark Boylan Clayton O'Neill Clenimar Clenimar Filemon Clenimar Filemon Clint Byrum Cole Robinson Colleen Murphy Colleen Murphy Colleen Murphy Colleen Murphy Colleen Murphy Corey Bryant Craig Jellick Cristian Sava Cristina Loma DaiHanada Dan Prince Dan Prince Dan Radez Daniel Gollub Darren Birkett Darren Shaw Davanum Srinivas Davanum Srinivas Dave Chen Dave Wilde (d34dh0r53) Dave Wilde David Hill David Höppner <0xffea@gmail.com> David Lyle David Lyle David Rabel David Ripton David Stanek Dean Troyer Deepak Garg Deepti Ramakrishna Derek Higgins Derek Yarnell Devin Carlen Diego Adolfo de Araújo Dina Belova Dirk Mueller Divya Dmitriy Bogun Dmitriy Rabotyagov Dmitry Khovyakov Dmitry Khovyakov Dolph Mathews Dominic Schlegel Doug Hellmann Doug Hellmann Dougal Matthews Douglas Mendizábal Douglas Mendizábal Dr. 
Jens Harbott Du Yujie Ed Leafe Edgar Magana Edmund Rhudy Eduardo Patrocinio Edward Hope-Morley Einar Forselv Elena Ezhova Elvin Tubillara Emilien Macchi Emilien Macchi Eoghan Glynn Eric Brown Eric Guo Erik Olof Gunnar Andersson Eunyoung Kim Everett Toews Ewan Mellor Eyal Fabio Giannetti Felipe Monteiro Felix Li Feng Shengqin Fernando Diaz Flavio Percoco Florent Flament ForestLee Francois Deppierraz Frank Kloeker Gabriel Hurley Gage Gage Hugo Gage Hugo George Tian Gergely Csatari Gerhard Muntingh Ghanshyam Mann Ghanshyan Mann Ghe Rivero Ghe Rivero Gordon Chung Graham Hayes Grzegorz Grasza Grzegorz Grasza Guang Yee Guang Yee Guo Shan Guo shan Gyorgy Szombathelyi Gábor Antal Ha Van Tu Haiwei Xu Han Guangyu Haneef Ali Harini Harry Rybacki Harshada Mangesh Kakad Hemanth Nakkina Hengqing Hu Henrique Truta Henry Nash Henry Nash Hervé Beraud Hidekazu Nakamura Hieu LE Hirofumi Ichihara Hiromu Asahina Hiromu Asahina Hironori Shiina Hongbin Lu Hugh Saunders Hugo Nicodemos Ian Denhardt Ian Wienand Ilya Pekelny Ionuț Arțăriși Irina Islam Musleh Iswarya_Vakati Itxaka Ivan Mironov J. Daniel Schmidt Jadon Naas Jaewoo Park Jake Yip James Carey James E. Blair James E. Blair James E. Blair James Page James Slagle Jamie Lennox Jamie Lennox Jamie Lennox Jan Provaznik Jaroslav Henner Jason Anderson Jason Cannavale Jason Cannavale Javeme Javier Pena Jay Pipes Jens Harbott Jeremy Freudberg Jeremy Liu Jeremy Stanley Jesse Andrews Jesse Keating Jesse Pretorius Ji-Wei Jianing YANG Jim Rollenhagen Jin Nan Zhang Joe Duhamel Joe Gordon Joe Heck Joe Savak Joe Savak Johannes Erdfelt John Bresnahan John Dennis John Dewey John Dickinson John Eo John Eo John Warren Jon Schlueter Jorge L. Williams Jorge Munoz Jorge Munoz Jose Castro Leon Joseph Joseph W. 
Breu Josh Kearney Juan Antonio Osorio Juan Antonio Osorio Robles Juan Manuel Olle Juan Pedro Torres Julian Edwards Julien Danjou Julien Danjou Justin Santa Barbara Justin Shepherd KIYOHIRO ADACHI Kahou Lei Kalaswan Datta Kam Nasim Kamil Rykowski Kanagaraj Manickam Kanami Akama Kanika Singh Keigo Noha Ken Pepple Ken Thomas Ken'ichi Ohmichi Kenny Johnston Kenny Johnston Kenny Johnston Kent Wang Kevin Benton Kevin Kirkpatrick Kevin L. Mitchell Khaled Hussein Kiall Mac Innes KnightHacker Kobi Samoray Konstantin Maximov Kristi Nikolla Kristi Nikolla Kristy Siu Krsna Widmer Kui Shi Kun Huang Kurt Taylor Kévin Bernard-Allies Lance Bragstad Lance Bragstad Lars Butler Lei Zhang Li Ma Liam Young Liang Bo Liang Chen Liem Nguyen Lin Hua Cheng Lin Hua Cheng Lin Yang LiuNanke Lu lei Luigi Toscano Luis A. Garcia Luong Anh Tuan M V P Nitesh Magnus Lööf Malini Bhandaru Mandell Degerness Marc Koderer Marcellin Fom Tchassem Marcin Wilk Marco Fargetta Marcos Lobo Marek Denis Mark Gius Mark Goddard Mark Hamzy Mark J. 
Washenberger Mark McClain Mark McLoughlin Markus Hentsch Markus Hentsch Martin Chacon Piza Martin Schuppert Maru Newby Mat Grove Mathew Odden Matt Fischer Matt Odden Matt Riedemann Matt Riedemann Matthew Edmonds Matthew Thode Matthew Treinish Matthew Treinish Matthieu Huin Maurice Escher Michael Basnight Michael J Fork Michael Krotscheck Michael Still Michael Tupitsyn Michał Dulko Mike Bayer Mike Chen Mike Perez Mikhail Durnosvistov Mikhail Nikolaenko Min Song Mohammed Naser Monty Taylor Morgan Fainberg Mustafa Kemal Gilor Nachiappan VR N Nam Nguyen Hoai Nathan Kinder Nathan Oyler Nathanael Burton Navid Pustchi Ngo Quoc Cuong Nguyen Hai Nguyen Hung Phuong Nguyen Phuong An Nguyen Van Trung Nikita Koltsov Nina Goradia Ning Sun Nisha Yadav Nishant Kumar Olivier Pilotte Ondřej Nový OpenStack Release Bot Pandiyan Paul Belanger Paul McMillan Paul Voccio Paulo Ewerton Pavel Sedlák Pavlo Shchelokovskyy Pedro Martins Peng Yong Pete Zaitcev Peter Feiner Peter Razumovsky Peter Sabaini Pierre-André MOREY Priti Desai Priti Desai Puneet Arora Pádraig Brady Pádraig Brady Qiaowei Ren Rabi Mishra Radosław Piliszek Rafael Durán Castañeda Rafael Weingärtner Raildo Mascena Raildo Mascena Raildo Mascena Rajesh Tailor Ralf Haferkamp Ramana Juvvadi Ravi Shekhar Jethani Ray Chen Rich Megginson Richard Avelar Rick Hull Rishabh Jain Rob Crittenden Robert Collins Robert Collins Robert H. 
Hyerle Robin Norwood Rodolfo Alonso Hernandez Rodolfo Alonso Hernandez Rodrigo Duarte Rodrigo Duarte Sousa Rodrigo Duarte Sousa Roman Bodnarchuk Roman Bogorodskiy Roman Verchikov Ron De Rose Ronald Bradford Ronald De Rose Rongze Zhu RongzeZhu Roxana Gherle Roxana Gherle Rudolf Vriend Russell Bryant Russell Tweed Ryan Bak Ryosuke Mizuno Sahdev Zala Sai Krishna Salvatore Orlando Sam Morrison Sami MAKKI Samriddhi Samriddhi Jain SamriddhiJain Samuel Pilla Samuel de Medeiros Queiroz Samuel de Medeiros Queiroz Sandy Walsh Sarvesh Ranjan Sascha Peilicke Sascha Peilicke Saulo Aislan Sean Dague Sean Dague Sean McGinnis Sean Perry Sean Perry Sean Winn Sergey Lukjanov Sergey Nikitin Sergey Nuzdhin Sergey Skripnick Sergey Vilgelm Shane Wang Shengjing Zhu Shevek Shoham Peller Shuayb Shuquan Huang Simo Sorce Sirish Bitra Slawek Kaplonski Slawomir Gonet Sony K. Philip Sreyansh Jain Stanisław Pitucha Stef T Stephen Finucane Stephen Finucane Steve Baker Steve Martinelli Steve Martinelli Steven Hardy Stuart Grace Stuart McLaren Suramya Shah Sushil Kumar Sven Anderson Syed Armani Sylvain Afchain THOMAS J. 
COCOZZELLO Tahmina Ahmed Taishi Roy Takashi Kajinami Takashi Kajinami Takashi NATSUME Taketani Ryo Telles Nobrega Theodore Ilie Thierry Carrez Thomas Bechtold Thomas Bechtold Thomas Goirand Thomas Hsiao Tim Burke Tim Kelsey Tim Simpson Timothy Symanczyk Tin Lam Tin Lam Tin Lam Tobias Urdin Todd Willey Tom Cameron Tom Cocozzello Tom Fifield Tom Fifield Tony Breeds Tony NIU Tony Wang Tovin Seven Travis Tripp Trent Lloyd Tuan Do Anh Tushar Patil Ubuntu Unmesh Gurjar Unmesh Gurjar Van Hung Pham Varun Mittal Venkatesh Sampath Victor Coutellier Victor Morales Victor Sergeyev Victor Silva Victor Stinner Vincent Hou Vincent Untz Vishakha Agarwal Vishvananda Ishaya Vivek Dhayaal Vladimir Eremin Vu Cong Tuan Werner Mendizabal Will Kelly William Kelly Wu Wenxiang XiaBing Yao Xianghui Zeng XieYingYun Xuhan Peng YaHong Du Yaguang Tang Yaguang Tang Yaguo Zhou YangLei Yejia Xu Yi Feng Yogeshwar Srikrishnan Yong Sheng Gong Yong Sheng Gong You Ji You Yamagata YuehuiLei Yuiko Takada Yun Mao Yuriy Taraday Yusuke Niimi Zhang Chun Zhang Jinnan ZhangHongtao Zhenguo Niu ZhiQiang Fan ZhiQiang Fan ZhongShengping Zhongyue Luo Ziad Sawalha abhishekkekane adriant ajayaa alatynskaya algerwang andrewbogott annegentle anusha-rayani-7 april ayoung bhavani.cr biwei boden bruceSz bsirish chenaidong1 chenwei chenxiangui chenxing chioleong damon-devops daniel-a-nguyen darren-wang dcramer deepakmourya dineshbhor ebukha erus ferag gage hugo galstrom21 gaofei gecong1973 gengchc2 gengjh ghanshyam gholt gtema guang-yee guang-yee guoshan henriquetruta hgangwx houming-wang huangtianhua jabdul jakedahn janonymous jeremy.zhang jessegler jiaqi07 jiataotj jiaxi jinxingfang jneo8 johnlinp jolie jpic jun xie kylin7-sg leekelby lhinds liangjingtao lilintan lin-hua-cheng linjiang liu-sheng liuchenhong liuhongjiang liujiong liuqing liushuobj liyanhang liyingjun long-wang ls1175 luqitao lvdongbing malei mari-linhares mathrock melanie witt melissaml monsterxx03 morgan fainberg mouad benchchaoui narnt naveenkunareddy 
nitin-29-gupta niuke niuke nonameentername npraveen35 pallavi pangliye pedro pengyuesheng phil-hopkins-a pmoosh prashkre qinglin.cheng r-sekine rajat29 ricolin rocky root rpedde rtmdk ruichen saikrishna saikrishna1511@gmail.com sam leong saradpatel saranjan sathish-nagappan shenjiatong sirish bitra space sunxifa sunyonggen tanlin tengqm termie venkatamahesh vishvananda wanghong wanghongtaozz wanghui wanglong wangqiangbj wangxiyuan wangzihao werner mendizabal whoami-rajat wingwj wu.shiming wudong xianming mao xiexs xingzhou xuhaigang xurong00037997 yanghuichan yangshaoxue yangweiwei yangyapeng yaroslavmt yfzhao yuhui_inspur zhang-jinnan zhang.lei zhangbailin zhangboye zhangguoqing zhanghongtao zhiguo.li zhiyuan_cai zhouxinyong zhouyunfeng zhufl ziadsawalha zlyqqq zouyee Édouard Thuleau “Fernando “Richard 翟小君 최동규/클라우드팀/NE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/CONTRIBUTING.rst0000664000175000017500000000121600000000000016124 0ustar00zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html If you already have a good understanding of how the system works and your OpenStack accounts are set up, you can skip to the development workflow section of this documentation to learn how changes to OpenStack should be submitted for review via the Gerrit tool: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/keystone ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867785.0 keystone-26.0.0/ChangeLog0000664000175000017500000133543300000000000015251 0ustar00zuulzuul00000000000000CHANGES ======= 26.0.0 ------ * Remove logic to support pysaml2<3.0.0 * Remove support-matrix.css * Implement the Domain Manager Persona for Keystone * Update hacking to latest version * Enable hacking check in pre-commit * Fix role statement in admin doc * Replace deprecated in py312 datetime usages * Add keystone-manage reset\_last\_active command * Correct format for token expiration time * Update OIDC Apache config to avoid masking Keystone API endpoint * Enable mypy * Enable non-voting OpenAPI build job * Re-join the strings after re-formatting * Move bandit to pre-commit * Enable pyupgrade * Enable black in pre-commit * Add blackify commit to blame ignore * Only log a small debug message for NotFound * Blackify the keystone code base * Add a release note to cover fix of implied role for application credentials * Fix implied roles in the application credentials * Fix bindep for py312 job * Add pre-commit * Replace use of testtools.testcase.TestSkipped * Remove dependency on pytz * Improve configuration of out-of-tree identity drivers * do not use str(url) to stringify a URL for subsequent use * Remove reference to devstack-gate * reno: Update master for unmaintained/zed * Make protection job voting again * Allow domain users to manage credentials * Allow domain admin to view roles * Enable protection jobs * Remove SQLAlchemy tips jobs * Allow admin to access tokens and credentials * Run Secure RBAC tests as project-admin * reno: Update master for unmaintained/xena * reno: Update master for unmaintained/wallaby * reno: Update master for unmaintained/victoria * Update master for stable/2024.1 * Add test with noauth for s3tokens and ec2tokens 25.0.0 ------ * Deprecate 
templated catalog driver * Update regex to detect closed branch * Add ability to create users and projects from keystone-manage * Remove unused old job templates and experimental jobs * api-ref: Fix indentation * sql: Fixup for invalid unique constraint on external\_id in access\_rule table * Drop keystone-dsvm-functional-federation-opensuse15 jobs * Fix operation order in role deletion * Fix old arm64 job template * Dont enforce when HTTP GET on s3tokens and ec2tokens * Normalize policy checks for domain-scoped tokens * Add domain scoping to list\_domains * Fix federation mapping role jsonschema * Pass initiator to delete user * reno: Update master for unmaintained/yoga * Drop unused pymongodb from requirements * tox: Drop envdir * Allow users with "admin" role to get projects * Allow assignment of domain specific role to federated users * Drop remaining references to eventlet options * Replace CRLF by LF * Fix policies for groups * Consistent and Secure RBAC (Phase 1) * Keystone to honor the "domain" attribute mapping rules * Update python classifier in setup.cfg * Improve application credential validation speed * python 3.12: use raw string * Remove babel.cfg * Imported Translations from Zanata * Propagate redirect exceptions to the client * Clean up deprecated options for eventlet server * Remove deprecated [memcache] options * Drop compatibility code for Python 2.y * Fix bindep.txt for python 3.11 job(Debian Bookworm) * Check user existence before setting last\_active\_at * Remove unnecessary shebang * fix(federation): follow-up * Stop pinning pep8 related packages * Fix typo in cmd/status.py * Update master for stable/2023.2 24.0.0 ------ * Update keystone gates to use jammy * Add default manager role support to bootstrap command * Imported Translations from Zanata * Respect cached tokens issued before upgrade * Add support for bcrypt\_sha256 hasher * Properly trimm bcrypt hashed passwords * Use py3 as the default runtime for tox * doc: Update the installtion 
guide for RHEL8/CentOS8 and RHEL9/CentOS9 * Imported Translations from Zanata * Add a cache to check\_revocation * doc: Correct typo * Revoke list\_events: Add trust sql filter * sql: Remove service\_provider.relay\_state\_prefix default * docs: Clarify lack of LDAP assignment back end * sql: Fix incorrect columns * doc: Add minimal documentation on generating migrations * Fix presentation of OAuth2.0 user guides * sql: Delay importing SQL modules * Add job to test with SQLAlchemy master (2.x) * db: Don't rely on branched connections * Imported Translations from Zanata * Fix typo in openid federation diagram * Add doc of OAuth2.0 Client Credentials Grant Flow * sql: Remove duplicate constraints * Fix outdated default catalog template * Add default service role support to boostrap command * Print a human readable error if tls certs are not provided * [PooledLDAPHandler] Clean up the fix for result3() * Don't forget to check if authorization fails * Remove Dependency on Cryptography >=36.0.0 * sql: Fix incorrect constraints * Update master for stable/2023.1 23.0.0 ------ * OAuth 2.0 Mutual-TLS Support * sql: Add support for auto-generation * tests: Rework BannedDBSchemaOperations fixture * Remove unnecessary removal of pyc files * db: Remove legacy migrations * db: Replace use of reverse cascades * db: Replace use of Query.get() * db: Don't pass strings to 'Connection.execute' * db: Replace use of 'autoload' parameter * db: Replace use of legacy select() calling style * db: Remove use of 'bind' arguments * tests: Enable SQLAlchemy 2.0 deprecation warnings * Bump SQLAlchemy minimum version * Force algo specific maximum length * api-ref: Correct app credentials auth response * Add oidc federation test setup * Fix passenv syntax in tox and update python jobs * [PooledLDAPHandler] Ensure result3() invokes message.clean() * requirements: Bump linter requirements * Limit token expiration to application credential expiration * Remove authenticate.failed from the 
notification\_opt\_out list * fix(federation): allow using numerical group names * Add an option to randomize LDAP urls list * Update master for stable/zed 22.0.0 ------ * Imported Translations from Zanata * Fix host:port handling * remove unicode prefix from code * Use TOX\_CONSTRAINTS\_FILE * Imported Translations from Zanata * Move fips job to centos-9 * docs: Update docs to reflect migration to Alembic * sql: Integrate alembic * tests: Don't monkeypatch functions * sql: Don't create a new connection in migrations * Ignore UserWarning for scope checks during test runs * tox: Don't generate byte code * OAuth2.0 Client Credentials Grant Flow Support * Change error response status code in master branch * Imported Translations from Zanata * Changed minversion in tox to 3.18.0 * Fix typo in documentation * Update python testing as per zed cycle teting runtime * Drop lower-constraints.txt and its testing * Add service\_type config info for access rules * Remove the note of training-labs * Fix delete a limit api doc * typo fix in docstring * Update TOTP example code for python 3 * Log the traceback in \_handle\_keystone\_exception * trivial: Fix typo * Update master for stable/yoga 21.0.0 ------ * Fix bindep.txt for current RPM based distributions * Fix API path in document * Add Python3 xena unit tests * Add Python3 wallaby unit tests * sql: Prepare for alembic migration * sql: Remove dead helpers * Properly instantiate FernetUtils * Fix issue with LDAP backend returning bytes instead of string * sql: Add initial Yoga migration branches * sql: Add additional changes to initial alembic migration * sql: Populate initial alembic migration * sql: Move test-only code to tests * sql: Vendor 'oslo\_db.sqlalchemy.migration' * sql: Move migrations to 'legacy\_migrations' * sql: Remove dead code * cmd: Remove deprecated '--extension' argument * sql: Add initial alembic scaffolding * sql: Reorder tables to reflect creation order * sql: Squash ussuri migrations * sql: Squash 
train migrations * sql: Squash stein migrations * sql: Squash rocky migrations * sql: Squash queens migrations * sql: Squash pike migrations * sql: Squash ocata migrations * sql: Squash newton migrations (part 2) * sql: Remove duplicated constants * sql: Remove 'get\_init\_version' * Change the min value of pool\_retry\_max to 1 * Add generate schemas tool * Add 'StandardLogging' fixture * sql: Rename initial migrations * sql: Remove legacy 'migrate\_repo' migration repo * sql: Fold unique constraints into table definitions * sql: Fold indexes into table defintions * sql: Squash newton migrations (part 1) * sql: Squash mitaka migrations * Add 'WarningsFixture' * sql: Squash liberty migrations * sql: Trivial formatting changes * Add support for pysaml2 >= 7.1.0 * tox: Random fixups * using standard library secrets function token\_bytes to replace os.urandom * Explicitly check policy name in policy warning tests * Deprecate ineffective [memcache] options * Fix response code of 'Revoke Token' in api-ref * Accept STS and IAM services from Ceph Obj Gateway * Fix oslo policy warning assert in unit tests * Temporary exclude the common.sql.core.py from sphinx-apidoc target * Remove broken tempest-full-py3-opensuse15 job * Fix typos in application credential policies * Fix typo in identity provider policies * Update master for stable/xena * Improve performance on trust deletion * Replace deprecated assertDictContainsSubset 20.0.0 ------ * Fix typos in ec2 credential policies * Fix oslo policy DeprecatedRule warnings * Update local\_id limit to 255 characters * Add FIPS check job * Replace deprecated import of ABCs from collections * Moving IRC network reference to OFTC * Update master for stable/wallaby * Remove use of deprecated oslo.db options * docs: Fix failing build * Make DB queries compatible with SQLAlchemy 1.4.x * fix get\_security\_compliance\_domain\_config policy rule typo * Only log warnings about token length when length exceeds max\_token\_size * setup.cfg: 
Replace dashes with underscores * Hide AccountLocked exception from end users * Retry update\_user when sqlalchemy raises StaleDataErrors * Imported Translations from Zanata 19.0.0.0rc1 ----------- * Add job for keystone functional protection tests * trivial: Update minor wording nit in RBAC persona documentation * Clarify top-level personas in RBAC documentation * Clarify \`\`reader\`\` role implementation in persona admin guide * [goal] Deprecate the JSON formatted policy file * Ignore oslo.db deprecating sqlalchemy-migrate warning * Add openstack-python3-wallaby-jobs-arm64 job * Add details to bootstrap docs for system role assignments * Support bytes type in generate\_public\_ID() * Imported Translations from Zanata * Drop lower-constraints job * fix E741 ambiguous variable name * fix E225 missing whitespace around operator * Use app cred user ID in policy enforcement * Generalize release note for bug 1878938 * Use enforce\_new\_defaults when setting up keystone protection tests * Implement more robust connection handling for asynchronous LDAP calls * Imported Translations from Zanata * Update master for stable/victoria * Add vine to lower-constraints * Simplify default config test * Replace assertItemsEqual with assertCountEqual 18.0.0 ------ * [goal] Migrate testing to ubuntu focal * Fix gate by running l-c job on Bionic * Write a symptom for checking memcache connections * Bump pysaml2 requeriment to avoid CVE-2020-5390 * Fix user creation with GRANT in MySQL 8.0(Ubuntu Focal) * Improve the update description for limits in api-ref * Follow-up for bug-1891244 * Support format for msgpack < 1.0 in token formatter * Skip tests to update u-c for PyMySql to 0.10.0 * Spelling Fix * NIT: Spelling Fix * Properly handle octet (byte) strings when converting LDAP responses * Add support for functional RBAC tests * Fix invalid assertTrue which should be assertEqual * Delete system role assignments from system\_assignment table * Fix api-ref for list endpoints * Fix 
lower-constraint for PyMySQL * Fix doc for package mod\_wsgi on Centos8/RHEL8 * requirements: Drop os-testr * Fix "allow expired" feature for JWT * Add ignore\_user\_inactivity user option * Adding note for create a project without domain info * Add "explicit\_domain\_id" to api-ref * Run federation jobs on Ubuntu Focal * Add an enhanced debug configuration technique to caching guide * Remove an assignment from domain and project * Imported Translations from Zanata * New config option 'user\_limit' in credentials * ldap: fix config option docs for \*\_tree\_dn * Port the grenade multinode job to Zuul v3 * Stop to use the \_\_future\_\_ module * NIT: Fix Spelling in auth\_context.py * Update caching-layer.rst * Cap jsonschema 3.2.0 as the minimal version * Support regexes in whitelists/blacklists * Switch to newer openstackdocstheme and reno versions * Update keystone Making an API Change doc * Update filtering-responsibilities and truncation * Update doc id-manage.rst * Update keystone architecture doc * Disable EC2 credentials access\_id update * Add service name filter to service list api-ref * Bump hacking min version to 3.0.1 * Fix UserNotFound exception for expiring groups * Switch to new grenade job name * Fix security issues with EC2 credentials * Ensure OAuth1 authorized roles are respected * Check timestamp of signed EC2 token request * Removes info about deleted function should\_cache\_fn * Correct help for unified\_limits * Imported Translations from Zanata * Add Python3 victoria unit tests * Update master for stable/ussuri 17.0.0.0rc1 ----------- * Enable groups testing for K2K scenarios * Add schema placeholders for Ussuri * Remove Babel as requirement * Update hacking for Python3 * Remove a note related to UUID tokens from example configuration * Update api-ref for federated objects in user * Expiring Group Memberships API - Allow set idp authorization\_ttl * Add federated support for updating a user * Update contributors document keystone * Add 
federated support for creating a user * Stop configuring install\_command in tox * Cleanup py27 support * Add federated support for get user * Add expiring user group memberships on mapped authentication * Expiring Group Membership Driver - Add, List Groups * Expiring User Group Membership Model * Community goal: Adding contributing.rst * Parse cli args in get\_enforcer * Add openstack\_groups to assertion * Change time faking for totp test * Document the "immutable" resource option * remove oslo-concurrency from requirements * drop mock from test-requirements * Correcting api-ref for users * NIT: Fix spelling * Copy shibboleth logs in federation jobs * Ignore SQLAlchemy RemovedIn20Warning * Switch from mock to unittest.mock use * Refactor some ldap code to implement TODOs * Doc Cleanup * Tell reno to ignore the kilo branch * Constraint dependencies for docs build * Removing tempest-full from gate * Check if content-type contains http, not equals * Add docs about bootstrapping immutable roles * Add domain admin grant test cases * Default to bootstrapping roles as immutable * Use inspect instead of Inspector.from\_engine() * Remove six usage * Updating tox -e all-plugin command * Capture output from test run of policy generator * Cleanup doc/requirements.txt * Always have username in CADF initiator * Fix duplicated words issue like "each each user\_id" * Ensure bootstrap handles multiple roles with the same name * Fix role\_assignments role.id filter * Fix release note link formatting * Fix token auth error if federated\_groups\_id is empty list * Update OIDC documentation to handle bearer access token flow * Imported Translations from Zanata * Add docs for app cred access rules * Remove python 2.7 specific library * Add name in GET API of application credentials * Stop adding entry in local\_user while updating ephemerals * Fix api-ref roles response description * Fix credential list for project members * Fix application credential doc example * Migrate grenade 
jobs to py3 * Start README.rst with a better title * Drop old neutron-grenade job * Stop testing Python 2 * Remove group deletion for non-sql driver when removing domains * Refresh "how can I help?" doc * Re-enable line-length linter * Fix line-length PEP8 errors for c7fae97 * Add voting k2k tests * Fix K2K auth flow diagram * Stop explicitly requiring pycodestyle * Add Source links to readme * Switch to opensuse-15 nodeset * Switch to official Ussuri jobs * Revert "Resource backend is SQL only now" * Drop project.id foreign keys * Fix sql migrate repo prefix check * Add schema placeholders for Train * Overhaul the RBAC documentation for administrators * Fix wrong interface description * Import LDAP job into project * Update getting started guide * Remove legacy protection tests * Update token definitions * Remove policy.v3cloudsample.json * Imported Translations from Zanata * Fix misspell word * Update master for stable/train 16.0.0.0rc1 ----------- * Remove limit policies from policy.v3cloudsample.json * Add tests for project users interacting with limits * Allow domain users to access the limit API * Use immutable roles in tests * Add missing ws between words in log messages * Allow system/domain scope for assignment tree list * Make policy deprecation reasons less verbose * Readjust job timeouts * Implement scope type checking for Project Endpoints * Federation mapping debug should show direct\_maps values * Consolidate policy deprecation warnings * Add default roles and scope checking to project tags * DRY up credential policies * Move remaining protection tests * Fix test case in policy associations * Fix PostgreSQL specifc issue with credentials encoding * Fix validation of role assignment subtree list * Specify keystone is OS user for fernet and credential setup * Add remote\_id definition in \_perform\_auth * Use correct repo for initial version check * Split protection unit tests into its own job * Remove system EC2 credentials from 
policy.v3cloudsample.json * Remove system Domain Config from policy.v3cloudsample.json * Update API version for access rules * Add access rules to token validation * Expose access rules as its own API * Remove obsolete grant policies from policy.v3cloudsample.json * Alphabetize removed policies in tests * Implement system admin for OAUTH1 consumers * Implement system scope for domain role management * Make system tokens work with domain-specific drivers * Implement scope type checking for EC2 credentials * Increase tox job timeouts to 90 minutes * Add immutable roles status check * Remove implied roles policies from v3cloudsample * Implement system admin for implied roles * Implement domain admin support for grants * Implement domain reader support for grants * Add Project User coverage for domain config API * Add Domain User for security compliance domain config API * Implement system admin for domain config API * Implement system reader & member for domain config API * Fix timeout Zuul changes * Generate PDF documentation * Add --immutable-roles flag to bootstrap command * Add immutable option for roles and projects * Bump timeout for lower-constraints job * Implement resource options for roles and projects * Implement system reader for OAUTH1 consumers * Implement system reader for implied roles * Remove system policy and its association from policy.v3cloudsample.json * Override tox job timeouts * Fix federation CI * Fix oauthlib update errors * Use raw formatting for mapping\_engine help text * Add tests for project users for policy association * Add tests for domain users for policy association * Implement system admin for policy association * Implement system reader & member for policy association * Add tests for project users interacting with policies * Add notifications for deleting app creds by user * Add tests for domain users interacting with policies * Clean up UserGroups target enforcement callback * Fix relative links * Add tests for project users 
interacting with endpoint\_groups * Add tests for domain users interacting with endpoint\_groups * Implement system\_admin for endpoint\_groups * Implement system reader and member for endpoint\_groups * Add retry for DBDeadlock in credential delete * Fix translated response * Implement system admin for trusts API * Add tests for domain users for trusts * Add tests for system member for trusts * Implement system reader role for trusts API * Move get\_role\_for\_trust enforcement to policies * Move list\_roles\_for\_trust enforcement to policies * Move get\_trust enforcement to default policies * Move delete\_trust enforcement to default policies * Move list\_trusts enforcement to default policies * Add protection tests for trusts API * Update broken link * Update cli docs * Implement system admin for policies * Implement system reader and member for policies * Add support for previous TOTP windows * Honor group\_members\_are\_ids for user\_enabled\_emulation * Update api-ref for revocation list OS-PKI * Docs: Make robust with using real links * Clean up irrelevant comment * Fix list\_mappings deprecation warning message * Allows to use application credentials through group membership * Fix missing print format and missing ws between words * Suppress policy deprecation warnings in unit tests * Add API changes for app cred access rules * Add manager support for app cred access rules * Add user\_id, external\_id to access rules table * Fix websso auth loop * Deprecate keystone.conf.memcache socket\_timeout * Fix typo: RBACKEnforcer -> RBACEnforcer * Run 'tempest-ipv6-only' job in gate * Followup for remove signing[config] * Remove broken api-ref link * doc: Fix broken links * Fix python3 compatibility on LDAP search DN from id * Deprecate identity:revocation\_list policy for removal * Remove [signing] config * Update api-ref location * implement system scope for application credential * Fixing dn\_to\_id function for cases were id is not in the DN * Add new attribute 
to the federation protocol API * Allow to filter endpoint groups by name * update documentation for X.509 tokenless auth * Deprecate [federation] federated\_domain\_name * Allow JsonBlob to accommodate SQL NULL result sets * Add exercises for intern applicants * Fix keystone document * nit: remove some useless code * Drop limit columns * token: consistently decode binary types * Incorrect behavior of validate\_password method * Update test cases for os-pki revoke API * Blacklist sphinx 2.1.0 (autodoc bug) * Bump openstackdocstheme to 1.20.0 * Remove redundant parameter passed to assertTrue * Add Python 3 Train unit tests * Switch order of precedence for unit test deps * Don't call .c from select() objects * Update misleading comment about fernet credential encryption * Fix E731 flake8 * [api-ref] Fix nocatalog description for unscoped token * Drop use opendev.org for tox deps * Fix contributor doc of keystone * Add link to describe Principle of Least Privilege * Update the meaning of low-hanging-fruit * Implement system scope and default roles for token API * Update unified limit documentation * Add cadf auditing to credentials * Remove deprecated admin\_endpoint * Revert "Exclude constants from autodoc" * Revert "Ignore boilerplate constants in autodoc" * Ignore boilerplate constants in autodoc * Exclude constants from autodoc * Report correct domain in federated user token * Add flake8 ignore list to fast8 script * Add application\_credential as a CADF type * add raw format link to keystone config sample * Update mission statement and vision reflection * Add note about application credential ownership * Revert "Add JSON driver for access rules config" * Revert "Add manager for access rules config" * Revert "Add a permissive mode for access rules config" * Revert "Add manager support for app cred access rules" * Revert "Add API for /v3/access\_rules\_config" * Don't throw valueerror on bootstrap * Remove [token]/ infer\_roles * Pep8 environment to run on delta 
code only * Add clarification for context in install guides * Adds caching of credentials * Cap sphinx for py2 to match global requirements * Revert "Blacklist bandit 1.6.0" * Fix documentation typo * Blacklist bandit 1.6.0 * Update Python 3 test runtimes for Train * [docs] remove deprecated ubuntu package from installation * Fix for werkzeug > 0.15 * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * Pass kwargs to exception to get better format of error message * Replace support matrix ext with common library * Uncap jsonschema * Fix unscoped federated token formatter * Use openstackdocstheme according to guide * Make fetching all foreign keys in a join * Support endpoint updates in bootstrap * Add missing ws separator between words * Move redelegation fields out of extras * Replace dict.iteritems() with dict.items() in keystone * Add release note for service token documentation * Fix werkzeug imports for version 0.15.x * Allow an explicit\_domain\_id parameter when creating a domain * Update the min version of tox * Convert user\_id back to string * Add API for /v3/access\_rules\_config * Ignore Stein-specific release notes * Be more verbose in logging role grant on bootstrap * Replace UUID with id\_generator for Federated users * DRY: Remove redundant policies from policy.v3cloudsample.json * Raise METHOD NOT ALLOWED instead of 500 error on protocol CRUD * Remove redundant policies from v3cloudsample * Add domain scope support for group policies * Update broken links to dogpile.cache docs * Add keystone's technical vision reflection * Add release prelude about changing policies * Consolidate user protection tests * Replace URL name to the correct one in Keystone Docs * Delete shadow users when domain is deleted * Make system admin policies consistent for grants * Remove assignment policies from policy.v3cloudsample.json * Add role assignment testing for project users * Replace openstack.org git:// URLs with https:// * Implement 
system reader functionality for grants * Remove external-dev and consolidate to contributor * Remove system assignment policies from policy.v3cloudsample.json * Test domain and project users against group system assignment API * Add role assignment test coverage for domain admins * Add role assignment test coverage for domain members * Implement domain reader for role\_assignments * Add explicit testing for project users and the user API * Update group system grant policies for admins * Update system group assignment policies for reader and member * Fix typo in docs section header * Update master for stable/stein * Test project users against system assignment API * Test domain users against system assignment API * Update system grant policies for system admin * Update system grant policies for system member * Update system grant policies for system reader 15.0.0.0rc1 ----------- * trivial: correct spelling in test names * Remove project policies from policy.v3cloudsample.json * Implement domain admin functionality for projects * Implement domain member functionality for projects * Only validate tokens once per request * Pin Werkzeug in lower-constraints * Implement domain admin functionality for user API * Implement domain member functionality for user API * Implement domain reader functionality for user API * Add documentation for service tokens * Added keystone identity provider installation to Devstack plugin * PY3: Ensure LDAP searches use unicode attributes * Use ForbiddenAction for invalid action instead of Forbidden * Add schema placeholders for Stein * Implement domain reader functionality for projects * Small refactor for create nonlocal user * Mention allow\_expired\_window in fernet FAQ * Fix the incorrect release name of project guide * trivial: fix broken link in trust API reference * Migrate keystone-dsvm-grenade-multinode job to Ubuntu Bionic * Remove publish-loci post job * Add hint for order of keys during distribution * Add service developer 
documentation for scopes * Make system members the same as system readers for credentials * Drop py35 jobs * Remove service policies from policy.v3cloudsample.json * Switch federation check jobs to opensuse * Add manager support for app cred access rules * Add driver support for app cred access rules * Add SQL migrations for app cred access rules * Add a permissive mode for access rules config * Add manager for access rules config * Add JSON driver for access rules config * Remove protocol policies from v3cloudsample.json * Add tests for project users interacting with services * Remove role policies from policy.v3cloudsample.json * Add tests for project users interacting with roles * Add tests for domain users interacting with roles * Remove endpoint policies from policy.v3cloudsample.json * Remove domain policies from policy.v3cloudsample.json * Add role assignment test coverage for system admin * Add role assignment test coverage for system members * Reorganize role assignment tests for system users * Implement system reader for role\_assignments * Remove idp policies from policy.v3cloudsample.json * Add py37 tox env * Add tests for domain users interacting with services * Update service policies for system admin * Add shibboleth config to log output * Update introduction of external services doc * Address follow-up comments in contributor guide for specs * [api-ref] add domain level limit support * Release note for domain level limit * Update project depth check * Add domain level support for strict-two-level-model * Add domain level limit support - API * Add domain level limit support - Manager * Remove mapping policies from policy.v3cloudsample.json * Add tests for project users interacting with mappings * Deprecate cache\_on\_issue configuration option * Add JWS token provider documentation * Add OpenSUSE support in devstack federation plugin * Add experimental job for OpenSUSE * Fix mock for v2 test * Add documentation for writing specifications * Remove 
unused sample token fixtures * Fix bindep for SUSE * add python 3.7 unit test job * Correcting tests with project\_id * Add domain\_id column for limit * [SQLite] Ensure change is addressed for limit table * Remove region policies from policy.v3cloudsample.json * Add tests for project users interacting with regions * Add tests for domain users interacting with regions * Update region policies to use system admin * Add region tests for system member role * Implement system admin role in groups API * populate request context with X.509 tokenless cred information * Fix wrong example for direct\_maps * Fixes incorrect params * Implement JWS token provider * Seperated CADF notifications tests for request\_id * Added request\_id and global\_request\_id to basic notifications * Converting the API tests to use flask's test\_client * Implement system admin role in users API * Implement system member role user test coverage * Implement system reader role for users * Replace 'tenant\_id' with 'project\_id' * Add PyJWT as a requirement * Add test fixture for the JWS key repository * Add keystone-manage create\_jws\_keypair functionality * Add configuration options for JWS provider * Test case for bad type user in assertion * Adjust Indents to meet PEP8 E117 * Handle special cases with msgpack and python3 * Add experimental job for CentOS * Add CentOS support in devstack federation plugin * Remove service provider policies from v3cloudsample.json * Add documentation for Auth Receipts and MFA * bump Keystone version for Stein * Allow project users to retrieve domains * Fix wrong urls * Optimize fernet token and receipts in cli.py * PY3: switch to using unicode text values * Expose receipt\_setup and receipt\_rotate command * Clean up the create\_arguments\_apply methods * Allow domain users to access the GET domain API * Update doc for token\_setup and token\_rotate * Fix nits * Fix app\_cred schema spell nit * Update limit policies for system admin * Do not use self in 
classmethod * Add tests for project users interacting with endpoints * Add tests for domain users interacting with endpoints * Update endpoint policies for system admin * Add endpoint tests for system member role * Update endpoint policies for system reader * Add tests for domain users interacting with mappings * Update mapping policies for system admin * Add mapping tests for system member role * Update mapping policies for system reader * Add tests for project users interacting with idps * Add tests for domain users interacting with idps * Update idp policies for system admin * Add idp tests for system member role * Update idp policies for system reader * Add region protection tests for system readers * Update role policies for system admin * Reuse common system role definitions for roles API * Add tests for project users interacting with protocols * Add tests for domain users interacting with protocols * Implement system admin role in protocol API * Add protocol tests for system member role * Update protocol policies for system reader * Add limit tests for system member role * Add limit protection tests * Remove registered limit policies from policy.v3cloudsample.json * Add tests for project users interacting with registered limits * Allow domain users to access the registered limits API * Remove duplicated TOC in configuration guide * Implement system admin role in project API * Implement system member role project test coverage * Implement system reader role for projects * Enhance the openidc guide * Enhance the mellon guide * Enhance the shibboleth guide * Consolidate WebSSO guide into SP instructions * Add section on configuring protected auth paths * Reorganize guide on configuring a keystone SP * Clean up keystone-to-keystone section * Enhance authn sections in federation guide * correct the description on domain re-enable * Add tests for project users interacting with sps * Add tests for domain users interacting with sps * Update service provider policies 
for system admin * Add prerequisites section to keystone-to-keystone * Invalidate shadow\_federated\_user cache when deleting protocol * Remove duplicate RBAC logging from enforcer * Update federation SP prerequisites section * Use samltest.id as an example sandbox IdP * Fix nits in code blocks in federation guide * Bring SP/IdP URLs closer to style guide guidance * Restructure federation guide * Update doc with samltest.id * Clarify location for HTTPD instructions * Use common system role definitions for registered limits * Implement system member test coverage for groups * Implement system reader role for groups * Add service provider tests for system member role * Update service provider policies for system reader * Add service tests for system member role * Update service policies for system reader * Use renamed template 'integrated-gate-py3' * Add scope checks to common system role definitions * Remove i18n.enable\_lazy() translation * Reorganize admin guide * Consolidate service catalog docs * Add irrelevant-files for grenade-py3 jobs * Delete outdated keystonemiddleware doc * Remove example usage from admin guide * Split trusts docs between admin and user guide * Move identity sources doc to admin guide * Remove message about circular role inferences * Remove Certificates for PKI guide * Add introduction section to federation docs * Fix links to external-authentication * Move list limit docs to admin guide * Rename admin guide pages * Consolidate tokenless X.509 docs * Update registered limit policies for system admin * Consolidate Keystone docs: admin/identity-external-authentication.rst * Implement system admin role in domains API * Implement system member role domain test coverage * Implement system reader role in domains API * Bump oslo.policy and oslo.context versions * Move supported clients section to user guide * Use request\_body\_json function * Move SSL recommendation to installation guide * Move "Public ID Generators" to relevant docs * 
Consolidate Keystone docs: federated-identity.rst * Add role tests for system member role * Consolidate catalog management guide * Update role policies for system reader * Change openstack-dev to openstack-discuss * Add registered limit tests for system member role * Add registered limit protection tests * Keep federation jobs running on Xenial * Clarify docstrings for domain flask refactor * Move test utility to common location * Add missing translation import to common.auth.py * Move to password validation schema * Don't emit a notification for the root domain * Pass context objects to policy enforcement * Consolidate identity-domain-specific-config.rst * Consolidate auth-totp.rst * Consolidate event\_notifications.rst * Consolidate endpoint-policy.rst * Consolidate service-catalog.rst * Update contributor doc * Use pycodestyle in place of pep8 * Update api-ref to include user options * Document user options * Add scope documentation for service developers * Remove deprecated secure\_proxy\_ssl\_header config * Refactor flask domain config resources * Add missing ws seperator between words * Add the missing packages when install keystone * add request\_id and global\_request\_id to cadf notifications * changed port in tools/sample\_data.sh * Move irrelevant-files to project definition * Add tempest-full-py3 job to zuul file * Remove the repetition words in identity-fernet-token-faq.rst * Removing default\_assigment\_driver * Bump sqlalchemy minimum version to 1.1.0 * Drop the compatibility password column * Remove "crypt\_strength" option * Correct HTTP OPTIONS method * Update api-ref for set registered limits * Remove deprecated "bind" in token * Update more info of vhost file * Refactor directory creation into a common place * Region update extra support * Change \_\_all\_\_ list to tuple * Remove redundant variables from context class * Refresh admin doc * Fixing nits * Add abstract method in trusts base.py * Switch devstack plugin to samltest.id * Clean up 
python3.5 usage in tox.ini * Add py36 tox environment * Remove unused lower constraints * Replace usage of get\_legacy\_facade() with get\_engine() * Fix uwsgi --http flag * Fix an issue with double fernet key rotation * Delete PKI middleware debugging section * Fix developer config dir flask aftermath * Documentation fix - Port number * Use port 5000, keystone-wsgi-public and --http-socket * Changed the port numbers * Implement auth receipts spec * changed port in argument '--bootstrap-admin-url' * Unregister "Exception" from flask handler * Add release note for unified limit APIs changing * Deprecate eventlet related configuration * Remove compatability shim * Remove check for disabled v3 * Remove obsolete credential policies * Delete "Preparing your environment" section * Implement scope\_type checking for credentials * Fix spelling 'unnecessary' * Remove custom auth middleware documentation * Delete the external auth admin guide * Remove useless use of :orphan: * Change port and version on v3 endpoints example * Provide a Location on HTTP 300 * Set Default and resource limit as defined schema * Emit CADF notifications on authentication for invalid users * Delete administrator federation guide * Update keystone-manage bootstrap port instructions * Fix api-ref v3.9 release identifier * Update third endpoint legacy port for Keystone v3 API * Remove unused logging module * Remove useless "clean" file * Trivial: Remove repeated if conditions * Updating doc of unified limit * Adding 'date' for trust\_flush * Add caching on trust role validation to improve performance * Allow registered limit's region\_id to be None * Add a test for idp and federated user cascade deleting * Fix example for getting system scoped token * Remaining cases of MappingEngineTester * Set min and max length for resource\_name * Implement scaffolding for upgrade checks * Fixing update unified limit api-ref * Remove deprecated token\_flush * Invalidate app cred AFTER deletion * Update API 
version to 3.11 * Added test case update registered limit with region * Remove incorrect copyright notice * Remove paste-ini * Remove pre-flask legacy code * Make collection\_key and member\_key raise if unset * Increment versioning with pbr instruction * Loosen the assertion for logging scope type warnings * Expand implied roles in system-scoped tokens * Add test case for expanding implied roles in system tokens * Move loadapp to a generic place * Make policy file support in fixture optional * Use tempest-pg-full * Cleanup test\_wsgi * Flask comment/docstring cleanup * Move AuthContextMiddleware * Convert Normalizing filter to flask native Middleware * Internally defined middleware don't use stevedore * Make Request Logging a little better * Register exceptions with a Flask Error Handler * Cleanup keystone.server.flask.application * Replace JSON Body middleware with flask-native func * Convert S3 and EC2 auth to flask native dispatching * Remove skip for test\_locked\_out\_user\_sends\_notification * Convert projects API to Flask * Convert /v3/users to flask native dispatching * add unit tests for healthcheck * Replace openSUSE experimental check with newer version * Auth flask conversion cleanup * Convert auth to flask native dispatching * Update notification tests to work with o-m 9.0.0 * Don't mock internal implementation details of oslo * Update log translation hacking check * Don't quote {posargs} in tox.ini * Enable foreign keys for unit test * Update doc string for transform\_to\_group\_ids * Follow Zuul job rename * Add release names to api-ref * Avoid using dict.get() in assertions * Clarify group-mapping example in docs * Purge soft-deleted trusts * LDAP attribute names non-case-sensitive * Organize project tag api-ref by route * Add build\_target arguement to enforcer * Properly replace flask view args in links * Adding test case for MappingEngineTester * Fix command to verify role removal in docs * Add python3 functional test job * Convert legacy 
functional jobs to Zuul-v3-native * Update auto-provisioning example to use reader * Enable Foreign keys for sql backend unit test * Add releasenote for bug fix 1789450 * Comment out un-runnable tests * Mapped Groups don't exist breaks WebSSO * Add hint back * Implement Trust Flush via keystone-manage * Properly normalize domain ids in flask * Use templates for cover and lower-constraints * Make OSA rolling upgrade test experimental * Rename v3-only functional zuul job * Remove unused revoke\_by\_user\_and\_project * Address issues with flask conversion of os-federation * Convert domains api to flask * Move use of constraints out of install\_cmd * Ensure view args is in policy dict * Rename py35 v3 only check * Convert OS-INHERIT API to flask native dispatching * Fix a translation of log * Convert groups API to flask native dispatching * Fix RBACEnforcer get\_member\_from\_driver mechanism * Refactor ProviderAPIs object to better design pattern * Convert OS-FEDERATION to flask native dispatching * Update the documentation bug tag * api-ref: Remove broken link * Added support for a \`\`description\`\` attribute for Identity Roles * Update the minimimum required version of oslo.log * Incorrect use of translation \_() * Update RDO install guide for v3 * Remove member\_role\_id/name * Convert policy API to flask * Fix db model inconsistency for FederatedUser * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Use items() instead of iteritems() * Add details and clarify examples on casing * Address nits * Re-Add scope.system to filters * Add placeholder migrations for Rocky * Change unique\_last\_password\_count default to 0 * Trivial: Remove app\_conf kwarg from testing setup * Trivial: Add missing space in exception * Move json\_home "extension" rel functions * Convert system (role) api to flask native dispatching * Do not log token string * Convert role\_assignments API to flask native dispatching * 
Add safety to the inferred target extraction during enforcement * Use osc in k2k example * Fix a bug that issue token with project-scope gets error * Convert role\_inferences API to flask native dispatching * Convert Roles API to flask native dispatching * Convert endpoints api to flask native dispatching * Convert services api to flask native dispatching * Convert regions API to flask native dispatching * Remove unused util function * Redundant parameters in api-ref:domain-config * Add callback action back in * Set initiator id as user\_id for auth events * Update reno for stable/rocky * More accurate explanation in api-ref:application credentials * Imported Translations from Zanata 14.0.0.0rc1 ----------- * Allow wrap\_member and wrap\_collection to specify target * Pass path into full\_url and base\_url * Allow for more robust config checking with keystone-manage * Remove redundant get\_project call * Convert OS-SIMPLE-CERT to flask dispatching * Migrate OS-EP-FILTER to flask native dispatching * Convert limits and registered limits to flask dispatching * Add a release note for bug 1785164 * Error location of parameters in api-ref:project tags * Code optimization of create application credential * Do not allow create limits for domain * Update api-ref for unified limits * Fix json indentation of notification sample * Convert OS-AUTH1 paths to flask dispatching * Clean up token extra code * Expose a bug that issue token with project-scope gets error * Remove KeystoneToken object * Convert OS-REVOKE to flask dispatching * Address FIXMEs for listing revoked tokens * Move unenforced\_api decorator to module function * Remove direct calls to auth.controllers in some tests * Move validate\_issue\_token\_auth from controllers * Unified code style nullable description parameter * Remove get\_catalog from manage layer * Api-ref: Correct response code * Adding missing comma in docs * Expose random uuid bug in cadf notifications * Boostrap CLI tests no longer call auth 
controller * Implement "no-update" test for trusts * Move trusts to flask native dispatching * Address nits in strict-two-level implementation * Remove get\_catalog usage from contrib 14.0.0.0b3 ---------- * Deprecate [token] infer\_roles=False * Reduce duplication in federated auth APIs * Fix RBACEnforcer Comment * Mirror self-link trust check from tempest * Trusts do not implement patch * Allow for 'extension' rel in json home * Add pycadf initiator for flask resource * Use oslo\_serialization.jsonutils * Correctly pull input data for enforcement * Delete project limits when deleting project * Add project hierarchical tree check when Keystone start * Update project depth check * Add include\_limits filter * Bump lower constraint for pysaml2 to 4.5.0 * Allow class-level definition of API URL Prefix * Move Credentials API to Flask Native * Add project\_id filter for listing limit * Strict two level limit model * Switch to python-ldap * Add correct self-link * Properly remove content-type on HTTP 204 * Increase test coverage of entity\_type id mapping query * Cleanup keystone.token.providers.common * Remove remnants of token bind * Simplify the token provider API * Add serialization for TokenModel object * Introduce new TokenModel object * Don't allow legacy and native flask to share paths * Remove uuid token size check from doctor * Do not use flask.g imported as g * Fix keystone.common.rbac\_enforcer.\_\_init\_\_.py exporting * Make keystone.server.flask more interesting for importing * Flesh out and add testing for flask\_RESTful scaffolding * Update pypi url to new url * Invalidate 'computed assignments' cache when creating a project * Filter project\_id for list limits * Expose endpoint to return enforcement model * Add docs for case-insensitivity in keystone * Clarifications to API & Scenario Tests * Remove enable config option of trust feature * Fix keystone-manage saml\_idp\_metadata under python3 * Only upload SP metadata to testshib.org if IDP id is 
testshib * Ignore .eggs dir as well * Implement enforcement model logic in Manager * Add registered\_limit\_id column for limit * Add auto increase primary key for unified limit * Address minor comments from initial impl RBACEnforcer * Refactor \_handle\_shadow\_and\_local\_users * Refactor \_set\_domain\_id\_and\_mapping functions * Move keystone.server.common to keystone.server * Add support for enforce\_call to set value on flask.g * Refactor - remove extra for loop * Remove token bind capabilities * Address minor comments to 404 error detection * Exposing ambiguity bug when querying role assignments * pycrypto is not used by keystone * Add new "How Can I Help?" contributor guide * Added check to avoid keyerror "user['name']" * Implement base for new RBAC Enforcer * Refactor trust roles check * Make it easy to identify a 404 from Flask * Don't replace the whole app just the wsgi\_app backing * Add support for before and after request functions * Convert json\_home and version discovery to Flask * Keystone adheres to public\_endpoint opt only * Implement scaffolding for Flask-RESTful use * Add Flask-RESTful and update flask minimum(s) * Fix keystone-manage mapping\_purge with --type option * Override oauthlib docstrings that fail with Sphinx 1.7.5 * Simple usage docs for implied roles * Fix duplicate role names in trusts bug * Expose duplicate role names bug in trusts * Remove unclear wording in parameters * Filter by entity\_type in get\_domain\_mapping\_list * Migrate all password hashes to the new location if needed * Add policy for limit model protection * Api-ref: Refresh the Update APIs for limits * Imported Translations from Zanata * Remove a useless function * Clarify complicated sentence in docs * Unified limit update APIs Refactor * Store JSON Home Resources off the composing router * Ensure default roles created during bootstrap * Add release notes link to README * Remove duplicated test * Expand on debug\_middleware option * Update response codes for 
authentication API reference * Clarify scope responses in authentication api ref * fix tox python3 overrides * Add Flaskification release-note * Remove pastedeploy * Flaskification cleanup * Remove the rest of v2.0 legacy * Add in ability to load DEBUG middleware * Revert "Rename fernet\_utils to token\_utils" * Convert Keystone to use Flask 14.0.0.0b2 ---------- * Docs: Remove the TokenAuth middleware * Correct test\_v3\_oauth1.test\_deleting\_project\_also\_invalidates\_tokens * Correct test\_v3\_oauth1.test\_change\_user\_password\_also\_deletes\_tokens * Correct test\_v3\_oauth1.test\_bad\_authorizing\_roles\_id * Correct test\_v3\_oauth1.test\_bad\_authorizing\_roles\_name * Fix warnings in documentation * fix rally docs url * Decouple bootstrap from cli module * Handle empty token key files * Remove some unused functions * Update tests to work with WebOb 1.8.1 * Consolidate oauth1.rst * Remove the TokenAuth middleware * Remove token driver configuration * Fix the test for unique IdP * Consolidate health-check-middleware.rst * Limit description support * The migration script to add description for limit * Update IdP sql model * Remove dead dependency injection code * Remove unused assertions from test\_v3.py * Remove dead code in token provider * Remove unused exception * Do not return all the limits for POST request * Add configuration option for enforcement models * Use the provider\_api module in limit controller * Fix the outdated URL * Remove policy service from architecture.rst * Invalidate the shadow user cache when deleting a user * Add conceptual overview of the service catalog * Trivial: Update pypi url to new url * Update the RDO installation guide to use port 5000 * Update keystone functional tests 14.0.0.0b1 ---------- * Remove the sample .conf file * Allow blocking users from self-service password change * Add prerequisite package note to Keystone install guide * Update auth\_uri option to www\_authenticate\_uri * Fix json schema nullable to add 
None to ENUM * Use consistent role schema in token response validation * Corrects spelling of MacOS * Fix 500 error when deleting domain * Allow cleaning up non-existant group assignments * Follow the new PTI for document build * Use the new pysaml2 constraints * Fix incompatible requirement in lower-constraints * Update install guides * Fix mispelling of accommodate in install docs * Fix list\_limit doesn't work correctly for domain * Expose a bug that list\_limit doesn't work correctly * Log warning when using token\_flush * Removal of deprecated direct driver loading * Make tags filter match subset rather than exact * Updated from global requirements * Update RDO install guide for v3 * Remove admin interface in sample Apache file * add lower-constraints job * Fix integer -> method conversion for python3 * Fix user email in federated shadow users * Remove references to v2.0 from external developer doc * Remove references to UUID from token documentation * Add logging for xmlsec1 installation * Updated from global requirements * Mark the implied role API as stable * Add note to keystone-manage bootstrap doc * Fix assert test error under py3.6 * Fix api-ref for project tag create * Updated from global requirements * Fixing multi-region support in templated v3 catalog * Update links in README * Use different labels for user and project names * Imported Translations from Zanata * Add user documentation for JSON Home * Fix formatting of ImportError * Imported Translations from Zanata * Updated from global requirements * Imported Translations from Zanata * Remove @expression from tags * Work around deprecations for opportunistic tests * Api-ref: fix resource\_limit format * Correct typo in identity API reference * Imported Translations from Zanata * Consolidate identity-token-binding.rst * Consolidate identity-service-api-protection.rst * Add new setup commands for token keys * Consolidate endpoint-filtering.rst * Remove unnecessary config overrides from fernet tests * 
Make assertValidFernetKey assertion more robust * Update 3.10 versioning to limits and system scope * Remove v2.0 policies * Populate application credential data in token * Imported Translations from Zanata * Simplify federation and oauth token callbacks * Simplify token persistence callbacks * Refactor token cache invalidation callbacks * Remove needs\_persistence property from token providers * Imported Translations from Zanata * Use OSC in application credential documentation * Add docs for application credentials * Force SQLite to properly deal with foreign keys * Remove unused class variables from token provider * Imported Translations from Zanata * Grant admin a role on the system during bootstrap * Fix querying role\_assignment with system roles * Delete system role assignments when deleting groups * Expose bug in system assignment when deleting groups * Delete system role assignments when deleting users * Expose bug in system assignment when deleting users * Expose bug in /role\_assignments API with system-scope * Remove the sql token driver and uuid token provider * Imported Translations from Zanata * Update reno for stable/queens * Imported Translations from Zanata 13.0.0.0rc1 ----------- * Add placeholder migrations for Queens * Delete SQL users before deleting domain * Reorganize api-ref: v3-ext federation mapping.inc * Update OBS install docs for v2 removal * Reorganize api-ref: v3-ext federation service-provider * Reorganize api-ref: v3-ext oauth.inc * Replace port 35357 with 5000 for ubuntu guide * Reorganize api-ref: v3 os-pki * Reorganize api-ref: v3-ext federation identity-provider * Reorganize api-ref: v3-ext trust.inc * Remove v2.0 from documentation guides * Remove v2.0 extension documentation * Update curl request documentation to remove v2.0 * Remove v2 and v2-admin API documentation * Remove all v2.0 APIs except the ec2tokens API * Update sample configuration file for Queens * Imported Translations from Zanata * Finish refactoring 
self.\*\_api out of tests * Add cache invalidation when delete application credential * Expose a bug that application credential cache is not invalidated * Fix cache invalidation for application credential * Expose a bug that cache invalidation doesn't work for application credential * Update the base class for application credential * Fix list users by name * Refactor self.\*\_api out of tests * Use keystone.common.provider\_api for auth APIs * Fix the wrong description * Remove the redundant word * Validate identity providers during token validation * Update historical context about the removal of v2.0 * Document flat limit enforcement model * add 'tags' in request body of projects * Increase MySQL max\_connections for unit tests * Add scope\_types for user policies * Use native Zuul v3 tox job * Update documentation to reflect system-scope * Add a release note for application credentials * Impose limits on application credentials * Enable application\_credential auth by default * Add api-ref for application credentials * Add application credential auth plugin * Add Application Credentials controller * Zuul: Remove project name * Refresh the admin\_token doc * Remove pki\_setup step in doc * Add documentation describing unified limits * Handle TZ change in iso8601 >=0.1.12 * Remove PKI/PKIZ token in doc * Add api-ref for unified limits * Expose unified limit APIs * Implement policies for limits * Add limit provider * Improve limit sql backend * Replace Chinese punctuation with English punctuation 13.0.0.0b3 ---------- * Add release note for system-scope * Implement GET /v3/auth/system * Updated from global requirements * Implement system-scoped tokens * Document scope\_types for project policies * Add scope\_types to trust policies * Add scope\_types to grant policies * Add scope\_types to role assignment policies * Fix column rename migration for mariadb 10.2 * Remove foreign key for registered limit * Introduce assertions for system-scoped token testing * 
Implement system-scope in the token provider API * Teach TokenFormatter how to handle system scope * Remove the deprecated "giturl" option * Relay system information in RoleAssignmentNotFound * Rename application credential restriction column * Update token doc * Update keystone v2/tokenauth example * Reorganize api-ref: v3-ext revoke.inc * Reorganize api-ref: v3-ext ep-filter.inc * Reorganize api-ref: v3-ext simple-cert.inc * Reorganize api-ref: v3-ext federation projects-domains.inc * Document scope\_types for credential policies * Document scope\_types for ec2 policies * Move token\_formatter to token * Document fixes needed for token scope\_types * Add scope\_types to service provider policies * Add scope\_types to group policies * Add scope\_types to domain config policies * Add system column to app cred table * Fix outdated links * Add ability to list all system role assignments * Add system role assignment documentation * Add Application Credentials manager * Handle TODO notes for using new\_user\_ref * Updated from global requirements * Add application credentials driver * Make entries in policy\_mapping.rst consistent * Add application credentials db migration * Fix indentation in docs * remove \_append\_null\_domain\_id decorator * Fix wrong url in domains-config-v3.inc * msgpack-python has been renamed to msgpack * adjust response code order in 'regions-v3.inc' * Fix wrong url in config-options.rst * adjust response code order in 'authenticate-v3.inc' * Reorganize api-ref: v3-ext endpoint-policy.inc * Imported Translations from Zanata * Extract expiration validation to utils * Implement controller logic for system group assignments * adjust response code order in ''policies.inc'' * adjust response code order in ''domains-config-v3.inc'' * put response code in table of ''domains.inc'' * adjust response code in order of credentials.inc * fix wrong url link of User trusts * Reorganize api-ref: v3-ext federation assertion.inc * Implement controller logic for 
system user assignments * Add schema check for authorize request token * Remove whitespace from policy sample file * Use keystone.common.provider\_api for trust APIs * Add db operation for unified limit * Add new tables for unified limits * Fix federation unit test * add response example and 'extra' info of create user * Add scope\_types to domain policies * Add scope\_types for policy policies * Add scope\_types to oauth policies * Add scope\_types to token revocation policies * Add scope\_types to endpoint group policies * Migrate jobs to zuulV3 * Add scope\_types to role policies * Add scope\_types to implied role policies * Add expired\_at\_int column to trusts * Add scope\_types for revoke event policies * Add scope\_types to protocol policies * Add scope\_types to project endpoint policies * Add scope\_types to policy association policies * Add scope\_types to mapping policies * Add scope\_types to identity provider policies * Add scope\_types to service policies * Handle InvalidScope exception from oslo.policy * Use keystone.common.provider\_api directly in assignment * Add scope\_types to region policies * Add scope\_types to endpoint policies * Expose a get\_enforcer method for oslo.policy scripts * Reorganize api-ref: v3 project-tags * Reorganize api-ref: v3 authenticate-v3 * Deprecate [trust]/enabled option * Use keystone.common.provider\_api for resource APIs * Re-organize api-ref: v3 inherit.inc * Implement get\_unique\_role\_by\_name * Reorganize api-ref: v3-ext federation projects-domains * Reorganize api-ref: v3 regions-v3 * Reorganize api-ref: v3 policies * Remove duplicated release note * Reorganize api-ref: v3 credentials * Reorganize api-ref: v3 domains-config-v3 * Reorganize api-ref: v3 service-catalog * Reorganize api-ref: v3 projects * Reorganize api-ref: v3 roles * Use keystone.common.provider\_api for identity APIs * Use keystone.common.provider\_api for revoke APIs * Use keystone.common.provider\_api for policy APIs * Use 
keystone.common.provider\_api for oauth APIs * Use keystone.common.provider\_api for federation APIs * Use keystone.common.provider\_api for endpoint\_policy APIs * Use keystone.common.provider\_api for credential APIs * Use keystone.common.provider\_api for catalog APIs * Use keystone.common.provider\_api for token APIs * modify LOG.error tip message * Performance: improve get\_role * Add group system grant policies * Replace parse\_strtime with datetime.strptime * Remove private methods for v2.0 and v3 tokens * Ensure building scope is mutually exclusive * Add user system grant policies * Implement manager logic for group+system roles * Implement manager logic for user+system roles * Implement backend logic for system roles * Add a new table for system role assignments * Refactor project tags encoding * Expose a bug when authorize request token * Bump API version and date to 3.9 * Create doc/requirements.txt * remove some misleading info in Update user API doc * Updated from global requirements * remove "admin\_token\_auth" related content" * Remove rolling\_upgrade\_password\_hash\_compat * Deprecate member\_role\_id and member\_role\_name * Migrate functional tests to stestr * Remove Dependency Injection * Rename fernet\_utils to token\_utils * Remove extra parameter for token auth * Refresh sample\_data.sh * Improve exception logging with 500 response * Remove dead code for auth\_context * Updated from global requirements 13.0.0.0b2 ---------- * Reorganize api-ref:v3 groups * Handle deprecation of inspect.getargspec * Enforce policy on oslo-context * Correct error message for request token * Refresh the Controller list * Updated from global requirements * Update keystone testing documentation * Fix role schema in trust object * Validate disabled domains and projects online * Add New in Pike note to using db\_sync check * Fix 500 error when create trust with invalid role key * Expose a bug when create trust with roles * Remove member role assignment * Fix wrong 
links in keystone documentation * Add schema check for OS-TRUST:trust authentication * Expose a bug when authenticating for a trust-scoped token * Update the help message for unique\_last\_password\_count * Remove apache-httpd related link * Populate user, project and domain names from token into context * Remove setting of version/release from releasenotes * Updated from global requirements * Update cache doc * Updated from global requirements * Fix 500 error when authenticate with "mapped" * Updated from global requirements * Filter users/groups in ldap with whitespaces * Deprecate policies API * Change url in middleware test to v3 * Remove ensure\_default\_domain\_exists * Ensure listing projects always returns tags * Consolidate V2Controller functionality * Remove v2 token value model * Add non-voting rolling upgrade test * Remove "no auth token" debug log * Partially clarify federation auth plugins * Handle ldap size limit exeeded exception * policy.v3cloudsample.json: remove redundant blank space * Remove expired password v2 test * Remove v2 token test models * Remove/update v2 catalog endpoint tests * Remove unnecessary dependency injection * Remove identity v2 to v3 test case * Reorganize api-ref: v3 domains * Correct parameter to follow convention 13.0.0.0b1 ---------- * Remove v2 schema and validation tests * Implement project tags API controller and router * Implement project tags logic into manager * Implement backend logic for project tags * Remove v2.0 assignment schema * Add project tags api-ref documentation and reno * Deleting an identity provider doesn't invalidate tokens * Add policy for project tags * Add JSON schema validation for project tags * Fix initial mapping example * Fix list in caching documentation * Updated from global requirements * Refactor test\_backend\_ldap tests * Emit deprecation warning for federated domain/project APIs * Reorganize api-ref: v3-ext federation auth * Update the release name in install tutorial * Reorganize 
api-ref: v3 users * Add explain of mapping group attribute * Remove v2.0 identity API documentation * Add database migration for project tags * Remove the v2\_deprecated decorator * Remove the v3 to v2 resource test case * Remove admin\_token\_auth steps from install guide * Remove the v2.0 validate path from validate\_token * Remove v2.0 test plumbing * Remove v2.0 auth APIs * Remove v2.0 token APIs * Move auth header definitions into authorization * Remove v2.0 identity APIs * Use stestr directly instead of ostestr * Remove middleware reference to PARAMS\_ENV and CONTEXT\_ENV * Migrate to stestr * Updated from global requirements * Add default configuration files to data\_files * Add unit tests to mapping\_purge * Replace assertRegexpMatches with assertregex * Update API reference link in README * Refactor removal of duplicate projects/domains * Update links in keystone * Fix role assignment api-ref docs * Update invalid url in admin docs * Remove keystone-all doc * Fix typos in bootstrap doc * Properly normalize protocol in Fedrations update\_protocol * Two different API achieve listing role assignments * Add backport migrations for Pike * Adds Bandit #nosec flag to instances of SHA1 * Policy exception * Remove duplicate code * Fix a typo * Increase multi region endpoints test coverage * Replace DbMigrationError with DBMigrationError * Confusing notes of ephemeral user's domain * Confusing log messages in project hierarchy checking * Remove vestigate HUDSON\_PUBLISH\_DOCS reference * Add test GET for member url in the Assignment API * Remove v2.0 resource APIs * Remove v2.0 assignment APIs * Remove v2.0 service and endpoint APIs * Fix endpoint examples in api-ref * Copy specific distro pages for install guide * Imported Translations from Zanata * Log format error * Updated from global requirements * Ignore release notes for pike and master * Clarify documentation for release notes * Revert "Fix wrong links" * Remove missing release note from previous revert * 
Include a link in release note for bug 1698900 * Delete redundant code * Call methods with kwargs instead of positionals * Remove duplicate roles from federated auth * Add the step to create a domain * Add int storage of datetime for password created/expires * Resource backend is SQL only now * Assert default project id is not domain * Fix wrong links * Imported Translations from Zanata * Remove deprecation of domain\_config\_upload * Update reno for stable/pike 12.0.0.0rc1 ----------- * Unset project ids for all identity backends * Update docs: fernet is the default provider * Add description for relationship links in api-ref * Updated URLs in docs * Cache list projects and domains for user * Remove unused hints from assignment APIs * Make an error state message more explicit * Fill in content in CLI Documentation * Except forbidden when clearing default project IDs * Update URL in README.rst * Document required \`type\` mapping attribute * Imported Translations from Zanata * Fix man page builds * Fill in content in User Documentation * Clarify SELinux note in LDAP documentation * Remove duplicate sample files * Remove policy for self-service password changes * Add role\_domain\_id\_request\_body in parameters * use the show-policy directive to show policy settings * Move credential encryption docs to admin-guide * Consolidate LDAP documentation into admin-guide * Imported Translations from Zanata * Add description of domain\_id in creating user/group * Add cli/ directory for documentation * Add user/ directory for documentation * Add contributor/ directory for docs * Removed unnecessary setUp() calls from unit tests * Filter users and groups in ldap * Move url safe naming docs to admin guide * Fix ec2tokens validation in v2 after regression in metadata\_ref removal * Add the step to install apache2 libapache2-mod-wsgi * Handle auto-generated domains when creating IdPs * Updated from global requirements * Fix the documentation sample for OS-EP-FILTER 12.0.0.0b3 
---------- * Clarify documentation on whitelists and blacklists * In the devstack plugin, restart keystone after modifying conf * Fix typo in index documentation * Move performance documentation to admin-guide * Consolidate certificate docs to admin-guide * Move auth plugin development doc to contrib guide * Add missing comma to json sample * Added new subsections to developer docs * Fix wording of configuration help text * Added index.rst in each sub-directory * Optional request parameters should be not required * Updated from global requirements * Move development environment setup to contributor docs * Add a hacking rule for string interpolation at logging * Make the devstack plugin more configurable for federation * Reorganised developer documentation * Enable sphinx todo extension * Remove duplicate configuration sections * Expanded the best practices subsection in devdocs * Added new docs to admin section * Move bootstrapping documentation to admin-guide * Updated from global requirements * Add a release note for bug 1687593 * Reorganised api-ref index page * remove default rule * Merged the caching subsections in admin docs * Move trust to DocumentedRuleDefault * Improved the keystone federation image * [install] Clarify the paths of the rc files * fix identity:get\_identity\_providers typo * fix assert\_admin * Fixing flushing tokens workflow * Replaced policy.json with policy.yaml * Added configuration options using oslo.config * Added configuration references to documentation * Add history behind why keystone has two ports * Move upgrade documentation to admin-guide * Stop using deprecated 'message' attribute in Exception * Move caching docs into admin-guide * Gear documentation towards a wider audience * Removed apache-httpd guide from docs * Update security compliance documentation * A simple fix about explicit unscoped string * Remove duplicate token docs * Update info about logging in admin guide * Use log debug instead of warning * Added a note for 
API curl examples * Move import down to correct group * Switch from oslosphinx to openstackdocstheme * Clarify LDAP invalid credentials exception * Ensure there isn't duplication in federated auth * Remove keystone\_tempest\_plugin from setup.cfg * Move implied role policies to DocumentedRuleDefault * Remove duplicated list conversion * Remove duplicated hacking rule * Document and add release note for HEAD APIs * Validate rolling upgrade is run in order * Remove duplicate logging documentation * Migrated docs from devdocs to user docs * Updated from global requirements * Remove note about kvs from admin-guide * Move token flush documentation to admin-guide * Remove the revocation api config section * Rename Developer docs to Contributor docs * Removed unnecessary line breaks from install-guides * Added keystone installation guides * Implement HEAD for assignment API * Make federation documentation consistent * Added keystone admin guides to documentation * Add annotation about token authenticate * Split test\_get\_head\_catalog\_no\_token * Move related project information into main doc * Move ec2 credential policies to DocumentedRuleDefault * Return 400 when trying to create trust with ambiguous role name * Reorganised keystone documentation structure * Updated the keystone docs to follow the docs theme * Fix PCI DSS docs on change\_password\_after\_first\_use * Add HEAD API to auth * Add HEAD APIs to federated API * Ensure the trust API supports HEAD requests * Ensure oauth API supports HEAD * Ensure the endpoint policy API supports HEAD * Improve handling of database migration checks * Updated from global requirements * Check log output rather than emitting in tests * Ensure HEAD is supported with simple cert * Ensure the ec2 API supports HEAD * Ensure the endpoint filter API supports HEAD * Move domain config to DocumentedRuleDefault * Add HEAD API to domain config * Updated from global requirements * Move grant policies to DocumentedRuleDefault * Move role 
policies to DocumentedRuleDefault 12.0.0.0b2 ---------- * Use DocumentedRuleDefault for token operations * Remove the local tempest plugin * Add response example in authenticate-v3.inc * Addition of "type" optional attribute to list credentials * Remove keystone.conf if not used * Updated from global requirements * Remove assertRaisesRegexp testing function * Update DirectMappingError in keystone.exception * Remove dependency requires if not used * Add role test to test\_consume\_trust\_once in test\_v3\_auth.py * Writing API & Scenario Tests docs * Handle group NotFound in effective assignment list * Updated from global requirements * Update doctor warning about caching * Basic overview of tempest and devstack plugins * Updated from global requirements * Updated from global requirements * Don't need to contruct data if not need persistence * Fix response body of getting role inference rule * Quotation marks should be included in http url using curl * Updated from global requirements * Replace test.attr with decorators.attr * Update test case for federation * Support new hashing algorithms for securely storing password hashes * Remove loading drivers outside of their expected namespaces * Change LDAPServerConnectionError * Error api about grant collections in policy\_mapping.rst * Updated from global requirements * Handle NotFound when listing role assignments for deleted users * Update sample configuration file for Pike * Change url scheme passed to oauth signature verifier * Updated from global requirements * Role name is unique within the owning domain * Remove LDAP delete logic and associated tests * Revert change 438035 is\_admin\_project default * Trivial fix typo in doc * Fix misnamed variable in config * Change url passed to oauth signature verifier to request url * Expose a bug in domain creation from idps * Role name is unique within the owning domain * Refactor is\_admin * Update fail message to test\_database\_conflicts * Fix 
keystone.tests.unit.test\_v3\_oauth1.MaliciousOAuth1Tests * Test config option 'user\_enabled\_default' with string type value * Stop using oslotest.mockpatch * Remove X-Auth-Token from response parameters * Fix test\_minimum\_password\_age\_and\_password\_expires\_days\_deactivated * Refactor Authorization: * Cleanup policy generation * Fix test keystone.tests.unit.test\_token\_bind.BindTest * Fix keystone.tests.unit.test\_backend\_ldap.LDAPIdentity * Remove test\_metadata\_invalid\_contact\_type * Update dead API spec links * override config option notification\_opt\_out with list * Add filter explain in api ref about parents\_as\_list and subtree\_as\_list * use '&' instead of '?' to connect parameters in url * Remove usage of enforce\_type * Revise doc about python 3.4 * Update Devstack plugin for uwsgi and mod\_proxy\_uwsgi * Add notes in inherit.inc * Do not fetch group assignments without groups * Readability enhancements to architecture doc * Add response examples to OS-OAUTH1 api documentation * Correct oauth create\_request\_token documentation * Remove unused CONF * Remove unused LOG * Move policy generator config to config-generator/ * Include sample policy file in documentation * Trivial Fix: fix typo in test comments * Move user policies to DocumentedRuleDefault * Explicitly set 'builders' option * Make flushing tokens more robust * Minor corrections in OS-OAUTH1 api documentation * Fix-test-of-assertValidRole * Small refactoring in tests development docs * Move endpoint group to DocumentedRuleDefault * Fix doc generation for python 3 12.0.0.0b1 ---------- * Updated from global requirements * Imported Translations from Zanata * Updated scope parameter description in v3 API-ref * Add Apache License Content in index.rst * Address comments from Policy in Code 5 * Remove unused revocation check in revoke\_models * Updated from global requirements * Remove unused code in test\_revoke * Move group policies to DocumentedRuleDefault * Move consumer to 
DocumentedRuleDefault * Move access token to DocumentedRuleDefault * Move mapping to DocumentedRuleDefault * Move role assignment to DocumentedRuleDefault * Move region policies to DocumentedRuleDefault * Move project endpoint to DocumentedRuleDefault * Remove unnecessary processing when deleting grant * Add sem-ver flag so pbr generates correct version * Move protocol to DocumentedRuleDefault * Move credential policies to DocumentedRuleDefault * Move policy association to DocumentedRuleDefault * Move and refactor test\_revoke\_by\_audit\_chain\_id * Move policy policies to DocumentedRuleDefault * Move and refactor project\_and\_user\_and\_role * Updated from global requirements * Move and refactor test\_by\_domain\_domain * Move and refactor test\_by\_domain\_project * Move and refactor test\_by\_domain\_user * Remove unused method \_sample\_data in test\_revoke * Refactor test\_revoke to call check\_token directly * Differentiate between dpkg and rpm for libssl-dev * Move auth to DocumentedRuleDefault * Move service policies to DocumentedRuleDefault * Remove unnecessary setUp function in testcase * Remove policy file from source and refactor tests * Remove revocation API dependency from identity API * Remove revocation API dependency from resource API * Move project policies to DocumentedRuleDefault * Replace wip with skip * Removed domain conflict guard in load\_fixtures * Updated from global requirements * Remove create\_container\_group from tests * Add charset to webob.Response * Move identity provider to DocumentedRuleDefault * Move endpoint policies to DocumentedRuleDefault * Move domain policies to DocumentedRuleDefault * Move service provider to DocumentedRuleDefault * Add policy sample generation * Removed the deprecated pki\_setup command * Reduce fixture setup in test\_backend\_ldap * Consolidate and cleanup test\_backend\_ldap setup * Remove conflict guards in load\_fixtures * Remove orphaned \_create\_context test helper * Remove decorator for 
asserting validation errors * Remove orphaned AuthTestMixin from test\_v3 * Move revoke events to DocumentedRuleDefault * Doc db\_sync --expand incurring downtime in upgrades to Newton * Fix some reST field lists in docstrings * Remove log translations in keystone * Move release note from /keystone/releasenotes to /releasenotes * Small fixes for WebOb 1.7 compatibiltity * Error messages are not translating with locale * Add a note to db\_sync configuration section * Remove unused revoke\_by\_domain\_role\_assignment * Remove unused revoke\_by\_project\_role\_assignment * Remove unnecessary revocation events revoke grant * Remove unnecessary revocation events * Remove unnecessary revocation events * Policy in code (part 5) * Policy in code (part 4) * Set the correct in-code policy for ec2 operations * Don't persist revocation events when deleting a role * Policy in code (part 3) * Policy in code (part 2) * Policy in code * Speed up check\_user\_in\_group for LDAP users * Don't persist rev event when deleting access token * Include the requested URL in authentication errors * Remove extra duplicate 'be' in description * Add group\_members\_are\_ids to whitelisted options * Use HostAddressOpt for opts that accept IP and hostnames * Remove x-subject-token in api-ref for v3/auth/catalog * Add reno conventions to developer documentation * Updated from global requirements * Fix description for 204 response * Updated from global requirements * Remove keystone.common.ldap * Fix the typo * Add in-code comment to clarify pattern in tests * Fix keystone.o.o URL * Test for fernet rotation recovery after disk full * API-ref return code fix * Updated from global requirements * Imported Translations from Zanata * Fix api-ref building with sphinx 1.5 * Change is\_admin\_project to False by default * Remove pbr warnerrors in favor of sphinx check * Move driver loading inside of dict * Minor cleanup from patch 429047 * Remove password\_expires\_ignore\_user\_ids * Remove unused 
variable * Revise conf param in releasenotes * Modify examples to use v3 URLs * Fix duplicate handling for user-specified IDs * Removing group role assignments results in overly broad revocation events * Typos in the LoadAuthPlugins note * Remove domains \*-log-\* from compile\_catalog * Add instruction to restart apache * Exchange cURL examples for openstackclient * Updated from global requirements * Remove x-subject-token in api-ref for v3/auth/{projects,domains} * Exclusively use restore\_padding method in unpacking fernet tokens * Remove EndpointFilterCatalog * Give a prospective removal date for all v2 APIs * Fix some typo in releasenotes * Correct and enhance OpenId Connect docs * Imported Translations from Zanata * Correct and enhance Mellon federation docs * Clear the project ID from user information * Fix MFA rule checks for LDAP auth * Fix v2 role create schema validation * Update reno for stable/ocata * Fix the s3tokens endpoint * Stop reading local config dirs for domain-specific file config driver * Fix typo in config doc * Updated from global requirements * Fix example response formatting * Rename protocol cascade delete migration file * Remove logging import unused * Address db\_sync check against new install * Deprecate (and slate for removal) UUID tokens * Remove the file encoding which is unnecessary * Correct some typo errors * Federated mapping doc improvements * Include 'token' in the method list for federated scoped tokens * Add --check to keystone-manage db\_sync command * Deprecate (and emit message) AdminTokenAuthMiddleware * Use ostestr instead of the custom pretty\_tox.sh * Fix multiple uuid warnings with pycadf * Add unit test for db\_sync run out of order * Fixed warning when building keystone docs * Ensure migration file names are unique to avoid caching errors * use the correct bp link for shadow-mapping rel note * Readability/Typo Fixes in Release Notes * Remove unused api parameters * Make use of Dict-base including extras explicit 
* Add placeholder migrations for Ocata * Update hacking version * Use httplib constants for http status codes * Renaming of api parameters * Remove KVS code 11.0.0 ------ * Modify the spelling mistakes * Stop reading local config dirs for domain-specific SQL config driver * Prepare for using standard python tests * update keystone.conf.sample for ocata-rc * Add MFA Rules Release Note * Remove de-dupe for MFA Rule parsing * Add comment to clarify resource-options jsonschema * Cleanup TODO, AuthContext and AuthInfo to auth.core * Cleanup TODO about auth.controller code moved to core * Add validation that token method isn't needed in MFARules * Add validation for mfa rule validator (storage) * Process and validate auth methods against MFA rules * Update endpoint api for optional region\_id * No need to enable infer\_roles setting * Fix bad error message from FernetUtils * Use https for docs.openstack.org references * Update PCI documentation * Auth Plugins pass data back via AuthHandlerResponse * Auth Method Handlers now return a response object always * Add MFA Rules and Enabled User options * cleanup release notes from PCI options * Create user option \`ignore\_lockout\_failure\_attempts\` * Implement better validation for resource options * Deprecate [security\_compliance]\password\_expires\_ignore\_user\_ids * Fixes deprecations caused by latest oslo.context * PCI-DSS Force users to change password upon first use * clean up release notes for ocata * Reuse already existing groups from upstream tempest config * add additional deprecation warnings for KVS options * Address follow-up comments from previous patchset * Cleanup for resource-specific options * Adds tests showing how mapping locals are handled 11.0.0.0b3 ---------- * Add 'options' as an explicit user schema validation * Code-Defined Resource-specific Options * Set the domain for federated users * Refactor shadow users tests * Add domain\_id to the user table * Do not call \`to\_dict\` outside of a session 
context * Remove code supporting moving resources between domains * Change unit test class to a less generic name * Remove dogpile.core dependencies * Verbose breakup of method into separate methods * Fixed unraised exception in \_disallow\_write for LDAP * Add password expiration queries for PCI-DSS * Add missing parentheses * Add queries for federated attributes in list\_users * update entry points related to paste middleware * Remove LDAP write support * Remove related role\_tree\_dn test * Add warning about using \`external\` with federation * Allow user to change own expired password * Fix warnings generated by os-api-ref 1.2.0 * Improvements to external auth documentation page * Test cross domain authentication via implied roles * Updates to project mapping documentation * Add documentation for auto-provisioning * Implement federated auto-provisioning * Fix typo in main docs page * switch @hybrid\_property to @property * Catch potential SyntaxError in federation mapping * Fix typo in shibboleth federation docs * Handling of 'region' parameter as None * Corrected punctuation on multiple exceptions * Exclude 'keystone\_tempest\_plugin' in doc build * Force use of AuthContext object in .authenticate() * Cascade delete federated\_user fk * update sample config for ocata release * Drop type in filters * Add DB operations tracing * fix broken links * Changed 'Driver' reference to 'TokenDriverBase' * Fix keystone-manage mapping\_engine tester * Add anonymous bind to get\_connection method * Set connection timeout for LDAP configuration * Invalid parameter name on interface * Bump API version and date * listing revoke events should be admin only * Adds projects mapping to the mapping engine * Updated docstring for test\_sql\_upgrade.py * Use public interfaces of pep8 for hacking * [api-ref] Clean up OS-EP-FILTER association docs * Remove comment from previous migration * [api-ref] Clean up OS-EP-FILTER documentation * Fixed not in toctree warnings when building docs 
* Remove stevedore warning when building docs * Update docs to require domain\_id when registering Identity Providers * Retry on deadlock Transactions in backend * Fix region\_id responses and requests to be consistent * Remove endpoint\_id parameter from EP-FILTER docs * [api] fix ep filter example * Require domain\_id when registering Identity Providers * Fix minor typo * Remove references to Python 3.4 * Improve assertion in test * Use assertGreater(len(x), y) instead of assertTrue(len(x) > y) * Correct invalid rst in api docs * Fixed 7 tests running twice in v3 identity * Fix issues with keystone-dsvm-py35-functional-v3-only on py35 * Fix the usage of tempest.client.Manager class * Correct timestamp format in token responses * Remove unused exceptions from CADF notifications * Minor improvement in test\_user\_id\_persistence * Remove CONF.domain\_id\_immutable * Fix test function name with two underscores to have only one * Updated from global requirements * Fix import ordering in tempest plugins * [api] Inconsistency between v3 API and keystone token timestamps * Federated authentication via ECP functional tests * Removes unnecessary utf-8 encoding * Handle disk write failure when doing Fernet key rotation * Fix cloud\_admin rule and ensure only project tokens can be cloud admin * Updated from global requirements * Remove duplicate role assignment in federated setup * Remove unused variables from federation tests * Remove unused variables from unit test method * Add reason to CADF notifications in docs * [doc] point release note docs to project team guide * [api] set \`is\_admin\_project\` on tokens for admin project * Settings for test cases * Add reason to notifications for PCI-DSS * Fix typo in doc * fix one typo * Updated from global requirements * Wrap invalidation region to context-local cache * move common sql test helpers to base class * Use assertGreater(len(x), y) instead of assertTrue(len(x) > y) * replace assertTrue with assertIs 11.0.0.0b2 
---------- * Replace logging with oslo\_log * expose v3policy failure with is\_admin\_token * Add doctor checks for ldap symptoms * Implement password requirements API * Fix a typo in comment * Add unit tests for doctor token\_fernet symptoms * Remove impossible case from \_option\_dict method * Make \_option\_dict() a method for domain\_config\_api * Add unit tests for doctor tokens symptoms * Add checks for doctor credential symptoms * Make user to nonlocal\_user a 1:1 relationship * Add id to conflict error if caused by duplicate id * Refactors \_get\_names\_from\_role\_assignments * Do not manually remove /etc/shibboleth folder * API Documentation for user password expires * Revert "API Documentation for user password expires" * API Documentation for user password expires * Clean up keystone doc landing page * Add doctor tests on security\_compliance and rename * Fix typo in api-ref doc * Move V2TokenDataHelper to the v2.0 controller * Remove exception from v2 validation path * Make bootstrap idempotent when it needs to be * Add unit tests for doctor's database symptoms * Print name with duplicate error on user creation * Expose idempotency issue with bootstrap * Print domain name in mapping\_populate error message * Correct missspellings of secret * Trivial indentation corrections in mappings doc * Add doctor check for debug mode enabled * Fixed multiple warnings in tox -edocs * Get assignments with names honors inheritance flag * Updated from global requirements * Add test to expose bug 1625230 * Invalidate token cache after token delete * Revert "Rename doctor symptom in security\_compliance" * Domain included for role in list\_role\_assignment * api-ref update for roles assignments with names * Rename doctor symptom in security\_compliance * Corrects sample-data incorrect credential call * Correct minor issues in test schema * Add unit tests for doctor federation file * Remove CONF.os\_inherit.enabled * Add unit tests for doctor's caching symptoms * Updated 
from global requirements * Updated from global requirements * More info in schema validation error * Minor fix in role\_assignments api-ref * Include mapped in the default auth methods * Validate token issue input * Removes unused exceptions * Removes unused method from assignment core * Removes unused default\_assignment\_driver method * Removed unused EXTENSION\_TO\_ADD test declarations * Use sha512.hash() instead of .encrypt() * Don't invalidate all user tokens of roleless group * Upload service provider metadata to testshib * Updated from global requirements * SAML federation docs refer to old WSGIScriptAlias * cache\_on\_issue default to true * Make try/except work for passlib 1.6 and 1.7 * Document token header in federation auth response * Refactor Keystone admin-tokens and admin-users v2 * ignore deprecation warning for .encrypt() * Send the identity.deleted.role\_assignment after the deletion * Allow fetching an expired token * Show team and repo badges on README * Remove eventlet-related call to sleep * Add a comment about not using assertTrue * clean up developer docs * Improvements in error messages * Remove trailing "d" from -days param of OpenSSL command * Swap the notification formats in the docs * Normalizes use of ForbiddenAction in trusts * Enable CADF notification format by default * Remove unused statements in matches * Fix doc example * Remove extension and auth\_token middleware docs * Move docs from key\_terms to architecture * move content from configuringservices to configuration * Update configuration.rst documentation * Verbose 401/403 debug responses * Fix the misspelling in \`keystone/tests/unit/test\_cli.py\` * refactor notification test to work with either format * Clarify the v2.0 validation path * Remove metadata from token provider * Lockout ignore user list * Add developer docs for keystone-manage doctor * [api] add changelog from 3.0 -> 3.7 * Devstack plugin to federate with testshib.org * Remove entry\_points to non-existent 
drivers * Fix typo in doc 11.0.0.0b1 ---------- * remove release note about LDAP write removal * Change "Change User Password" request example * Fixes remaining nits in endpoint\_policy tests * Remove reference to future removal of saml * Limits config fixture usage to where it's needed * Updated from global requirements * Remove format\_token method * Remove issue\_v3\_token in favor of issue\_token * Remove issue\_v2\_token * refactor the token controller * Use issue\_v3\_token instead of issue\_v2\_token * Updates to the architecture doc * Support nested groups in Active Directory * Add healthcheck middleware to pipelines * Request cache should not update context * Change cfg.set\_defaults into cors.set\_defaults * Updated from global requirements * Updated from global requirements * Doc warning for keystone db migration * Wording error in upgrading documentation * Updated from global requirements * fix credentials backend tests * Allow running expand & migrate at the same time * Add test cases for passing "None" as a hint * Fix test\_revoke to run all tests after pki removal * Updated from global requirements * Switch fernet to be the default token provider * Remove support for PKI and PKIz tokens * Doc the difference between memcache and cache * Doctor ldap check fix for config files * Additional logging when authenticating * Document OS-SIMPLE-CERT Routes * Document v2 Revoked Token Route * Add api-ref /auth/tokens/OS-PKI/revoked (v3) * Fix broken links in the docs * Add structure for Devstack plugin * Add bindep environment to tox * Pass a request to controllers instead of a context * Create default role as a part of bootstrap * Updated from global requirements * Don't deprecate the LDAP property which is still needed * Clarifying on the remove of \`build\_auth\_context\` middleware * log.error use \_ of i18n * Doctor check for LDAP domain specific configs * Updated from global requirements * Updated from global requirements * Validate mapping exists when 
creating/updating a protocol * Remove new\_id() in test\_revoke * Adds warning when no domain configs were uploaded * Add release note for fernet tokens * Tweak api-ref doc for v3 roles * Tweak api-ref doc for v3 roles status codes * Reorder APIs in api-ref for v3 groups * [api-ref] Remove the duplicated sample * Follow-on of memcache token persistence removal * changed domain id to name in JSON request * More configuration doc edits * Remove backend dependencies from token provider * Updated from global requirements * [api-ref] Fix couple of issues on OS-INHERIT API * Code cleanup * Replace tenant with project for keystone catalog * Imported Translations from Zanata * Update, correct, and enhance federation docs * Invalidate trust when the related project is deleted * Remove unused arg(project and initiator) * Drop MANIFEST.in - it's not needed by pbr * Ignore unknown arguments to fetch\_token * Return password\_expires\_at during auth * Move the token abstract base class out of core * Add is\_admin\_project to policy dict * Fix a typo in token\_formatters.py * Improve check\_token validation performance * Add revocation event indexes * Add docs for PCI-DSS * Invalidate trust when the trustor or trustee is deleted * Updated from global requirements * [api] add a note about project name restrictions * One validate method to rule them all.. 
* Simplify the KeystoneToken model * Remove validate\_v2\_token() method * [api] remove \`user\_id\` and \`project\_id\` from policy * Remove the decorator where it's not applied * Optimize remove unused variable * Remove those redundant variable declaration * [doc] Correct mapping JSON example * Remove no use variable (domain\_id) * Remove redundant variable declaration * Deprecate \`endpoint\_filter.sql\` backend * remove deprecated \`[endpoint\_policy] enable\` option * Pass initiator to Manager as a kwarg * create release notes for removed functionality * Remove driver version specifiers from tests * Enable release notes translation * Remove driver version from identity backend test names * Remove driver version from docs * Updated from global requirements * Default the assignment backend to SQL * remove legacy driver tox target * Use validate\_v3\_token instead of validate\_token * Ensure all v2.0 tokens are validated the same way * Make sure all v3 tokens are validated the same way * re-add valid comment about None domain ID * Default the resource backend to SQL * Make returning is\_domain conditional * Move audit initiator creation to request * Don't validate token expiry in the persistence backend * Add tests for validating expired tokens * Fix a typo in \_init\_.py * Remove password history validation from admin password resets * Updating the document regarding LDAP options * Updated from global requirements * Remove the unused sdx doc files * Updated from global requirements * Remove the no use arg (auth=None) * Fix typo in docstring * Tweak api-ref for v3 groups status codes * Updated from global requirements * Add Apache 2.0 license to source file * Fix a typo in core.py and bp-domain-config-default-82e42d946ee7cb43.yaml * Validate password history for self-service password changes * Make test\_v3\_auth exercise the whole API * Remove stable driver interfaces * Updated from global requirements * Remove the check for admin token in build\_auth\_context 
middleware * Reorder APIs in api-ref doc for v3 users * Fix a docstring typo in test\_v3\_resource.py * Using assertIsNone(...) instead of assertIs(None, ...) * Updated from global requirements * remove deprecated items from contrib * Update man page for Ocata release version and date * Using assertIsNone() instead of assertIs(None) * Remove default=None when set value in config * Undeprecate options used for signing * Remove unused path in the v2 token controller * Fix the belongsTo query parameter * Fix 'API Specification for Endpoint Filtering' broken link * Add domain check in domain-specific role implication * Override credential key repository for null key tests * Remove useless method override * remove memcache token persistence backends * remove keystone/service.py * remove saml2 auth plugin * remove httpd/keystone.py * remove cache backends * Revert "Allow compatibility with keystonemiddleware 4.0.0" * Consolidate the common code into one method * Handle the exception from creating request token properly * Fix formatting strings in LOG.debug * Fix formatting strings in LOG.warning * Handle the exception from creating access token properly * Updated from global requirements * Tweak status code in api-ref doc for v3 users * Fix prameters names in Keystone API v2-ext * Refactor Keystone admin-tenant API v2 * Refactor Keystone admin-endpoint API * Fix for unindent warning in doc build * add placeholder migrations for newton * Remove default=None for config options * Ensure the sqla-migrate scripts cache is cleared * Move test\_sql\_upgrade.MigrationRepository into keystone.common * Rename sql.migration\_helpers to sql.upgrades * Give domain admin rights to domain specific implied roles * Update reno for stable/newton * Refactor find\_migrate\_repo(): require caller to specify repo * Fixes password created\_at errors due to the server\_default * Move the responsibility for stdout to the CLI module * Use a read-only DB session to retrieve schema version * Move 
rolling upgrade repo names into constants 10.0.0.0rc1 ----------- * Removal of imports within functions * Trivial fixes in the ldap common functions * Test that rolling upgrade repos are in lockstep * Add unit tests for isotime() * Remove unused \_convert\_to\_integers() method * Adds tests for verify\_length\_and\_trunc\_password() * Remove unused read\_cached\_file method from utils * Allow compatibility with keystonemiddleware 4.0.0 * Fix links on configure\_federation documentation * Add edge case tests for disabling a trustee * Fix prameters name and response codes in Keystone API v2 * Tweak api-ref doc for services/endpoints * Use issued\_at in fernet token provider * Remove unused method from keystone.common.utils * Use ConfigParser instead of SafeConfigParser * Consistently round down timestamps * Remove the APIs from doc that is not supported yet * TrivialFix: Merge imports in code * Fix the nit on how to deploy keystone with \`mod\_proxy\_uwsgi\` * Tweak api-ref doc for projects * Remove the dead link in schema migration doc * Updated from global requirements * Fix order of arguments in assertIs * New notes on advanced upgrade/fallback for cluster * standardize release note page ordering * [api-ref] Correct response code status * Replace six iteration methods with standard ones * Fixes a nit in a comment * Updates configuration doc with latest changes * Use freezegun for change password tests * Update sample keystone.conf for Newton * Project domain must match role domain for assignment * Add docs for the null key * Log warning if null key is used for encryption * Introduce null key for credential encryption * More nit doc fixes * Keep the order of passwords in tests * EndpointPolicy driver doesn't inherit interface * [api-ref] Stop supporting os-api-ref 1.0.0 * Fix up some doc nits * Only cache callables in the base manager * [api-ref] Correcting parameter's type * Correct link type * Fix problems in service api doc * Raise NotImplementedError instead of 
NotImplemented * Add the deprecated\_since to deprecated options * Add doctor checks for credential fernet keys * Few new commands missing from docs * Emit log message for fernet tokens only * Implement encryption of credentials at rest * Typo: key\_manger\_factory to key\_mangler\_factory 10.0.0.0b3 ---------- * Fixes spelling mistakes * Fixes migration where password created\_at is nullable * Block global roles implying domain specific roles * Correct typo in mapping\_populate command's help * Relax the requirement for mappings to result in group memberships * Document credential encryption * Update sample uwsgi config for lazy-apps * Add documentation on how to set a user's tenant * Pre-cache new tokens * Config logABug feature for Keystone api-ref * Fix nits in db migration dev docs * Disallow new migrations in the legacy migration repository * Updated from global requirements * Update developer docs for new rolling upgrade repos * Add man page info for credential setup command * Remove unnecessary try/except from token provider * Fixes small grammar mistake in docstring * Add a feature support matrix for identity sources * Fix wrong response codes in 'groups' APIs * Make token\_id a required parameter in v3\_to\_v2\_token * Distributed cache namespace to invalidate regions * Fix formatting strings when using multiple variables * Add credential setup command * Add Response Example for 'Create credential' API * Add Response Example for 'Passwd auth with unscoped authorization' * Remove mapping schema from the doc * Impose a min and a max on time values in CONF.token * Repair link in Keystone documentation * Faster id mapping lookup * Fix some typos in comments * Cleaning imports in code * Updated from global requirements * TrivialFix: Remove logging import unused * Removes old, unused code * Reduce log level of Fernet key count message * Updated from global requirements * Adds password regular expression checks to doctor * Let upgrade tests control all 4 
repositories at once * Adds check that minimum password age is less than password expires days * Remove unused global variable from unit tests * Modify sql banned operations for each of the new repos * Use egg form of osprofiler in paste pipeline * api-ref: Splitting status lines in API v3-ext * api-ref: Splitting status lines in API v3 * Remove mox from test-requirements * TrivialFix: Remove logging import unused * [api-ref]: Outdated link reference * Remove unnecessary \_\_init\_\_ * Add mapping\_populate command * Doc fix: license rendered in published doc * Doc fix: "keystone-manage upgrade" is not a thing * Fix credential update to ec2 type * Add key repository uniqueness check to doctor * Update \`href\` for keystone extensions * Updated from global requirements * Fix the wrong URI for the OAuth1 extension in api-ref * Shadowing a nonlocal\_user incorrectly creates a local\_user * Add entrypoint for mapped auth method * Get ready for os-api-ref sphinx theme change * Add rolling upgrade documentation * Add create and update methods to credential Manager * Create a fernet credential provider * Make KeyRepository shareable * Add conf to support credential encryption * Password expires ignore user list * Add expand, data migration and contract logic to keystone-manage * [api] add relationship links to v3-ext * Removes use of freezegun in test\_auth tests * Removes a redundant test from FernetAuthWithTrust * api-ref: Fix parameters attributes * Set default value for [saml]/idp\_contact\_surname * Tidy up for late-breaking review comments on keystone-manage * PCI-DSS Minimum password age requirements * api-ref: Document domain specific roles * Revert "Add debug logging to revocation event checking" * Replace the content type with correct one * Add credential encryption exception * Pass key\_repository and max\_active\_keys to FernetUtils * Make a FernetUtils class * Move fernet utils into keystone/common/ * Add support for rolling upgrades to keystone-manage * 
api-ref: Document implied roles API * Support new osprofiler API * api-ref: Correcting V3 OS-INHERIT APIs * Fix typo in the file * Add debug logging to revocation event checking * Detail Federation Service Provider APIs in api-ref * Detail Fed Projects and Domains APIs in api-ref * add a header for the federation APIs * Detail Federation Mapping APIs in api-ref docs * Detail Federation Auth APIs in api-ref docs * Detail Federation Assertion APIs in api-ref docs * Move other-requirements.txt to bindep.txt * Detail IdP APIs in api-ref docs * api-ref: Add default domain config documentation * Constraints are ready to be used for tox.ini * Updated from global requirements * [api] add relationship links to v3 * Refactor revoke matcher * Document get auth/catalog,projects,domains * api-ref: Renaming parameters of V3-ext APIs * api-ref: Correcting V3 Credentials APIs * api-ref: Correcting V3 Policies APIs * api-ref: Correcting V3 Authentication APIs * api-ref: Correcting V3 Domain config APIs * Use international logging message * Updates Development Environment Docs * Create unit tests for endpoint policy drivers * api-ref: Add query options to GET /projects API documentation * Updated from global requirements * api-ref: Add missing parameter tables to tenant * Create unit tests for the policy drivers * api-ref: Correcting V3 Endpoints APIs * api-ref: Correcting V3 Services APIs * api-ref: Add "nocatalog" option to GET /v3/auth/tokens * Fix warning when running tox -e api-ref * Add basic upgrade documentation * Document query option (is\_domain) for projects * remove test utilities related to adding extensions * Update etc/keystone.conf.sample * Make hash\_algorithms order deterministic * PCI-DSS Password expires validation * Report v2.0 as deprecated in version discovery * Update the api-ref to mark the v2 API as deprecated * Add schema validation to create user v2 * Fix the spelling of a test name * Remove mention of db\_sync per backend * Trust controller refactoring * 
Use more specific asserts in tests * Updated from global requirements * Add debug logging for RevokeEvent deserialize problem * Make all token provider behave the same with trusts * Use URIOpt for endpoint URL options * Clean up the introductory text in the docs * Retry revocation on MySQL deadlock * Add schema validation to update user v2 * PCI-DSS Lockout requirements * Improve domain configuration API docs * Skip middleware request processing for admin token * Move Assertion API to its own file * Bump API version number and date * Move Federation Auth API to its own file * Move List Projects and Domains API to its own file * Move Service Provider API to its own file * Move Mapping API to its own file * Use %()d for integer substitution * Don't include openstack/common in flake8 exclude list * Added postgresql libs to developer docs * Add schema validation to create service in v2 * Remove the redundant verification in OAuth1 authorization * Add schema validation to v2 update tenant * refactor idp to its own file * Updated from global requirements * PCI-DSS Password history requirements * Move Identity Provider API to its own file * Add dummy domain\_id column to cached role * Allow attributes other than \`enabled\` in schema * Remove the extensions repos * Document the domain config API as stable * Remove configuration references to eventlet * Adds a custom deepcopy handler * Add token feature support matrix to documentation * Test number of queries on list\_users * No need the redundant validation in manager level * Add the missing testcases for \`name\` and \`enabled\` * Adds test for SecurityError's translation behavior * TOTP auth not functional in python3 * Invalid tls\_req\_cert constant as default * Add schema validation to v2 create tenant * Use quotes consistently in token controller * Add performance tuning documentation * Allow V2TestCase to be tested against fernet and uuid * Make AuthWithTrust testable against uuid and fernet * Improve os-federation 
docs * Fix v2-ext API enabled documentation * PCI-DSS Adds password\_expires\_at to API docs * Make it so federated tokens are validated on v2.0 * Use freezegun in AssignmentInheritanceTestCase * Only run KvsTokenCacheInvalidation against uuid * Use freezegun in OSRevokeTests * refactor: make TestFetchRevocationList test uuid * refactor: make TestAuthExternalDefaultDomain test uuid/pki/pkiz * refactor: make TestAuthKerberos test pki/pkiz/uuid * Add schema validation to create role * Replace OpenStack LLC with OpenStack Foundation * refactor: inherit AuthWithRemoteUser for other providers * Run AuthWithToken against all token providers * Don't run TokenCacheInvalidation with Fernet * Refactor TestAuthExternalDomain to not inherit tests * Use freezegun to increment clock in test\_v3\_assignment * Add schema for enabling a user * Fix up the api-ref request/response parameters for projects * \`password\` is not required for updating a user * Clarify V2 API for enabling or disabling user * Removed duplicate parameter in v2-admin api-ref * Fix the errors in params in api-ref for V3 region * Fix the errors in params in api-ref for V3 user * Added cache for id mapping manager * Updated from global requirements * Add Python 3.5 classifier * Handle Py35 fix of ast.node.col\_offset bug * deprecate a few more LDAP config options * Clean up api-ref for domains * keystone-manage doctor * v2 api: add APIs for setting a user's password * Update os-inherit API reference * Updated from global requirements * Run AuthTokenTests against fernet and uuid * Use freezegun to increment the clock in test\_v3\_filters * Prevent error when duplicate mapping is created * Fix the wrong check condition * Clean up the api-ref for groups * Updated from global requirements * Improve introdcution to api-ref projects * Migrate OS-FEDERATION from specs repo * v2 api: remove APIs for global roles * v2 api: group and order the v2-ext APIs * v2 api: remove duplicated delete user API * v2 api: add missing 
/roles in role CRUD APIs * v2 api: list user roles is defined twice * v2 api: add OS-KSADM to service API routes * v2 api: add tenant APIs * v2 api: delete user is defined twice * v2 api: change update user * v2 api: correct user list * Update Identity endpoint in v2 samples * Fix up numerous errors in params in api-ref for roles * Fix up the api-ref for role query paramaters * Fix the username value in federated tokens * Improve readability of the api-ref roles section * Use constraints for coverage job * clean up OAUTH API * Add relationship links to OAUTH APIs * Remove \`name\` property from \`endpoint\` create/update API * Add v2.0 /endpoints/ api-ref * Update identity endpoint in v3 and v3-ext samples * Pass request to v2 token authenticate * Remove unused context from AuthInfo * Correct normal response codes for v2.0 extensions * Improve user experience involving token flush * Add "v2 overview" docs to APIs * add OS-OAUTH1/authorize/{request\_token\_id} API * Move OS-INHERIT api-ref from extensions to core * re-order the oauth APIs * Copy the preamble / summary of OAuth1 from the specs repo * Correct normal response codes in trust documentation * Add OS-EP-FILTER to api-ref 10.0.0.0b2 ---------- * PCI-DSS Password strength requirements * Variables in URL path should be required * Remove get\_trust\_id\_for\_request function * Pass request to normalize\_domain\_id * Remove a validate\_token\_bind call * Remove get\_user\_id in trust controller * Cleanup trusts controller * Trivial spacing and comma corrections * Add OS-KSCRUD api-ref * Disable warnerrors in setup.cfg temporarily * Add is\_domain to project example responses * Add is\_domain to scope token response examples * Improve keystone.conf [security\_compliance] documentation * Improve keystone.conf [signing] documentation * Correct normal response codes in OS-INHERIT docs * Fix python{3,}-all-dev depends in deb based * Correct normal status codes for v2.0 admin docs * Improve keystone.conf 
[shadow\_users] documentation * Correct normal response codes for region docs * Correct normal response codes for auth docs * Correct normal response codes for credential docs * Correct normal response codes for project docs * Correct normal response codes for policy docs * Correct normal response codes for v2.0 versions doc * Correct normal response codes in v2.0 versions doc * Correct normal response codes in v2.0 tenant docs * Use URIOpt instead of StrOpt for SAML config * Correct normal response codes for role docs * Correct normal response codes in v2.0 token docs * Correct normal response codes in service catalog doc * Correct normal response codes in oauth docs * Correct normal response codes in v2.0 admin user docs * Improve keystone.conf [token] documentation * Correct normal response codes in endpoint policy docs * Validate SAML keyfile & certfile options * Improve keystone.conf [tokenless\_auth] documentation * Complete OS-TRUST API documentation * Fixes response codes in endpoint policy api-ref * List 20X status codes as Normal in domain docs * Improve the API documentation for groups * Create APIs for OS-REVOKE * Clean up token binding validation code * Reorder request params in endpoint policy api-ref * Adds missing parameter to endpoint policy api-ref * Adds missing docs to endpoint policy api-ref * Reorders API calls to match precedence rules * Improve keystone.conf [saml] documentation * Handle more auth information via context * Require auth\_context middleware in the pipeline * Updated from global requirements * Improve keystone.conf [trust] documentation * Improve keystone.conf [role] documentation * Improve keystone.conf [ldap] documentation * Improve keystone.conf [os\_inherit] documentation * Improve keystone.conf [revoke] documentation * Improve keystone.conf [resource] documentation * Move logic for catalog driver differences to manager * Minor docstring cleanup for domain\_id mapping * Remove unnecessary stable attribute value for status * 
Updated from global requirements * Mark the domain config via API as stable * Remove validated decorator * Move request validation inline * Invalidate token cache on domain disablement * Isolate token caching into its own region * Doc update on enabled external auth and federation * keystone recommend deprecated memcache backend * Use request object in policy enforcement * Use the context's is\_admin property * Add the oslo\_context to the environment and request * Use http\_client constants instead of hardcoding * Increase test coverage for token APIs * Ensure status code is always passed as int * Fix fernet token validate for disabled domains/trusts * Doc update for moving abstract base classes out of core * Fix \_populate\_token\_dates method signature * Move the trust abstract base class out of core * Move the credential abstract base class out of core * Move the auth plugins abstract base class out of core * Expose bug with Fernet tokens and trusts * Remove last parts of query\_string from context * Remove get\_auth\_context * Correct reraising of exception * Pass request to build\_driver\_hints * Remove headers from context * Use request.environ through auth and federation * Remove accept\_header from context * Fixed a Typo * Docs: Fix the query params in role\_assignments example * [doc/api]Remove space within word * Remove unused LOG * Make assert\_admin work with a request * Add missing preamble for v3 and v3-ext * move OAUTH1 API to extensions * generate separate index files for each api-ref * Migrate identity /v2-admin docs from api-ref repo * Use request instead of context in v2 auth * Handle catalog backends that don't support all functions * Refactoring: remove the duplicate method * Return \`revoked\_at\` for list revoke events * Use skip\_test\_overrides everywhere we feature skip * Improve keystone.conf [fernet\_tokens] documentation * Improve keystone.conf [catalog] documentation * Refactor: [ldap] suffix should not be an instance attribute * 
Grammar fix: will -> can * Fixes hacking's handling of log hints * Improve keystone.conf [paste\_deploy] documentation * Improve keystone.conf [kvs] documentation * Improve keystone.conf [identity] documentation * Improve keystone.conf [endpoint\_filter] documentation * Improve keystone.conf [oauth1] documentation * Verify domain\_id when get\_domain is being called * Updated from global requirements * Include doc directory in pep8 checks * Do not register options on import * Improve keystone.conf [policy] documentation * Improve keystone.conf [memcache] documentation * Use min to avoid checking < 1 max fernet keys * Improve keystone.conf [identity\_mapping] documentation * Improve keystone.conf [federation] documentation * Updated tests that claimed to be blocked by bugs * Use skip\_test\_overrides in test\_backend\_ldap * Adds a skip method to identify useless skips * Update the nosetests test regex for legacy tests * update a config option deprecation message * Improve keystone.conf [eventlet\_server] documentation * Improve keystone.conf [endpoint\_policy] documentation * Improve keystone.conf [credential] documentation * Improve keystone.conf [domain\_config] documentation * Rename [DEFAULT] keystone.conf module to keystone.conf.default * Improve keystone.conf [DEFAULT] documentation * Remove test\_backend\_ldap skips for missing tests * Removes duplicate ldap test setup * Extracted common ldap setup and use in the filter tests * Reduce domain specific config setup duplication * API Change Tutorial doc code modify * Update other-requirements for Xenial * Concrete role assignments for federated users * PCI-DSS Disable inactive users requirements * Migrate identity /v3-ext docs from api-ref repo * Migrate identity /v2-ext docs from api-ref repo * Migrate identity /v2 docs from api-ref repo * Use request.params instead of context['query\_string'] * Config: no need to set default=None * Do not spam the log with uncritical stacktraces * Improve keystone.conf [auth] 
documentation * Improve keystone.conf [assignment] documentation * Group test\_backend\_ldap skips for readability * Adds a backend test fixture * Remove unused test code * Moves auth plugin test setup closer to its use * Add security\_compliance group back to config * Fix nits related to the new keystone.conf package * Fixes failure when password is null * Allow auth plugins to be setup more than once * Removes outdate comment from a test * Replace keystone.common.config with keystone.conf package * Updated from global requirements * Fix a few spelling mistakes * Allow user to get themself and their domain * PCI-DSS Password SQL model changes * Fix argument order for assertEqual to (expected, observed) * Use the ldap fixture to simplify tests * Change the remaining conf setup to use the fixture * Reduce setup overhead in auth\_plugin tests * /services?name= API fails when using list\_limit * Updated from global requirements * Make sure to use InnoDB as the DB engine * Remove TestAuth * Move last few TestAuth tests to TokenAPITests * Move external auth and bind test to TokenAPITests * Refactor test\_validate\_v2\_scoped\_token\_with\_v3\_api * Remove test\_validate\_v2\_unscoped\_token\_with\_v3\_api * Move more project scoped token behavior to TokenAPITests * Validate impersonation in trust redelegation * Correct domain\_id and name constraint dropping * Integration tests cleanup * Use http\_proxy\_to\_wsgi from oslo.middleware * Use request object in auth plugins * Move cross domain/group/project auth tests * Move negative token tests to TokenAPITests * Move unscoped token test to TokenAPITests * Move negative domain scope test to TokenAPITests * Consolidate domain token tests into TokenAPITests * Move more project scoped behavior tests to TokenAPITests * Move project scoped catalog tests to TokenAPITests * Update driver versioning documentation * Move project scoped tests to TokenAPITests * Move TestAuth unscoped token tests to TokenAPITests * Add cache 
invalidation for service providers * Updated from global requirements * Add 'links' to implied roles response * Updated from global requirements * fix ldap delete\_user group member cleanup * exception sensitive cache/audit changes * Fix TOTP transient test failure * Change LocalUser sql model to eager loading * Shadow LDAP and custom driver users * Refactor shadow users * Fix ValidationError exception name in docstring * Add docstring to delete\_project * Updated from global requirements * Revert to caching fernet tokens the same way we do UUID * Honor ldap\_filter on filtered group list * Pass a request to controllers instead of a context * Update the keystone-manage man page options * clean up test\_resource\_uuid * Return 404 instead of 401 for tokens w/o roles * Updating sample configuration file * Revert "Install necessary files in etc/" * Keystone uwsgi performance tuning * Add caching config for federation * Updated from global requirements * Updating sample configuration file * Updating sample configuration file * Bootstrap: enable and reset password for existing users * PEP257: Ignore D203 because it was deprecated * Cache service providers on token validation * Refactor revoke\_model to remove circular dependency * Update man page for Newton release * Move stray notification options into config module * Adding role assignment lists unit tests * Add protocols integration tests * Add mapping rules integration tests * Add service providers integration tests * Imported Translations from Zanata * Updated from global requirements 10.0.0.0b1 ---------- * Simplify & fix configuration file copy in setup.cfg * Config settings to support PCI-DSS * Fix credentials\_factory method call * Allow domain admins to list users in groups with v3 policy * Updating sample configuration file * Updated from global requirements * Honor ldap\_filter on filtered user list * Install necessary files in etc/ * Replace revoke tree with linear search * Migrate identity /v3 docs from 
api-ref repo * Updated from global requirements * Add new functionality to @wip * remove deprecated revoke\_by\_expiration function * Isolate common ldap code to the identity backend * Updated from global requirements * Remove helper script for py34 * Include project\_id in the validation error on default project is domain * Add python 3 release note * Add comment to test case helper function * Add Python 3 classification * Py3 oauth tests * Enable py3 tests for test\_v3\_auth * make sure default\_project\_id is not domain on user creation and update * Let setup.py compile\_catalog process all language files * Fix broken link of federation docs * Add new line in keystone/common/request.py * Move identity.backends.sql model code to sql\_model.py * Add .mo files to MANIFEST.in * Replace context building with a request object * Enable py3 testing for Fernet token provider * Enable py3 for credential tests * reorganize mitaka release notes * enable ldap tests for py3 * Updated from global requirements * Add the validation rules when create token * Use PyLDAP instead of python-ldap * Fix config path for running wsgi in developer mode * Move the revoke abstract base class out of core * Updated from global requirements * Port test\_v2 unit test to Python 3 * Move the oauth1 abstract base class out of core * Drop the (unused) domain table * Don't set None for ldap.OPT\_X\_TLS\_CACERTFILE * Add API Change Tutorial * Deprecate keystone.common.kvs * Updating sample configuration file * Add is\_domain in token response * Switch to use \`new\_domain\_ref\` for testcases * Move the assignment abstract base class out of core * Add identity providers integration tests * Update documentation to remove keystone-all * Updating sample configuration file * Updated from global requirements * replace logging with oslo.log * Move the federation abstract base class out of core * Separate protocol schema * Updated from global requirements * Move the catalog abstract base class and common 
code out of core * Enhance federation group mapping validation * Add mapping validation tests * Fixes example in the mapping combinations docs * do not search file on real environment * Allow 'domain' property for local.group * Add conflict validation for idp update * Always add is\_admin\_project if admin project defined * Make keystone exit when fernet keys don't exist * Fix fernet audit ids for v2.0 * Revert "Revert "Unit test for checking cross-version migrations compatibility"" * Make all fixture project\_ids into uuids * Fixing D105, D203, and D205 PEP257 * Remove test\_invalid\_policy\_raises\_error * switch to tempest instead of deprecated tempest-lib * Move the resource abstract base class out of core * Correct RST syntax for a code block * Restructure policy abstract driver * Updated from global requirements * Add test for authentication when project and domain name clash * Fix doc build if git is absent * Restructure endpoint policy abstract driver * Clean up test\_receive\_identityId * Fix typos * Fixes incorrect deprecation warning for IdentityDriverV8 * Add other-requirements.txt * Fix D400 PEP257 * Imported Translations from Zanata * Updating sample configuration file * Customize config file location when run as wsgi app * Updated from global requirements * Updating sample configuration file * Updated from global requirements * Bump the required tox version to 2.3.1 * Add set\_config\_defaults() call to tests * update deprecation warning for falling back to default domain * Tests clean up global ldap settings * Define identity interface - easy cases * add missing deprecation reason for eventlet option * Remove comments mentioning eventlet * Remove support for generating ssl certs * Updating sample configuration file * Remove eventlet support * Default caching to on for request-local caching * Typo in sysctl command example Edit * Typo fix in tests * Add logging to cli if keystone.conf is not found * Fix post jobs * Refactor domain config upload * 
Keystone jobs should honor upper-constraints.txt * Fix confusing naming in ldap EnableEmuMixin * Updating sample configuration file * Deprecation reason for domain\_id\_immutable * Test list project hierarchy is correct for a large tree * Fix D401 PEP8 violation * OSprofiler release notes * Updating sample configuration file * Updated from global requirements * Add keystone service ID to observer audit * group federated identity docs together * Change Role/Region to role/region in keystone-manage bootstrap * Use mockpatch fixtures from fixtures * Set the values for the request\_local\_cache * Add missing backslash to keystone-manage bootstrap command in documentation * fix typo * Fix KeyError when rename to a name is already in use * Improve project name conflict message * Imported Translations from Zanata * Updating sample configuration file * Dev doc update for moving abstract base classes out of core * Simplify chained comparison * Update the description of the role driver option * Integrate OSprofiler in Keystone * Update the Administrator guide link * Clean up test case for shadow users * Fixes bug where the updated federated display\_name is not returned * Make AuthContext depend on auth\_token middleware * Fix totp test fails randomly 9.0.0 ----- * Update federated user display name with shadow\_users\_api * Update federated user display name with shadow\_users\_api * Remove comment from D202 rule * Remove backend interface and common code out of identity.core * Use messaging notifications transport instead of default * Run federation tests under Python 3 * Bandit test results * create a new \`advanced topics\` section in the docs 9.0.0.0rc2 ---------- * Correct \`role\_name\` constraint dropping * Correct \`role\_name\` constraint dropping * Base for keystone tempest plugin * Random project should return positive numbers * Imported Translations from Zanata * Improve error message for schema validation * Imported Translations from Zanata * The name can be 
just white character except project and user * Fix typos in Keystone files * Add \`patch\_cover\` to keystone * Fix keystone-manage config file path * Cleanup LDAP models * Correct test to support changing N release name * Correct \_populate\_default\_domain in tests * Imported Translations from Zanata * Removing redundant words * Imported Translations from Zanata * Correct test to support changing N release name * Fix keystone-manage config file path * Opportunistic testing with different DBs * Correct test\_implied\_roles\_fk\_on\_delete\_cascade * Fix table row counting SQL for MySQL and Postgresql * Switch migration tests to oslo.db DbTestCase * Correct test\_migrate\_data\_to\_local\_user\_and\_password\_tables * Fix test\_add\_int\_pkey\_to\_revocation\_event\_table for MySQL * Imported Translations from Zanata * Implement HEAD method for all v3 GET actions * Avoid name repetition in equality comparisons * Simplify repetitive unequal checks * Imported Translations from Zanata * Add test for domains list filtering and limiting * Imported Translations from Zanata * remove endpoint\_policy from contrib * Moved name formatting (clean) out of the driver * Add py3 debugging * Add release note for list\_limit support * Add release note for list\_limit support * Cleanup migration tests * Imported Translations from Zanata * Imported Translations from Zanata * Update dev docs and sample script for v3/bootstrap * add placeholder migrations for mitaka * Enables the notification tests in py3 * Update reno for stable/mitaka * Update .gitreview for stable/mitaka 9.0.0.0rc1 ---------- * Support \`id\` and \`enabled\` attributes when listing service providers * Check for already present user without inserting in Bootstrap * Mapping which yield no identities should result in ValidationError * Make backend filter testing more comprehensive * Move region configuration to a critical section * Change xrange to range for python3 compatibility * Remove reference to keystoneclient 
CLI * Document running in uwsgi proxied by apache * Updating sample configuration file * Imported Translations from Zanata * Correct Hints class filter documentation * Release note cleanup * Update reported version for Mitaka * Add docs for additional bootstrap endpoint parameters * Remove unused notification method and class * Consolidate @notifications.internal into Audit * Imported Translations from Zanata * Remove some translations * Imported Translations from Zanata * Fixed user in group participance * register the config generator default hook with the right name * Imported Translations from Zanata * Rename v2 token schema used for validation * Migrate\_repo init version helper * Remove TestFernetTokenProvider * Refactor TestFernetTokenProvider trust-scoped tests * Refactor TestFernetTokenProvider project-scoped tests * Refactor TestFernetTokenProvider domain-scoped tests * Refactor TestFernetTokenProvider unscoped token tests * Fixing mapping schema to allow local user * Fix keystone-manage example command path * Make modifications to domain config atomic * Add auto-increment int primary key to revoke.backends.sql * Add PKIZ coverage to trust tests * Consolidate TestTrustRedelegation and TestTrustAuth tests * Expose not clearing of user default project on project delete * Split out domain config driver and manager tests * Add notifications to user/group membership * Add ability to send notifications for actors * Updated from global requirements * Remove foreign assignments when deleting a domain * Correct create\_project driver versioning * Explicitly exclude tests from bandit scan * Move role backend tests * v2 tokens validated on the v3 API are missing timezones * Move domain config backend tests * Validate v2 fernet token returns extra attributes * Clarify virtualenv setup in developer docs * Fixes a few LDAP tests to actually run * Imported Translations from Zanata * Un-wrap function * Fix warning when running tox * Race condition in keystone domain 
config * Adding 'domain\_id' filter to list\_user\_projects() * Add identity endpoint creation to bootstrap * Updated from global requirements * Remove \_disable\_domain from the resource API * Remove \_disable\_project from the resource API * Remove the notification.disabled decorator * Remove unused notification decorators * Cleanup from from split of token backend tests * Split identity backend tests * Split policy backend tests * Split catalog backend tests * Split trust backend tests * Split token backend tests * Split resource backend tests * Split assignment backend tests * Updated from global requirements * Consolidate configuration default overrides * Updating sample configuration file * IPV6 test unblacklist * Fix trust chain tests 9.0.0.0b3 --------- * Minor edits to the developing doc * Add release notes for projects acting as domains * Fix keystone.common.wsgi to explicitly use bytes * fix sample config link that 404s * add hints to list\_services for templated backend * Fixes hacking for Py3 tests * Fixes to get cert tests running in Py3 * Fixes the templated backend tests for Python3 * remove pyc files before running tests * Stop using oslotest.BaseTestCase * Return 404 instead of 401 for tokens w/o roles * Remove unused domain driver method in legacy wrapper * Deprecate domain driver interface methods * Fix the migration issue for the user doesn't have a password * Add driver details in architecture doc * Shadow users - Shadow federated users * Projects acting as domains * Update developer docs for ubuntu 15.10 * Moved CORS middleware configuration into oslo-config-generator * V2 operations create default domain on demand * Make keystone tests work on leap years * Updating sample configuration file * Fix doc build warnings * Enable LDAP connection pooling by default * Delay using threading.local() to fix check job failure * Minor edits to the installation doc * Minor edits to the configuration doc * Minor community doc edits * Updated from global 
requirements * Followup for LDAP removal * Remove get\_session and get\_engine * No more legacy engine facade in tests * Use requst local in-process cache per request * Move admin\_token\_auth before build\_auth\_context in sample paste.ini * Update default domain's description * Reference config values at runtime * Use the new enginefacade from oslo.db * Updated from global requirements * Fix incorrect assumption when deleting assignments * Remove migration\_helpers.get\_default\_domain * db\_sync doesn't create default domain * Implied roles index with cascading delete * Fix project-related forbidden response messages * Fixes a bug when setting a user's password to null * Renamed TOTP passcode generation function * Updates TOTP release note * Simplify use of secure\_proxy\_ssl\_header * Shadow users - Separate user identities * Switch to configless bandit * Parameter to return audit ids only in revocation list * Add tests for fetching the revocation list * Updating sample configuration file * Deprecate logger.WritableLogger * Removing H405 violations from keystone * Updated from global requirements * Updated from global requirements * Updating sample configuration file * Remove useless {} from \_\_table\_args\_\_ * Time-based One-time Password * Fix inconsistencies between Oauth1DriverV8 interface and driver * Oauth1 manager sets consumer secret * Remove setting class variable * Allow user list without specifying domain * Adds user\_description\_attribute mapping support to the LDAP backend * encode user id for notifications * Add back a bandit tox job * Enable support for posixGroups in LDAP * Add is\_domain filter to v3 list\_projects * Add tests in preparation of projects acting as a domain * Avoid using \`len(x)\` to check if x is empty * Use the driver to get limits * Fallback to list\_limit from default config * Add list\_limit to the white list for configs in db * Updating sample configuration file * handle unicode names for federated users * Verify 
project unique constraints for projects acting as domains * wsgi: fix base\_url finding * Disable Admin tokens set to None * Modify rules for domain specific role assignments * Modify implied roles to honor domain specific roles * Modify rules in the v3 policy sample for domain specifc roles * Re-enable and undeprecate admin\_token\_auth * Don't describe trusts as an extension in configuration doc * Tidy up configuration documentation for inherited assignments * Clean up configuration documentataion on v2 user CRUD * Allow project domain\_id to be nullable at the manager level * Trivial: Cleanup unused conf variables * Updating sample configuration file * Updating sample configuration file * Fixes parameter in duplicate project name creation * Fix terms from patch 275706 * sensible default for secure\_proxy\_ssl\_header * Restricting domain\_id update * Allow project\_id in catalog substitutions * Avoid \`None\` as a redundant argument to dict.get() * Avoid "non-Pythonic" method names * Manager support for project cascade update * Updating sample configuration file * Expand implied roles in trust tokens * add a test that uses trusts and implies roles * Updating sample configuration file * Convert assignment.root\_role config option to list of strings * Avoid wrong deletion of domain assignments * Manager support for project cascade delete * AuthContextMiddleware admin token handling * Deprecate admin\_token\_auth * Adds better logging to the domain config finder * Extracts logic for finding domain configs * Fix nits from domain specific roles CRUD support * Change get\_project permission * Updated from global requirements * Enables token\_data\_helper tests for Python3 * Stop using nose as a Python3 test runner * Fix release note of removal of v2.0 trusts support * Remove PostParams middleware * Updated from global requirements * Moves policy setup into a fixture * Make pep8 \*the\* linting interface * Added tokenless auth headers to CORS middleware * Add backend 
support for deleting a projects list * Make fernet work with oauth1 authentication * Consolidate the fernet provider validate\_v2\_token() * Remove support for trusts in v2.0 * Add CRUD support for domain specific roles * Added CORS support to Keystone * Deprecate Saml2 auth plugin * Uses open context manager for templated catalogs * Disable the ipv6 tests in py34 * Missing 'region' in service and 'name' in endpoint for EndpointFilterCatalog * Small typos on the ldap.url config option help * Replace exit() with sys.exit() * include sample config file in docs * Fixes a language issue in a release note * Imported Translations from Zanata * Updated from global requirements * Support multiple URLs for LDAP server * Set deprecated\_reason on deprecated config options * Move user and admin crud to core * squash migrations - kilo * Adds validation negative unit tests * Use oslo.log specified method to set log levels * Add RENO update for simple\_cert\_extension deprecation * Opt-out certain Keystone Notifications * Update the home page * Release notes for implied roles * deprecate pki\_setup from keystone-manage * test\_credential.py work with python34 * Consolidate \`test\_contrib\_ec2.py\` into \`test\_credential.py\` * Reinitialize the policy engine where it is needed * Provide an error message if downgrading schema * Updated from global requirements * Consolidate the fernet provider issue\_v2\_token() * Consolidate the fernet provider validate\_v3\_token() * Add tests for role management with v3policy file * Fix some word spellings * Make WebSSO trusted\_dashboard hostname case-insensitive * Deprecate simple\_cert extension * Do not assign admin to service users * Add in TRACE logging for the manager * Add schema for OAuth1 consumer API * Correct docstrings * Remove un-used test code * Raise more precise exception on keyword mapping errors * Allow '\_' character in mapping\_id value * Implied Roles API * Revert "Unit test for checking cross-version migrations 
compatibility" * replace tenant with project in cli.py * Fix schema validation to use JSONSchema for empty entity * Replace tenant for project in resource files * Reuse project scoped token check for trusts * Add checks for project scoped data creep to tests * Add checks for domain scoped data creep * Use the oslo.utils.reflection to extract the class name * Test hyphens instead of underscores in request attributes * Simplify admin\_required policy * Add caching to role assignments * Enable bandit tests * Update bandit.yaml * Enhance manager list\_role\_assignments to support group listing * remove KVS backend for keystone.contrib.revoke * Fix trust redelegation and associated test * use self.skipTest instead of self.skip * Removed deprecated revoke KVS backend * Revert "skip test\_get\_token\_id\_error\_handling to get gate passing" * Updated from global requirements * Updated from global requirements * skip test\_get\_token\_id\_error\_handling to get gate passing * Ensure pycadf initiator IDs are UUID * Check for circular references when expanding implied roles * Improves domain name case sensitivity tests * Fixes style issues in a v2 controller tests * Prevents creating is\_domain=True projects in v2 * Refactors validation tests to better see the cases * Remove keystone/common/cache/\_memcache\_pool.py * Update mod\_wsgi + cache config docs * Address comments from Implied Role manager patch * Fix nits in include names patch * Unit test for checking cross-version migrations compatibility * Online schema migration documentation * Updated from global requirements * Remove additional references to ldap role attribs * Remove duplicate LDAP test class * Remove more ldap project references 9.0.0.0b2 --------- * Add testcases to check cache invalidation * Fix typo abstact in comments * deprecate write support for identity LDAP * Deprecate \`hash\_algorithm\` config option * Mark memcache and memcache\_pool token deprecated * List assignments with names * Remove LDAP 
Role Backend * Remove LDAP Resource and LDAP Assignment backends * Removes KVS catalog backend * Fix docstring * Strengthen Mapping Validation in Federation Mappings * Add checks for token data creep using jsonschema * Deprecating API v2.0 * Implied roles driver and manager * Add support for strict url safe option on new projects and domains * Remove bandit tox environment * Add linters environment, keep pep8 as alias * Make sure the assignment creation use the right arguments * Fix indentation for oauth context * Imported Translations from Zanata * document the bootstrapping process * Add release note for revert of c4723550aa95be403ff591dd132c9024549eff10 * Updated from global requirements * Enable \`id\`, \`enabled\` attributes filtering for list IdP API * Improve Conflict error message in IdP creation * Fedora link is too old and so updated with newer version * Support the reading of default values of domain configuration options * Correct docstrings for federation driver interface * Update v3policysample tests to use admin\_project not special domain\_id * Enable limiting in ldap for groups * Enable limiting in ldap for users * Doc FIX * Store config in drivers and use it to get list\_limit * Add asserts for service providers * Fix incorrect signature in federation legacy V8 wrapper * Tidy up release notes for V9 drivers * Adds an explicit utils import in test\_v3\_protection.py * Refactor test auth\_plugin config into fixture * Create V9 version of resource driver interface * Updated from global requirements * Separate trust crud tests from trust auth tests * Delete checks for default domain delete * correct help text for bootstrap command * Replace unicode with six.text\_type * Escape DN in enabled query * Test enabled emulation with special user\_tree\_dn * SQL migrations for implied roles * Revert "Validate domain ownership for v2 tokens" * Use assertIn to check if collection contains value * Updated from global requirements * Perform middleware tests with 
webtest * De-duplicate fernet payload tests * Reference driver methods through the Manager * Fix users in group and groups for user exact filters * Expose defect in users\_in\_group, groups\_for\_user exact filters * Replace deprecated library function os.popen() with subprocess * OAuth1 driver doesnt inherit its interface * Update man pages with Mitaka version and dates * Fixes hacking logger test cases to use same base * Adds a hacking check looking for Logger.warn usage * Change LOG.warn to LOG.warning * Remove redundant check after enforcing schema validation * Updating sample configuration file * Create V9 version of federation driver interface * Do not use \_\_builtin\_\_ in python3 * Define paste entrypoints * Add schema for federation protocol * Expose method list inconsistency in federation api * remove irrelevant parenthesis * Add return value * Test: make enforce\_type=True in CONF.set\_override * Updated from global requirements * Add schema for identity provider * Updating sample configuration file * Use six.moves.reload\_module instead of builtin reload * Fix the incompatible issue in response header * Wrong usage of "an" * Correct fernet provider reference * Correct DN/encoding in test * Support url safe restriction on new projects and domains * Correct the class name of the V9 LDAP role driver * Wrong usage of "a/an" * Trival: Remove unused logging import * Updating sample configuration file * Fix pep8 job * Fix some inconsistency in docstrings * Fix 500 error when no fernet token is passed * Cleanup tox.ini py34 test list * Fixes kvs cache key mangling issue for Py3 * Some small improvements on fernet uuid handling * Updated from global requirements * Updating sample configuration file * Fix key\_repository\_signature method for python3 * Add audit IDs to revocation events * Enable os\_inherit of Keystone v3 API * Use pip (and DevStack) instead of setuptools in docs * Correct developer documentation on venv creation * Updating sample configuration 
file * Updated from global requirements * Validate domain for DB-based domain config. CRUD * fix up release notes, file deprecations under right title * Updated Cloudsample * Update \`developing.rst\` to remove extensions stuff * Verify that user is trustee only on issuing token * Adds a base class for functional tests * Make \`bootstrap\` idempotent * Add \`keystone-manage bootstrap\` command * Changed the key repo validation to allow read only * Deprecated tox -downloadcache option removed * Fix defect in list\_user\_ids that only lists direct user assignments * Show defect in list\_user\_ids that only lists direct user assignments * Add API route for list role assignments for tree * Use list\_role\_assignments to get projects/domains for user * Add \`type' filter for list\_credentials\_for\_user * Clean up new\_credential\_ref usage and surrounding code * Create neutron service in sample\_data.sh * Updating sample configuration file * Updated from global requirements * Limiting for fake LDAP * Make @truncated common for all backends * Fix exposition of bug about limiting with ldap * Use assertDictEqual instead of assertEqualPolicies * refactor: Remove unused test method * Remove unfixable FIXME * Use new\_policy\_ref consistently * fix reuse of variables * Remove comments on enforcing endpoints for trust * refactor: move the common code to manager layer * Create V9 Role Driver * Create new version of assignment driver interface * Remove keystoneclient tests * Verify that attribute \`enabled\` equals True * Remove invalid comment about LDAP domain support * Pass dict into update() rather than \*\*kwargs * Refactor test use of new\_\*\_ref * Cleans up code for \`is\_admin\` in tokens * Deprecate ldap Role * Update extensions links * Improve comments in test\_catalog * Fix for GET project by project admin * Fix multiline strings with missing spaces * Updating sample configuration file * Remove invalid TODO in extensions * Updated from global requirements * 
Refactor: Remove use of self where not needed * Refactor: Move uncommon entities from setUp * Split resource tests from assignment tests * Remove invalid TODO related to bug 1265071 * Fix test\_crud\_user\_project\_role\_grants * Deprecate the pki and pkiz token providers * Remove invalid FIXME note * Refactor: Use Federation constants where possible * Remove exposure of routers at package level * Update API version info for Liberty * remove version from setup.cfg * Ensure endpoints returned is filtered correctly * Put py34 first in the env order of tox 9.0.0.0b1 --------- * Add release notes for mitaka-1 * set \`is\_admin\` on tokens for admin project * Use unit.new\_project\_ref consistently * Reference environment close to use * refactor: move variable to where it's needed * Needn't care about the sequence for cache validation * Updated from global requirements * Fix a typo in notifications function doc * Remove RequestBodySizeLimiter from middleware * Optimize "open" method with context manager * eventlet: handle system that misses TCP\_KEEPIDLE * force releasenotes warnings to be treated as errors * Cleanup region refs * Remove \`extras\` from token data * Use subprocess.check\_output instead of Popen * Remove deprecated notification event\_type * Remove check\_role\_for\_trust * Correct RoleNotFound usage * Remove example extension * Updating sample configuration file * Correct docstring warnings * Using the right format to render the docstring correctly * Add release notes for mitaka thus far * Accepts Group IDs from the IdP without domain * Cleanup use of service refs * Update docs for legacy keystone extensions * Correct SecurityError with unicode args * Updated from global requirements * Use idp\_id and protocol\_id in jsonhome * Use standard credential\_id parameter in jsonhome * Remove core module from the legacy endpoint\_filter extension * Minor cleanups for usage of group refs * Reject user creation using admin token without domain * Add Trusts 
unique constraint to remove duplicates * deprecate \`enabled\` option for endpoint-policy extension * remove useless config option in endpoint filter * Use [] where a field is required * Manager support for projects acting as domains * Config option for insecure responses * Add missing colon separators to inline comments * Simplify LimitTests * Rationalize list role assignment routing * Enable listing of role assignments in a project hierarchy * Capital letters * remove use of magic numbers in sql migrate extension tests * Use new\_trust\_ref consistently * Updating sample configuration file * Move endpoint\_filter migrations into keystone core * Move endpoint filter into keystone core * Move revoke sql migrations to common * Move revoke extension into core * Move oauth1 sql migrations to common * Move oauth1 extension into core * Move federation sql migrations to common * Move federation extension into keystone core * Fix string conversion in s3 handler for python 2 * Fix inaccurate debug mode response * Use unit.new\_user\_ref consistently * Imported Translations from Zanata * Updated from global requirements * Add testcases to check cache invalidation in endpoint filter extension * Fix the wrong method name * Updating sample configuration file * change some punctuation marks * Updated from global requirements * Remove hardcoded LDAP group schema from emulated enabled mix-in * Exclude old Shibboleth options from docs * Updated from global requirements * Use new\_domain\_ref instead of manually created ref * Use new\_region\_ref instead of manually created dict * Document release notes process * Use new\_service\_ref instead of manually created dict * Use unit.new\_group\_ref consistently * Use unit.new\_role\_ref consistently * Use unit.new\_domain\_ref consistently * Use unit.new\_region\_ref() consistently * Use unit.new\_service\_ref() consistently * Move AuthContext middleware into its own file * Use unit.new\_endpoint\_ref consistently * Use 
list\_role\_assignments to get assignments by role\_id * Pass kwargs when using revoke\_api.list\_events() * Add reno for release notes management * Make K2K Mapping Attribute Examples more visible * Add S3 signature v4 checking * Fix some nits inside validation/config.py * Add Mapping Combinations for Keystone to Keystone Federation * Remove manager-driver assignment metadata construct * Correct description in Keystone key\_terms * Imported Translations from Zanata * Handle fernet payload timestamp differences * Fix fernet padding for python 3 * More useful message when using direct driver import * Get user role without project id is not implemented * Update sample catalog templates * update mailmap with gyee's new email * Revert "Added CORS support to Keystone" * Updated from global requirements * test\_backend\_sql work with python34 * Use assertTrue/False instead of assertEqual(T/F) * Fix the issues found with local conf * Add test for security error with no message * Add exception unit tests with different message types * Cleanup message handling in test\_exception * Normalize fernet payload disassembly * Common arguments for fernet payloads assembly * Capitalize a Few Words * I18n safe exceptions * Keystone Spelling Errors in docstrings and comments * [rally] remove deprecated arg * Move endpoint\_policy migrations into keystone core * Promote an arbitrary string to be a docstring * Fix D204: blank line required after class docstring (PEP257) * Fix D202: No blank lines after function docstring (PEP257) * Update Configuring Keystone doc for consistency * Comment spelling error in assignment.core file * Fix exceptions to use correct titles * Fix UnexpectedError exceptions to use debug\_message\_format * Fix punctuation in doc strings * Fix docstring * Updating sample configuration file * Explain default domain in docs for other services * Correct bashate issues in gen\_pki.sh * Fix incorrect federated mapping example * change stackforge url to openstack url * 
Updated from global requirements * Adds already passing tests to py34 run * Wrong usage of "an" * Allow the PBR\_VERSION env to pass through tox * Fix D200: 1 line docstrings should fit with quotes (PEP257) * Fix D210: No whitespaces allowed surrounding docstring text (PEP257) * Fix D300: Use """triple double quotes""" (PEP257) * Fix D402: First line should not be the function's "signature" (PEP257) * Fix D208: Docstring over indented. (PEP257) * Add docstring validation * Add caching to get\_catalog * Fix fernet key writing for python 3 * Update test modules passing on py34 * Updated from global requirements * Forbid non-stripped endpoint urls * fix deprecation warnings in cache backends * Create tests for set\_default\_is\_domain in LDAP * Enable try\_except\_pass Bandit test * Enable subprocess\_without\_shell\_equals\_true Bandit test * Correct typo in copyright * Updated from global requirements * switch to oslo.cache * Updating sample configuration file * Updated from global requirements * keystone-paste.ini docs for deployers are out of date * Correct the filename * More info in RequestContext * Fix some nits in \`configure\_federation.rst\` * add placeholder migrations for liberty * Remove bas64utils and tests * Create a version package * Remove oslo.policy implementation tests from keystone * Refactor: Don't hard code 409 Conflict error codes * Fix use of TokenNotFound * Refactor: change 403 status codes in test names * Refactor: change 410 status codes in test names * Refactor: change 400 status codes in test names * Refactor: change 404 status codes in test names * Updated from global requirements * Imported Translations from Zanata * add initiator to v2 calls for additional auditing * Fixed missed translatable string inside exception * Handle 16-char non-uuid user IDs in payload * Additional documentation for services * Rename fernet methods to match expiration timestamp * Updated from global requirements * Enable 
password\_config\_option\_not\_marked\_secret Bandit test * Enable hardcoded\_bind\_all\_interfaces Bandit test * Documentation for other services * Reclassify get\_project\_by\_name() controller method * Trivial fix of some typos found * Filters is\_domain=True in v2 get\_project\_by\_name * Add test case passing is\_domain flag as False 8.0.0 ----- * Ensure token validation works irrespective of padding * Ensure token validation works irrespective of padding * Imported Translations from Zanata * Rename RestfulTestCase.v3\_authenticate\_token() to v3\_create\_token() * Improving domain\_id update tests * Show v3 endpoints in v2 endpoint list * Expose 1501698 bug * Replace sqlalchemy-migrate occurences from code.google to github * Fix unreachable code in test\_v3 module * Imported Translations from Zanata * Use deepcopy of mapping fixtures in tests * Show v3 endpoints in v2 endpoint list * Enable Bandit 0.13.2 tests * Update bandit blacklist\_imports config * Cleanup \_build\_federated\_info * Add LimitRequestBody to sample httpd config * Make \_\_all\_\_ immutable * Skip rows with empty remote\_ids * Includes server\_default option in is\_domain column * Remove unused get\_user\_projects() * Deprecate httpd/keystone.py * Skip rows with empty remote\_ids * Fix order of arguments in assertDictEqual * Cleanup fernet validate\_v3\_token * Update bandit blacklist\_calls config * Add unit test for creating RequestContext * Add user\_domain\_id, project\_domain\_id to auth context * Add user domain info to federated fernet tokens * Unit tests for fernet validate\_v3\_token * Fix order of arguments in assertEqual * Updating sample configuration file * Cleanup of Translations * Imported Translations from Zanata * Uses constants for 5XX http status codes in tests * Fixes v3\_authenticate\_token calls - no default * Fixes the way v3\_admin is called to match its def * Declares expected\_status in method signatures * Refactor: Don't hard code the error code * Correct 
docstrings * Correct comment to not be driver-specific * Move development environment setup instructions to standard location * Fix typo in config help * Use the correct import for range * Adds interface tests for timeutils * Add unit tests for token\_to\_auth\_context * Updating sample configuration file 8.0.0.0rc1 ---------- * Open Mitaka development * Bring bandit config up-to-date * Update the examples used for the trusted\_dashboard option * Log message when debug is enabled * Clean up bandit profiles * federation.idp use correct subprocess * Change ignore-errors to ignore\_errors * Imported Translations from Zanata * Remove unused code in domain config checking * Relax newly imposed sql driver restriction for domain config * Add documentation for configuring IdP WebSSO * Updated from global requirements * check if tokenless auth is configured before validating * Fix the referred [app:app\_v3] into [pipeline:api\_v3] * Updated from global requirements * Issue deprecation warning if domain\_id not specified in create call * functional tests for keystone on subpaths * Removed the extra http:// from JSON schema link * Document httpd for accept on /identity, /identity\_admin * Updated from global requirements * Update federation router with missing call * Reject rule if assertion type unset * Update man pages with liberty version and dates * Refactor: Don't hard code the error code * Move TestClient to test\_versions * Use oslo.log fixture * Update apache-httpd.rst * Updated from global requirements * Remove padding from Fernet tokens * Imported Translations from Transifex * Updated from global requirements * Fixed typos in 'developing\_drivers' doc * Stop using deprecated keystoneclient function * Change tests to use common name for keystone.tests.unit * Removes py3 test import hacks * Updating sample configuration file * Fixes confusing deprecation message 8.0.0.0b3 --------- * Add methods for checking scoped tokens * Build oslo.context RequestContext * Correct 
docstring for common.authorization * Deprecate LDAP Resource Backend * Added CORS support to Keystone * List credentials by type * Fixes a typo in a comment * Tokenless authz with X.509 SSL client certificate * Support project hierarchies in data driver tests * Stable Keystone Driver Interfaces * Initial support for versioned driver classes * Add federated auth for idp specific websso * Adds caching to paste deploy's egg lookup * Fix grammar in doc string * Test list\_role\_assignment in standard inheritance tests * Broaden domain-group testing of list\_role\_assignments * Add support for group membership to data driven assignment tests * Add support for effective & inherited mode in data driven tests * Add support for data-driven backend assignment testing * Updated from global requirements * Change JSON Home for OS-FEDERATION to use /auth/projects|domains * Unit tests for is\_domain field in project's table * Group tox optional dependencies * Provide new\_xyz\_ref functions in tests.core * Refactor mapping rule engine tests to not create servers * Updating sample configuration file * Correct docstrings in resource/core.py * Validate Mapped User object * Set max on max\_password\_length to passlib max * Simplify federated\_domain\_name processing * Get method's class name in a python3-compatible way * Stop reading local config for domain-specific SQL config driver * Enforce .config\_overrides is called exactly once * Use /auth/projects in tests * Remove keystone/openstack/\* from coveragerc * Rationalize unfiltered list role assignment test * Change mongodb extras to lowercase * Refactor: Provider.\_rebuild\_federated\_info() * Refactor: rename Fernet's unscoped federated payload * Fernet payloads for federated scoped tokens * No More .reload\_backends() or .reload\_backend() * Ensure ephemeral user's user\_id is url-safe * Use min and max on IntOpt option types * Adds a notification testcase for unbound methods * Do not revoke all of a user's tokens when a role 
assignment is deleted * Handle tokens created and quickly revoked with insufficient timestamp precision * Show that unscoped tokens are revoked when deleting role assignments * Prevent exception due to missing id of LDAP entity * Expose exception due to missing id of LDAP entity * Add testcase to test invalid region id in request * Add region\_id filter for List Endpoints API * Remove references to keystone.openstack.common * Remove all traces of oslo incubator * Updating sample configuration file * Test v2 tokens being deleted by v3 * Use entrypoints for paste middleware and apps * update links in http-api to point to specs repo * Add necessary executable permission * Refactor: use fixtures.TempDir more * Add is\_domain field in Project Table * Prevent exception for invalidly encoded parameters * Extras for bandit * Use extras for memcache and MongoDB packages * Use wsgi\_scripts to create admin and public httpd files * Update Httpd configuration docs for sites-available/enabled * Remove unnecessary check * Update 'doc/source/setup.rst' * Remove unnecessary load\_backends from TestKeystoneTokenModel * Updated from global requirements * Imported Translations from Transifex * Updated from global requirements * Show helpful message when request body is not provided * Fix logging in federation/idp.py * Enhance tests for saml2 signing exception logging * Remove deprecated methods from assignment.Manager * Stop using deprecated assignment manager methods * EndpointFilter driver doesnt inherit its interface * Hardens the validated decorator's implementation * Updating sample configuration file * Simplify rule in sample v3 policy file * Improve a few random docstrings * Maintain datatypes when loading configs from DB * Remove "tenants" from user\_attribute\_ignore default * Use oslo\_config PortOpt support * Updated from global requirements * Updated from global requirements * Fix the misspelling * When validating a V3 token as V2, use the v3\_to\_v2 conversion * Do not 
require the token\_id for converting v3 to v2 tokens * Maintain the expiry of v2 fernet tokens * Fix typo in doc-string * Validate domain ownership for v2 tokens * Fix docstring in mapped plugin * Updated from global requirements * Minor grammar fixes to connection pooling section * Creates a fixture representing as LDAP database * Sample config help for supplied drivers * Improve List Role Assignments Filters Performance * Update docs for stevedore drivers * Fixes an incorrect docstring in notifications * Stop calling deprecated assignment manager methods * Updated from global requirements * Updating sample configuration file * Adds backend check to setup of LDAP tests * Improve a few random docstrings (H405) * Remove excessive transformation to list * Stop calling deprecated assignment manager methods * Remove reference of old endpoint\_policy in paste file * Fernet 'expires' value loses 'ms' after validation * Correct enabled emulation query to request no attributes * NotificationsTestCase running in isolation * Adds/updates notifications test cases * Fix duplicate-key pylint issue * Fix explicit line joining with backslash * Fixes an issue with data ordering in the tests * Imported Translations from Transifex * Allow Domain Admin to get domain details * Assignment driver cleaning * Cleanup tearDown in unit tests * Fix unbound error in federation \_sign\_assertion * Fix typos of RoleAssignmentV3.\_format\_entity doc * Updating sample configuration file * Updated from global requirements * Remove unnecessary check from notifications.py * Remove oslo import hacking check * Use dict.items() rather than six.iteritems() * Cleanup use of iteritems * Imported Translations from Transifex * Missing ADMIN\_USER in sample\_data.sh * Update exported variables for openstack client * Use extras for ldap dependencies * Add better user feedback when bind is not implemented * Test to ensure fernet key rotation results in new key sets * Better error message when unable to map 
user * Refactor \_populate\_roles\_for\_groups() * Add groups in scoped federated tokens * Adds missing list\_endpoints tests * Reject create endpoint with invalid urls * Explain the "or None" on eventlet's client\_socket\_timeout * Reduce number of Fernet log messages * Fix test\_admin to expect admin endpoint * Fixes a docstring to reflect actual return values * Give some message when an invalid token is in use 8.0.0.0b2 --------- * Updated from global requirements * Ensure database options registered for tests * Document sample config updated automatically * Test function call result, not function object * Test admin app in test\_admin\_version\_v3 * Updating sample configuration file * Handle non-numeric files in key\_repository * Fix remaining mention of KLWT * Updated from global requirements * Replace 401 to 404 when token is invalid * Assign different values to public and admin ports * Fix four typos and Add one space on keystone document * Reuse token\_ref fetched in AuthContextMiddleware * Refactor: clean up TokenAPITests * pemutils isn't used anymore * Imported Translations from Transifex * Fix test\_exception.py for py34 * Fix s3.core for py34 * Updating sample configuration file * Fix test\_utils for py34 * test\_base64utils works with py34 * Minor fix in the \`configuration.rst\` * Correct spacing in \`\`mapping\_combinations.rst\`\` * add federation docs for mod\_auth\_mellon * Avoid the hard coding of admin token * Adding Documentation for Mapping Combinations * Clean up docs before creating new ones * Document policy target for operation * Fix docs in federation.routers * Fix docstrings in contrib * Additional Fernet test coverage * Refactor websso \`\`origin\`\` validation * Docs link to ACTIONS * Clean up code to use .items() * Document default value for tree\_dn options * Remove unnecessary ldap imports * Move backends.py to keystone.server * move clean.py into keystone/common * Updated from global requirements * Remove unnecessary executable 
permission * Move cli.py into keystone.cmd * Do not remove expired revocation events on "get" * Clean up notifications type checking * Federation API provides method to evaluate rules * Move constants out of federation.core * Implement backend filtering on membership queries * Moves keystone.hacking into keystone.tests * Add missing "raise" when throwing exception * Log xmlsec1 output if it fails * Fix test method examining scoped federation tokens * Spelling correction * Fixes grammar in setup.rst in doc source * Updated from global requirements * Deprecate LDAP assignment driver options * Register fatal\_deprecations before use * Use oslo.utils instead of home brewed tempfile * Updating sample configuration file * Add testcases for list\_role\_assignments of v3 domains * Centralizing build\_role\_assignment\_\* functions * Replace reference of ksc with osc * Updated from global requirements * Changing exception type to ValidationError instead of Forbidden * Standardize documentation at Service Managers * Fixes grammar in the httpd README * Fix the incorrect format for docstring * Imported Translations from Transifex * Fixes docstring to make it more precise * Removed optional dependency support * Decouple notifications from DI * Adds proper isolation to templated catalog tests * Fix log message in one of the v3 create call methods * Catch exception.Unauthorized when checking for admin * Remove convert\_to\_sqlite.sh * Fix for LDAP filter on group search by name * Remove fileutils from oslo-incubator * Remove comment for doc building bug 1260495 * Fix code-block in federation documentation * Modified command used to run keystone-all * Delete extra parentheses in assertEqual message * Fix the invalid testcase * Updating sample configuration file * Add unit test for fernet provider * Update federation docstring * Do not specify 'objectClass' twice in LDAP filter string * Fix tox -e py34 * Change mapping model so rules is dict * Add test case for deleting endpoint 
with space in url * Update requirements by hand * Consolidate the fernet provider issue\_v3\_token() * Group role revocation invalidates all user tokens * OS-FEDERATION no longer extension in docs * Switch from deprecated oslo\_utils.timeutils.strtime * Remove unused setUp for RevokeTests * Update MANIFEST.in * Update sample config file * Disable migration sanity check * Updated from global requirements * Use oslo.service ServiceBase when loading from eventlet * Document use of wip up to developer * Simplify fernet rotation code * Tests for correct key removed * Relax the formats of accepted mapping rules for keystone-manage * Python 3: Use range instead of xrange for py3 compatibility 8.0.0.0b1 --------- * Document entrypoint namespaces * Short names for auth plugins * Update sample configuration file * Switch to oslo.service * Update sample configuration file * Remove redundant config * Don't try to drop FK constraints for sqlite * Remove unused requirements * Add missing keystone-manage commands to doc * Mask passwords in debug log on user password operations * Add test showing password logged * Adds some debugging statements * Imported Translations from Transifex * Use stevedore for auth drivers * Refactor extract function load\_auth\_method * Add unit test to exercise key rotation * Fix Fernet key rotation * Update version for Liberty 8.0.0a0 ------- * Refactor: move PKI-specific tests into the appropriate class * Needn't load fernet keys twice * Pass environment variables of proxy to tox * Fix tests failing on slower system * Mapping Engine CLI * Imported Translations from Transifex * Fix spelling in configuration comment * Switch keystone over to oslo\_log versionutils * Updated from global requirements * Use lower default value for sha512\_crypt rounds * Updated from global requirements * Add more Rally scenarios * Remove unnecessary dependencies from KerberosDomain * Remove deprecated external authentication plugins * Remove unnecessary code for default 
suffix * Remove custom assertions for python2.6 * Avoid using the interactive interpreter for a one-liner * Add validity check of 'expires\_at' in trust creation * Revocation engine refactoring * Updated from global requirements * Rename directory with rally jobs files * Fix req.environ[SCRIPT\_NAME] value * Don't query db if criteria longer than col length * Updated from global requirements * Run WSGI with group=keystone * Consolidate test-requirements files * Switch from deprecated isotime * Fix the wrong order of parameters when using assertEqual * Add testcases to test DefaultDomain * Remove the deprecated ec2 token middleware * Replace blacklist\_functions with blacklist\_calls * updates sample\_data script to use the new openstack commands * Log info for Fernet tokens over 255 chars * Update functional tox env requirements * Update sample config file * Correct oauth1 driver help text * Rename driver to backend and fix the inaccurate docstring * Add "enabled" to create service provider example * Update testing keystone2keystone doc * Removes unused database setup code * Refactor: use \_\_getitem\_\_ when the key will exists * Refactor: create the lookup object once * Order routes so most frequent requests are first * \`api\_curl\_examples.rst\` is out of date * Don't assume project IDs are UUID format * Don't assume group IDs are UUID format * Don't fail on converting user ids to bytes * Move endpoint policy into keystone core * Update sample config file * Tests don't override default auth methods/plugins * Tests consistently use auth\_plugin\_config\_override * Test use config\_overrides for configs * Correct tests setting auth methods to a non-list * Make sure LDAP filter is constructed correctly * basestring no longer exists in Python3 * Add mocking for memcache for Python3 tests * Fix xmldsig import * Refactor deprecations tests * Switch from MySQL-python to PyMySQL * Improve websso documentation * Remove the deprecated compute\_port option * Workflow 
documentation is now in infra-manual * Remove XML middleware stub * Rename sample\_config to genconfig * Imported Translations from Transifex * Replace ci.o.o links with docs.o.o/infra * Sync oslo-incubator cc19617 * Use single connection in get\_all function * Removes temporary fix for doc generation * Improve error message when tenant ID does not exist * Updated from global requirements * Add missing part for \`token\` object * Remove identity\_api from AuthInfo dependencies * Move bandit requirement to test-requirements-bandit.txt * Adds inherited column to RoleAssignment PK * Update dev setup requirements for Python 3.4 * Update sample config file * Remove support for loading auth plugin by class * Use [] where a value is required * De-duplicate auth methods * Remove unnecessary oauth\_api check * Use short names for drivers * Fixes deprecations test for Python3 * Add mocking for ldappool for Python3 tests * Fixes a whitespace issue * Handles modules that moved in Python3 * Handles Python3 builtin changes * Fixes use of dict methods for Python3 * Updated from global requirements * Replace github reference by git.openstack.org and change a doc link * Refactor \_create\_attribute\_statement IdP method * Revert "Loosen validation on matching trusted dashboard" * Updated from global requirements * Use correct LOG translation indicator for errors * Add openstack\_user\_domain to assertion * Pass-in domain when testing saml signing * Fixes test nits from a previous review * Implement validation on the Identity V3 API * Fix tiny typo in comment message * Updates the \*py3 requirements files * Fixes mocking of oslo messaging for Python3 * pycadf now supports Python3 * eventlet now supports Python3 * Updated from global requirements * Add openstack\_project\_domain to assertion * Use stevedore for backend drivers * Prohibit invalid ids in subtree and parents list * Update sample config * Fix sample policy to allow user to check own token * Replaced filter with a list 
comprehension * Ignore multiple imports per line for six.moves * Fixes order of imports for pep8 * pep8 whitespace changes * Remove randomness from test\_client\_socket\_timeout * Allow wsgiref to reconstruct URIs per the WSGI spec * Fix the misuse of \`versionutils.deprecated\` * Updated from global requirements * Update openid connect docs to include other distros 2015.1.0 -------- * Updated from global requirements * Remove pysqlite test-requirement dependency * Fixes tests to use the config fixture * Isolate injection tests * Sync oslo-incubator Ie51669bd278288b768311ddf56ad31a2f28cc7ab * Sync oslo-incubator Ie51669bd278288b768311ddf56ad31a2f28cc7ab * Fixes cyclic ref detection in project subtree * Updated from global requirements * Updated from global requirements * Release Import of Translations from Transifex * Make memcache client reusable across threads * Imported Translations from Transifex * Remove project association before removing endpoint group * Loosen validation on matching trusted dashboard * adds a tox target for functional tests * Adds an initial functional test * Fix the incorrect comment * Set default branch to stable/kilo * Remove assigned protocol before removing IdP * Expose domain\_name in the context for policy.json * Update developer doc to reference Ubuntu 14 * Make memcache client reusable across threads * Update Get API version Curl example * Remove unused policy rule for get\_trust * backend\_argument should be marked secret * Update man pages for the Kilo release * make sure we properly initialize the backends before using the drivers * WebSSO should use remote\_id\_attribute by protocol * Work with pymongo 3.0 * Fix incorrect setting in WebSSO documentation * Stops injecting revoke\_api into TestCase * Checking if Trust exists should be DRY * Use correct LOG translation indicator for warnings * backend\_argument should be marked secret * Fix signed\_saml2\_assertion.xml tests fixture * Don't provide backends from \_\_all\_\_ in 
persistence * Add domain\_id checking in create\_project * Update keystone.sample.conf * Use choices in config.py * make sure we properly initialize the backends before using the drivers * WebSSO should use remote\_id\_attribute by protocol * Refactor common function for loading drivers * Tests don't override default config with default * Refactor MemcachedBackend to not be a Manager * Update openstack-common reference in openstack/common/README * Exposes bug on role assignments creation * Removes discover from test-reqs * Work with pymongo 3.0 2015.1.0rc1 ----------- * Update man pages for the Kilo release * Add placeholders for reserved migrations * Redundant events on group grant revocation * Open Liberty development * Improved policy setting in the 'v3 filter' tests * Handle NULL value for service.extra in migration 066 * Skip SSL tests because some platforms do not enable SSLv3 * Fix the typo in \`token/providers/fernet/core.py\` * Fix index name the assignment.actor\_id table * Add index to the revocation\_event.revoked\_at * Document websso setup * Allow identity provider to be created with remote\_ids set to None * Update testing docs * Import fernet providers only if used in keystone-manage * Imported Translations from Transifex * Fix multiple SQL backend usage validation error * Expose multiple SQL backend usage validation error * Fix for notifications for v2 role grant/delete * Update sample config file * Fix errors in ec2 signature logic checking * Don't add unformatted project-specific endpoints to catalog * Reload drivers when their domain config is updated * Correcting the name of directory holding dev docs * Fixes bug in Federation list projects endpoint * Exposes bug in Federation list projects endpoint * Updated from global requirements * Refactor assignment driver internal clean-up method names * Remove unnecessary .driver. 
references in assignment manager * Rename notification for create/delete grants * Drop sql.transaction() usage in migration * Update configuration documentation for domain config * Fix for migration 062 on MySQL * Bump advertised API version to 3.4 * Extract response headers to private method * Deprecate eventlet config options * Imported Translations from Transifex * remove useless nocatalog tests of endpoint\_filter * Add API to create ecp wrapped saml assertion * Add relay\_state\_prefix to Service Provider * Change the way values are migrated for 007\_add\_remote\_id\_table * Add routing for list\_endpoint\_groups\_for\_project * Use ORM in upgrade test instead of manual query construction * Remove empty request bodies * Remove unnecessary import that was not checked * IdP ID registration and validation * Imported Translations from Transifex * add test of /v3/auth/catalog for endpoint\_filter * Entrypoints for commands * More content in the guide for core components' migration * Make trust manager raise formatted message exception * Revert "Document mapping of policy action to operation" * Remove SQL Downgrades * Add caching to getting of the fully substituted domain config * Refactor \_create\_projects\_hierarchy in tests * Fixes bug when getting hierarchy on Project API * Exposes bug when getting hierarchy on Project API * Move common checks into base testcase * Tests use common base class * use tokens returned by delete\_tokens to invalidate cache * Loosen the validation schema used for trustee/trustor ids * region.description is optional and can be null * Update access control configuration in httpd config * Document mapping of policy action to operation * Update install.rst for Fedora * Update sample config file * Remove parent\_id in v2 tenant response * Tox env for Bandit * Refactor: extract and rename unique\_id method * create \_member\_ role as specified in CONF * Fix sample policy to allow user to revoke own token * Add unit tests for sample policy 
token operations * Mark some strings for translation * Add fernet to test\_supported\_token\_providers * Fix up token provider help text * Tests use Database fixture * Remove parent\_id in v2 token response * Update ServiceProviderModel attributes * Add docstrings to keystone.notifications functions * Remove unused metadata parameter from get\_catalog methods * Imported Translations from Transifex * Cleanup use of .driver * Specify time units for default\_lock\_timeout * Remove stevedore from test-requirements * Lookup identity provider by remote\_id for websso * Deal with PEP-0476 certificate chaining checking * Distinguish between unset and empty black and white lists * Remove unused domain config method paramters * Correct path in request logging * Correct request logging query parameters separator * Fix setting default log levels * On creation default service name to empty string * Needn't workaround when invoking \`app.request()\` 2015.1.0b3 ---------- * Imported Translations from Transifex * Support upload domain config files to database * Update sample httpd config file * Update Apache httpd config docs for token persistence * Cleanup Fernet testcases and add comments * Add inline comment and docstrings fixes for Fernet * Fix nullable constraints in service provider table * Move backend LDAP role testing to the new backend testing module * URL quote Fernet tokens * Use existing token test for Fernet tokens * Implement Fernet tokens for v2.0 tokens * Refactor code supporting status in JSON Home * remove expected backtrace from logs * Log when no external auth plugin registered * Adds test for federation mapping list order issues * Updated from global requirements * Enable sensitive substitutions into whitelisted domain configs * Imported Translations from Transifex * Create a fixture for key repository * Ignore unknown groups in lists for Federation * Remove RestfulTestCase.admin\_request * Remove SSL configuration instructions from HTTPd docs * Wrap 
apache-httpd.rst * Remove fix for migration 37 * Cleanup for credentials schema test * Refactor sql filter code for clarity * Prefer . to setattr()/getattr() * Build domain scope for Fernet tokens * Mark the domain config API as experimental * Imported Translations from Transifex * Allow methods to be carried in Fernet tokens * Federated token formatter * Refactor: make Fernet token creation/validation API agnostic * Convert audit\_ids to bytes * Drop Fernet token prefixes & add domain-scoped Fernet tokens * Add JSON schema validation for service providers * Implements whitelist and blacklist mapping rules * Adding utf8 to federation tables * Eventlet green threads not released back to pool * Abstract the direct map concept into an object * Remove redundant creation timestamp from fernet tokens * Fix deprecated group for eventlet\_server options * Sync oslo-incubator to f2cfbba * Cleanup test keeping unnecessary fixture references * Fix typo in name of variable in resource router * Add test to list projects by the parent\_id * Fixes minor spelling issue * Crosslink to other sites that are owned by Keystone * Imported Translations from Transifex * move region and service exist checks into manager layer * make credential policy check ownership of credential * Remove unused threads argument * Refactor: remove dep on trust\_api / v3 token helper * Enable use of database domain config * add oauth authentication to config file * Prevent calling waitall() inside a GreenPool's greenthread * Rename get\_events to list\_events on the Revoke API * Address nits for default cache time more explicit * add cadf notifications for oauth * Add scope info to initiator data for CADF notifications * Removed maxDiff attribute from TestCase * Refactoring: use BaseTestCase instead of TestCase * Moved sys.exit mocking into BaseTestClass * Refactor: move initiator test to cadf specific section * Refactor: create a common base for notification tests * Migrations squash * Consistently use 
oslo\_config.cfg.CONF * Removes logging code that supported Python <2.7 * Refactoring: removed client method from TestCase * Refactoring: remove self.\_config\_file\_list from TestCase * Deprecate passing "extras" in token data * 'Assignment' has no attr 'get\_domain\_by\_name' * Refactor: make extras optional in v3 get\_token\_data * Remove extra semicolon from mapping fixtures * Imported Translations from Transifex * Fix seconds since epoch use in fernet tokens * Add API support for domain config * Remove unused checkout\_vendor * Move test\_core to keystone.tests.unit.tests * Fixes the SQL model tests * Add documentation for key terms and basic authenticating * Remove useless comment from requirements.txt * Move pysaml to requirements.txt for py3 * Docstring fixes in fernet.token\_formatters * Made project\_id required for ec2 credential * Add Federation mixin for setting up data * Refactor: remove token formatters dep on 'token\_data' on create() * Refactor: rename the "standard" token formatter to "scoped" * Add unscoped token formatter for Fernet tokens * Fix the wrong order of parameters when using assertEqual * Imported Translations from Transifex * Spelling and grammar cleanup * Fixes bug in SQL/LDAP when honoring driver\_hints * Remove policy parsing exception * Cleanup policy related tests * Remove incubated version of oslo policy * Use oslo.policy instead of incubated version * Fixes minor whitespace issues * Updated from global requirements * Add checking for existing group/option to update domain config * Stop debug logging of Ldap while running unit tests * Exposes bug in SQL/LDAP when honoring driver\_hints * Updated from global requirements * Fix typos in tests/unit/core.py * Remove unnecessary import * Update developer docs landing page * Add support for whitelisting and partial domain configs * Change headers to be byte string friendly * fix import order in federation controller * Imported Translations from Transifex * Fix a minor coding nit in 
Fernet testing * Move install of cryptography before six * refactor: extract and document audit ID generation * Update sample config file * log query string instead of openstack.params and request args * Cleanup docstrings in test\_v3\_federation.py * refactor: consistently refer to "unpacked tokens" as the token's "payload" * refactor: extract fernet packing & unpacking methods * Fix nits from 157495 * Deprecate Eventlet Deployment in favor of wsgi containers * remove old docstr referring to keyczar * Implement backend driver support for domain config * Use revocation events for lightweight tokens * Avoid multiple instances for a provider * Always load revocation manager * Cleanup comments from 159865 * Updated from global requirements * Rename "Keystone LightWeight Tokens" (KLWT) to "Fernet" tokens * Make the default cache time more explicit in code * Keystone Lightweight Tokens (KLWT) * Refactor and provide scaffolding for domain specific loading * Populate token with service providers * Add CADF notifications for trusts * Get initiator from manager and send to controller * Add in non-decorator notifiers * Implemented caching in identity layer * Imported Translations from Transifex * Use dict comprehensions instead of dict constructor * Remove deprecated methods and functions in token subsystem * Authenticate local users via federated workflow * Move UserAuthInfo to a separate file * Make RuleProcessor.\_UserType class public * Enhance user identification in mapping engine * Remove conditional check (and test) for oauth\_api * Fixes test\_multiple\_filters filters definition * Remove conditionals that check for revoke\_api * Use correct dependency decorator * Add minimum release support notes for federation * Update \`os service create\` examples in config services * Reference OSC docs in CLI examples * Chain a trust with a role specified by name * Add parent\_id to test\_project\_model * Revamp the documentation surrounding notifications * Remove unused tmp 
directory in tests * Correct initialization order for logging to use eventlet locks * add missing links for v3 OS-EC2 API response * Remove explicit mentions of JSON from test\_v2 * Rename test\_keystoneclient\* * Rename test\_content\_types * Fix for KVS cache backend incompatible with redis-py * Enable endpoint\_policy, endpoint\_filter and oauth by default * Add links to extensions that point to api specs * Classifying extensions and defining process * Imported Translations from Transifex * Add oslo request id middleware to keystone paste pipeline * Uses SQL catalog driver for v2 REST tests * Fixed skip msg in templated catalog test * Remove invalid comment/statement at role manager * Standardize notifications types as constants * Change use of random to random.SystemRandom * Remove extra call to oauth manager from tests * Remove an extra call to create federation manager * Updated from global requirements * Imported Translations from Transifex * Improve List Role Assignment Tests * Enable filtering in LDAP backend for listing entities * Refactor filter and sensitivity tests in preparation for LDAP support * Imported Translations from Transifex * Provide additional detail if OAuth headers are missing * Add WebSSO support for federation * Check consumer and project id before creating request token * Regenerate sample config file * Move eventlet server options to a config section * refactor: use \_get\_project\_endpoint\_group\_url() where applicable * Update sample config file * Consistently use oslo\_config.cfg.CONF * Imported Translations from Transifex * Removes unnecessary checks when cleaning a domain * Remove check\_role\_for\_trust from sample policies * Remove duplicated test for get\_role * Add a test for create\_domain in notifications * Add CADF notification handling for policy/region/service/endpoint * Publicize region/endpoint/policy/service events * Add CADF notifications for most resources * Updated from global requirements * Drop foreign key 
(domain\_id) from user and group tables * Make federated domain configurable * Imported Translations from Transifex * Move backend role tests into their own module * Fix nits from patch #110858 * Fix invalid super() usage in memcache pool * Add a domain to federated users * Wrap dependency registry * Remove unnecessary code setting provider * Fix tests to not load federation manager twice * Fix places where role API calls still called assignment\_api * fix a small issue in test\_v3\_auth.py * Imported Translations from Transifex * rename cls in get\_auth\_context to self * make tests of endpoint\_filter check endpoints num * remove the Conf.signing.token\_format option support * Remove list\_endpoint\_groups\_for\_project from sample policies * Add get\_endpoint\_group\_in\_project to sample policy files * Check for invalid filtering on v3/role\_assignments * Remove duplicate token revocation check * Remove incubator version of log and local * Use oslo.log instead of incubator * Move existing tests to unit * Cleanup tests to not set multiple workers * Use subunit-trace from tempest-lib * Log exceptions safely * Imported Translations from Transifex * Refactor \_send\_audit\_notification * Updated from global requirements * Remove excess brackets in exception creation * Update policy doc to use new rule format * remove the unused variables in indentity/core.py * fix assertTableColumns * Imported Translations from Transifex * make federation part of keystone core * Small cleanup of cloudsample policy * Fix error message on check on RoleV3 * Improve creation of expected assignments in tests * Add a check to see if a federation token is being used for v2 auth * Adds a fork of python-ldap for Py3 testing * Updates Python3 requirements * Sync with oslo-incubator * Add local rules in the federation mapping tests * Don't try to convert LDAP attributes to boolean * Add schema for endpoint group * Split the assignments controller * Use \_VersionsEqual for a few more version 
tests * Remove test PYTHONHASHSEED setting * Correct version tests for result ordering * Correct a v3 auth test for result ordering * Correct catalog response checker for result ordering * Correct test\_get\_v3\_catalog test for result ordering * Correct test\_auth\_unscoped\_token\_project for result ordering * Fix the syntax issue on creating table \`endpoint\_group\` * Change hacking check to verify all oslo imports * Change oslo.i18n to oslo\_i18n * Change oslo.config to oslo\_config * Change oslo.db to oslo\_db * Remove XMLEquals from tests * Remove unused test case * Don't coerce port config values * Make identity id mapping handle unicode * Improve testing of unicode id mapping * Add new "RoleAssignment" exception * Imported Translations from Transifex * log wsgi requests at INFO level * Fix race on default role creation * Imported Translations from Transifex * Unscoped to Scoped only * Refactor federation SQL backend 2015.1.0b2 ---------- * Set initiators ID to user\_id * Updated from global requirements * Change oslo.messaging to oslo\_messaging * Change oslo.serialization to oslo\_serialization * Handle SSL termination proxies for version list * Imported Translations from Transifex * Update federation config to use Service Providers * Drop URL field from region table * Create K2K SAML assertion from Service Provider * Service Providers API for OS-FEDERATION * Implements subtree\_as\_ids query param * Refactor role assignment assertions * Fixes 'OS-INHERIT:inherited\_to' info in tests * During authentication validate if IdP is enabled * Fix typo in Patch #142743 * Make the LDAP dependency clear between identity, resource & assignment * Implements parents\_as\_ids query param * Internal notifications for cleanup domain * Multiple IDP authentication URL * Change oslo.utils to oslo\_utils * Imported Translations from Transifex * Regenerate sample config file * Make unit tests call the new resource manager * Make controllers and managers reference new resource 
manager * Remove unused pointer to assignment in identity driver * Move projects and domains to their own backend * Make role manager refer to role cache config options * Documentation fix for Keystone Architecture * Imported Translations from Transifex * Fix evaluation logic of federation mapping rules * Deprecate LDAP Assignment Backend * Fix up \_ldap\_res\_to\_model for ldap identity backend * Remove local conf information from paste-ini * Use RequestBodySizeLimiter from oslo.middleware * Adds a wip decorator for tests * Remove list\_user\_projects method from assignment * Updated from global requirements * Remove unnecessary code block of exception handling * Updated from global requirements * Add library oslo.concurrency in config-generator config file * Updated from global requirements * Explicit Unscoped * add missing API in docstring of EndpointFilterExtension * fix test\_ec2\_list\_credentials * Assignment sql backend create\_grant refactoring * Updated from global requirements * Imported Translations from Transifex * Remove TODO comment which has been addressed * Refactor keystone-all and http/keystone * Updated from global requirements * Identify groups by name/domain in mapping rules * do parameter check before updating endpoint\_group * Move sql specific filter test code into test\_backend\_sql * Fix incorrect filter test name * Update the keystone sample config * Minor fix in RestfulTestCase * Scope federated token with 'token' identity method * Correct comment about circular dependency * Refactor assignment manager/driver methods * Make unit tests call the new, split out, role manager * Make controllers call the new, split out, role manager * Correct doc string for grant driver methods * Split roles into their own backend within assignments * correct the help text of os\_inherit * Update Inherited Role Assignment Extension section * Limit lines length on configuration doc * Fixes spacing in sentences on configuration doc * Fixes several typos on 
configuration doc * Trust redelegation * add missing parent\_id parameter check in project schema * Fix incorrect session usage in tests * Fix migration 42 downgrade * Updated from global requirements * Additional test coverage for password changes * Fix downgrade test for migration 61 on non-sqlite * Fix transaction issue in migration 44 downgrade * Correct failures for H238 * Move to hacking 0.10 * Updated from global requirements * Remove unused fields in base TestCase * Keystoneclient tests from venv-installed client * Fix downgrade from migration 61 on non-sqlite * explicit namespace prefixes for SAML2 assertion * Remove requirements not needed by oslo-incubator modules anymore * Remove unused testscenarios requirement * Cleanup test-requirements for keystoneclient * Fix tests using extension drivers * Ensure manager grant methods throw exception if role\_id is invalid * update sample conf using latest oslo.conf * Remove unnecessary oslo incubator bits * let endpoint\_filter sql backend return dict data * Tests fail only on deprecation warnings from keystone * switch from sample\_config.sh to oslo-config-generator * Add positive test case for content types * Update the keystone.conf sample * remove invalid note * invalidate cache when updating catalog objects * Enable hacking rule H302 * fix wrong self link in the response of endpoint\_groups API * Imported Translations from Transifex * improve the EP-FILTER catalog length check in test\_v3.py * Don't allow deprecations during testing * Fix to not use deprecated Exception.message * Integrate logging with the warnings module * rename oslo.concurrency to oslo\_concurrency * Fix to not use empty IN clause * Be more precise with flake8 filename matches * Use bashate to run\_tests.sh * Move test\_utils to keystone/tests/unit/ * add circular check when updating region * fix the wrong update logic of catalog kvs driver * Removes a Py2.6 version of assertSetEqual * Removes a Py2.6 version of inspect.getcallargs * 
Removes a bit of WSGI code converts unicode to str * Expanded mutable hacking checks * Make the mutable default arg check very strict * sync to oslo commit 1cf2c6 * Update federation docs to point to specs.o.org * Memcache connection pool excess check * Always return the service name in the catalog * Update docs to no longer show XML support 2015.1.0b1 ---------- * Check and delete for policy\_association\_for\_region\_and\_service * Remove unnecessary ldap import * Rename \`removeEvent\` to be more pythonic * Fix the way migration helpers check FK names * Remove XML support * Fix modifying a role with same name using LDAP * Add a test for modifying a role to set the name the same * Fix disabling entities when enabled is ignored * Add tests for enabled attribute ignored * Cleanup eventlet use in tests * Fix update role without name using LDAP * Add test for update role without name * Inherited role assignments to projects * Updated from global requirements * Fix inherited user role test docstring * Fixes links in Shibboleth configuration docs * Updated from global requirements * fix wrong indentation in contrib/federation/utils.py * Adds openSUSE support for developer documentation * User ids that begin with 0 cannot authenticate through ldap * Typo in policy call * Updated from global requirements * Remove endpoint\_substitution\_whitelist config option * Correct max\_project\_tree\_depth config help text * Adds correct checks in LDAP backend tests * Updated from global requirements * Add an identity backend method to get group by name * Create, update and delete hierarchical projects * drop developer support for OS X * Ignore H302 - bug 1398472 * Remove irrelative comment * remove deprecated access log middleware * Multiple IdPs problem * Fixes docstring at eventlet\_server * Fix the copy-pasted help info for db\_version * Updated from global requirements * TestAuthPlugin doesn't use test\_auth\_plugin.conf * Add missing translation marker for dependency * Use \_ 
definition from keystone.i18n * Remove Python 2.6 classifier * Correct token flush logging * Speed up memcache lock * Moves hacking tests to unit directory * Fixes create\_saml\_assertion() return * Add import i18n to federation/controllers.py * Correct use of config fixture * Extends hacking check for logging to verify i18n hints * Adds missing log hints for level E/I/W * make sample\_data.sh account for the default options in keystone.conf * Adds dynamic checking for mapped tokens * Updated from global requirements * Enable cloud\_admin to list projects in all domains * Remove string from URL in list\_revoke\_events() * Configuring Keystone edits * Update keystone readme to point to specs.o.org * Imported Translations from Transifex * Add WSGIPassAuthorization to OAuth docs * Increase test coverage of test\_versions.py * Move test\_pemutils.py to unit test directory * Don't return \`\`user\_name\`\` in mapped.Mapped class * Increase test coverage of test\_base64utils.py * Move base64 unit tests to keystone/tests/unit dir * Move injection unit tests to keystone/tests/unit * Move notification unit tests to unit test dir * Allow for REMOTE\_USER name in federation mapping * Doc about specifying domains in domains specific backends * Remove useless field passed into SQLAlchemy "distinct" statement * Exclude domains with inherited roles from user domain list * Improve testing of exclusion of inherited roles * Fix project federation tokens for inherited roles * Improve testing of project federation tokens for inherited roles * Fix domain federation tokens for inherited roles * Improve testing of domain federation tokens for inherited roles * Fix misspelling at configuration.rst file * Remove duplicate setup logic in federation tests * Imported Translations from Transifex * Enable hacking rule H904 * Move shib specific documentation * Additional debug logs for federation flows * Add openid connect support * Imported Translations from Transifex * Enable hacking rule H104 
File contains nothing but comments * Rename \_handle\_saml2\_tokens() method * Updated from global requirements * Update references to auth\_token middleware * Use true() rather than variable/singleton * Change ca to uppercase in keystone.conf * default revoke driver should be the non-deprecated driver * Prevent infinite loop in token\_flush * Adds IPv6 url validation support * Provide useful info when parsing policy file * Doc about deleting a domain specific backend domain * Updated from global requirements * Remove token persistence proxy * Correct use of noqa * Use oslo.concurrency instead of sync'ed version * revise error message for keystone.token.persistence pkg * Change config option examples to v3 * Sync modules from oslo-incubator * test\_utils use jsonutils from oslo.serialization * Add fileutils module * Move check\_output and git() to test utils * Remove nonexistant param from docstring * Fixes aggressive use of translation hints * PKI and PKIZ tokens unnecessary whitespace removed * Move unit tests from test\_backend\_ldap * Use correct name of oslo debugger script * Updated from global requirements * Imported Translations from Transifex * Change /POST to /ECP at federation config * Base methods to handle hierarchical projects * use expected\_length parameter to assert expected length * fix the wrong order of assertEqual args in test\_v3 * sys.exit mock cleanup * Tests raise exception if logging problem * Correct the code path of implementation for the abstract method * Use newer python-ldap paging control API * Add xmlsec1 dependency comments * Add parent\_id field to projects * Add max-complexity to pep8 for Keystone * Remove check\_password() in identity.backend.ldap * Restrict certain APIs to cloud admin in domain-aware policy * Remove unused ec2 driver option * Extract Assignment tests from IdentityTestCase * Clean up federated identity audit code * obsolete deployment docs * Remove database setup duplication * Fixes endpoint\_filter tests * 
Fixes a spelling error in hacking tests * Fixes docstrings to be more accurate * Update the feature/hierarchical-multitenancy branch * Updated from global requirements 2014.2 ------ * updated translations * Remove deprecated KVS trust backend * Imported Translations from Transifex * Ensure sql upgrade tests can run with non-sqlite databases * Ensure sql upgrade tests can run with non-sqlite databases * Validates controller methods exist when specified * Fixes an error deleting an endpoint group project * Add v3 openstackclient CLI examples * Update the CLI examples to also use openstackclient * Replace an instance of keystone/openstack/common/timeutils * Use importutils from oslo.utils * Use jsonutils from oslo.serialization * Update 'Configuring Services' documentation * Use openstackclient examples in configuration documentation * Validates controller methods exist when specified * Fixes an error deleting an endpoint group project * Switch LdapIdentitySqlAssignment to use oslo.mockpatch * Fix tests comparing tokens * Remove deprecated TemplatedCatalog class * Remove images directory from docs * Remove OS-STATS monitoring * Remove identity and assignment kvs backends * Add an XML code directive to a shibboleth example * revise docs on default \_member\_ role * Convert unicode to UTF8 when calling ldap.str2dn() * Fix tests comparing tokens * Fix parsing of emulated enabled DN * Handle default string values when using user\_enabled\_invert * Handle default string values when using user\_enabled\_invert * Convert unicode to UTF8 when calling ldap.str2dn() * Fix parsing of emulated enabled DN * Add test for getting a token with inherited role * wrong logic in assertValidRoleAssignmentListResponse method * Open Kilo development 2014.2.rc1 ---------- * Enhance FakeLdap to require base entry for subtree search * Imported Translations from Transifex * Uses session in migration to stop DB locking * Address some late comments for memcache clients * Set issuer value to 
CONF.saml.idp\_entity\_id * Updated from global requirements * Add placeholders for reserved migrations * Mark k2k as experimental * Add version attribute to the SAML2 Assertion object * New section for CLI examples in docs * Fix failure of delete domain group grant when identity is LDAP * Clean up the Configuration documentation * Adding an index on token.user\_id and token.trust\_id * Update architecture documentation * Fix a spelling mistake in keystone/common/utils.py * Imported Translations from Transifex * Prevent infinite recursion on persistence core on init * Read idp\_metadata\_path value from CONF.saml * Remove duplicated assertion * Fix create and user-role-add in LDAP backend * Fix minor spelling issues in comments * Add a pool of memcached clients * Update URLs for keystone federation configuration docs * add --rebuild option for ssl/pki\_setup * Mock doesn't have assert\_called\_once() * Do not run git-cloned ksc master tests when local client specified * Add info about pysaml2 into federation docs * Imported Translations from Transifex * Remove unused cache functions from token.core * Updated from global requirements * Safer check for enabled in trusts * Set the default number of workers when running under eventlet * Add the processutils from oslo-incubator * Update 'Configure Federation' documentation * Ensure identity sql driver supports domain-specific configuration * Allow users to clean up role assignments * Adds a whitelist for endpoint catalog substitution * Revoke the tokens of group members when a group role is revoked * Change pysaml2 comment in test-requrements.txt * Document Keystone2Keystone federation * Set LDAP certificate trust options for LDAPS and TLS * Fail on empty userId/username before query * Refactor FakeLdap to share delete code * ldap/core deleteTree not always supported * Reduce unit test log level for notifications * Fix delete group cleans up role assignments with LDAP * Refactor LDAP backend using context manager for 
connection * Fix fakeldap search\_s documentation * Add delete notification to endpoint grouping * Fix using local ID to clean up user/group assignments * Add characterization test for cleanup role assignments for group * Fix LDAP group role assignment listing * Correct typos in keystone/common/base64utils.py docstrings * Add V3 JSON Home support to GET / * Ensure a consistent transactional context is used * Adds hint about filter placement to extension docs * Adds pipeline hints to the example paste config * Make the extension docs a top level entry in the landing page * LDAP: refactor use of "1.1" OID * Fix Policy backend driver documentation * improve dependency injection doc strings * Document mod\_wsgi doesn't support chunked encoding * Making KvsInheritanceTests use backend KVS * Keystone local authenticate has an unnecessary pending audit record * Use id attribute map for read-only LDAP * Stop skipping LDAP tests * Update the revocation configuration docs * Fixes formatting error in debug log statement * Remove trailing space from string * Update paste pipelines in configuration docs * Update man pages * Updates package comment to be more accurate * Fixed typo 'in sane manner' to 'in a sane manner' * Enable filtering of services by name * correct typos * Fixes code comment to be more accurate * Prevent domains creation for the default LDAP+SQL * Add testcase for coverage of 002\_add\_endpoint\_groups * Fix oauth sqlite migration downgrade failure * Sync jsonutils from oslo-incubator 32e7f0b5 * Imported Translations from Transifex * Avoid conversion of binary LDAP values * Remove unused variable TIME\_FORMAT * Add characterization test for group role assignment listing * Fix dn\_startswith * Use oslo\_debug\_helper and remove our own version * Fixes a mock cleanup issue caused by oslotest * Add rst code-blocks to a bunch of missing examples * Capitalize all instances of Keystone in the docs 2014.2.b3 --------- * Update the docs that list sections in 
keystone.conf * Fixed spelling mistakes in comments * use one indentation style * Fix admin server doesn't report v2 support in Apache httpd * Add test for single app loaded version response * Work toward Python 3.4 support and testing * Update the federation configuration docs for saml2 * Add docs for enabling endpoint policy * warn against sorting requirements * Adds region back into the catalog endpoint * Remove extra V3 version router * Implementation of Endpoint Grouping * Fix minor nits for token2saml generation * Routes for Keystone-IdP metadata endpoint * Generate IdP Metadata with keystone-manage * IdP SAML Metadata generator * Implement validation on Trust V3 API * Create SAML generation route and controller * trustor\_user\_id not available in v2 trust token * Transform a Keystone token to a SAML assertion * Remove TODO that was done * Fix region schema comment * Remove unused \_validate\_endpoint * Fix follow up review issues with endpoint policy backend patch * controller for the endpoint policy extension * Mark the revoke kvs backend deprecated, for removal in Kilo * Fix logging config twice * Implement validation on the Catalog V3 API * General logging cleanup in keystone.notifications * Lower log level for notification registration * backend for policy endpoint extension * Implement validation on Credential V3 * Implement validation on Policy V3 API * Fix token flush fails with recursion depth exception * Spelling errors fixed in the comments * Add index for actor\_id in assignments table * Endpoint table is missing reference to region table * add missing log hints for level C/E/I/W * Add audit support to keystone federation * Add string id type validation * Implement validation on Assignment V3 API * Adds tests that show how update with validation works * Mark the trust kvs backend deprecated, for removal in Kilo * Test cleanup: do not leak FDs during test runs * Do not load auth plugins by class in tests * JSON Home data is required * Cleanup 
superfluous string comprehension and coersion * Add commas for ease of maintenance * Comments to docstrings for notification emit methods * Notification cleanup: namespace actions * Mark kvs backends as deprecated, for removal in Kilo * Add bash code style to some portions of configuration.rst * Update sample config * Update tests to not use token\_api * Make persistence manager in token\_provider\_api private * Enhance GET /v3 to handle Accept header * Enhance V3 extensions to provide JSON Home data * Enhance V3 extension class to integrate JSON Home data * Change OS-INHERIT extension to provide JSON Home data * Change the sub-routers to provide JSON Home data * Change V3 router classes to provide JSON Home data * Create additional docs for role assignment events * Add libxmlsec1 as external package dependency on OS X * Add \_\_repr\_\_ to KeystoneToken model * Add extra guarding to revoke\_by\_audit\_id methods * Mark methods on token\_api deprecated * Remove SAML2 plugin dependency on token\_api * Remove oauth controller dependency on token\_api * Remove assignment\_api dependency on token\_api * Notification Constant Cleanup and internal notify type * Remove wsgi and base controller dependency on token\_api * Remove identity\_api dependency on token\_api * Remove trust dependency on token\_api * Update AuthContextMiddleware to not use token\_api * Revoke by Audit Id / Audit Id Chain instead of expires * assignment controller error path fix * Make SQL the default backend for Identity & Assignment unit tests * Add CADF notifications for role assignment create and delete * Add notifications for policy, region, service and endpoint * Enhance V3 version controller to provide JSON Home response * Provide the V3 routers to the V3 extension controller * Enhance V3 routers to store basic resource description * Correct the signature for some catalog abstract method signatures * Convert to urlsafe base64 audit ids * Sync Py2 and Py3 requirements files * Sync with 
oslo-incubator * Add audit ids to tokens * Fixing simple type in comment * Create authentication specific routes * Standardizing the Federation Process * Enable filtering of credentials by user ID * Expose context to create grant and delete grant * Redirect stdout and stderr when using subprocess * Back off initial migration to 34 * Back off initial migration to 35 * Use python convention for function names in test\_notifications * Use mail for the default LDAP email attribute name * Bump hacking to 0.9.x series * Fixes an issue with the XMLEquals matcher * Do not require method attribute on plugins * Remove \_BaseFederationExtension * Add a URL field to region table * Remove unnecessary declaration of CONF * Remove trailing space in tox.ini * Rename bash8 requirement * Updates the sample config * remove unused import * Clean whitespace off token * Support the hints mechanism in list\_credentials() * Keystone service throws error on receiving SIGHUP * Remove strutils and timeutils from openstack-common.conf * Use functions in oslo.utils * Add an OS-FEDERATION section to scoped federation tokens * Ensure roles created by unit tests have correct attributes * Update control\_exchange value in keystone.conf * swap import order of lxml * add i18n to lxml error * Check for empty string value in REMOTE\_USER * Refactor names in catalog backends * Update CADF auditing example to show non-payload information * Remove ec2 contrib dependency on token\_api * Expose token revocation list via token\_provider\_api * Remove assignment controller dependency on token\_api * Refactor serializer import to XmlBodyMiddleware * Delete intersphinx mappings * Fix documentation link * Make token\_provider\_api contain token persistence * Remove S3 middleware tests from tox.ini * Remove unused function * Add oslo.utils requirement * Surround REMOTE\_USER variable name with quotes * Remove \`with\_lockmode\` use from Trust SQL backend * Allow LDAP lock attributes to be used as enable 
attributes * Improve instructions about federation * Do not override venvs * Imported Translations from Transifex * Remove debug CADF payload for every authN request * Don't override tox envdir for pep8 and cover jobs * Change V3 extensions to use resources * Enhance V3 extension class to use resources * V3 Extension class * Change V3 router classes to use resources * Enhance V3 router class for resources * Class for V3 router packages * Filter List Regions by 'parent\_region\_id' * Refactor existing endpoint filter tests * Trust unit tests should target additional threat scenarios * Update the config file * Fix revocation event handling with MySQL * Set default token provider to UUID * Add filters to the collections 'self' link * Issue multiple SQL statements in separate engine.execute() calls * Remove fixture from openstack-common.conf * Use config fixture from oslo.config * Fix revoking a scoped token from an unscoped token * Updated from global requirements * KeyError instead of exception.KeyError * Catch correct oslo.db exception * Update setup docs with Fedora 19+ dependencies * Add a test for revoking a scoped token from an unscoped * Fix revoking domain-scoped tokens * Correct revocation event test for domain\_id * Add pluggable range functions for token flush * Configurable python-keystoneclient repo * Fix invalid self link in get access token * Add workaround to support tox 1.7.2 * Fixes a capitalization issue * Do not consume trust uses when create token fails * Refactor set domain-id and mapping code * Remove duplicated asserts * Fix for V2 token issued\_at time changing * Add tests related to V2 token issued\_at time changing * Sample config update * Add the new Keystone TokenModel * Add X-Auth-Token header in federation examples * Check url is in the 'self' link in list responses * Clean up EP-Filter after delete project/endpoint * add internal delete notification for endpoint * remove static files from docs * Move token persistence classes to 
token.persistence module * cache the catalog * Disable a domain will revoke tokens under the same domain * Sqlite files excluded from the repo * Adding support for ldap connection pooling * Details the proper way to call a callable 2014.2.b2 --------- * Add the new oslo.i18n as a dependency for Python 3 * Fixes test\_exceptions.py for Python3 * Fixes test\_wsgi for Python3 * Adds several more test modules that pass on Py3 * Reduces the amount of mocked imports for Python 3 * Disables LDAP unit tests * Updated from global requirements * Initial implementation of validator * Mark the 'check\_vX\_token' methods deprecated * Extracting get group roles for project logic to drivers * implement GET /v3/catalog * Adds coverage report to py33 test runs * Fixed tox cover environment to share venv * Regenerate sample config file * Check that region ID is not an empty string * auth tests should not require admin token * Example JSON files should be human-readable * Consolidate \`assert\_XXX\_enabled\` type calls to managers * Move keystone.token.default\_expire\_time to token.provider * Move token\_api.unique\_id to token\_provider\_api * Capitalize a few project names in configuring services doc * Fixes a Python3 syntax error * Introduce pragma no cover to asbtract classes * Update middleware that was moved to keystonemiddleware * Sync with oslo-incubator * project disabled/deleted notification recommendations * render json examples with syntax highlighting * Use oslo.i18n * Make sure unit tests set the correct log levels * Clean up the endpoint filtering configuration docs * Avoid loading a ref from SQL to delete the ref * Add revocation extension to default pipeline * multi-backend support for identity * Update docs to reflect new db\_sync behaviour * Migrate default extensions * Add oslo.i18n as dependency * Do not use lazy translation for keystone-manage * Update the configuration docs for the revocation extension * Remove deprecated token\_api.list\_tokens * Imported 
Translations from Transifex * Add keystonemiddleware to requirements * Add \_BaseFederationExtension class * Correct the region table to be InnoDB and UTF8 * HEAD responses should return same status as GET * Updated from global requirements * Sync with oslo-incubator e9bb0b59 * Add schema check for OS-FEDERATION mapping table * Make OS-FEDERATION core.Driver methods abstract * update example with a status code we actually use * Correct docstring for assertResponseSuccessful * Fix the section name in CONTRIBUTING.rst * Fix OAuth1 to not JSON-encode create access token response * Ending periods in exception messages deleted * Ensure that in v2 auth tenant\_id matches trust * Add identity mapping capability * Do not use keystone's config for nova's port * Fix docs and scripts for pki\_setup and ssl\_setup * LDAP: Added documentation for debug\_level option * Updated from global requirements * Fixes the order of assertEqual arguments * remove default=None for config options * Fix test for get\_\*\_by\_name invalidation * Do not support toggling key\_manglers in cache layer * Implicitly ignore attributes that are mapped to None in LDAP * Move bash8 to run under pep8 tox env * Remove db, db.sqlalchemy from openstack-common.conf * Remove backend\_entities from backend\_ldap.conf * Consolidate provider calls to token\_api.create\_token * Adds hacking check for debug logging translations * Updates Python3 requirements to match Python2 * Adds oslo.db support for Python 3 tests * Do not leak SQL queries in HTTP 409 (conflict) * Imported Translations from Transifex * Do not log 14+ INFO lines on a broken pipe error (eventlet) * Regenerate sample config file * deprecate LDAP config options for 'tenants' * the user\_tenant\_membership table was replaced by "assignment" * Corrects minor spelling mistakes * Ignoring order of user list in TenantTestCase * Make gen\_pki.sh & debug\_helper.sh bash8 compliant * TestAuthInfo class in test\_v3\_auth made more efficient * Update docs to 
reference #openstack-keystone * Don't set sqlite\_db default * Migrate ID generation for users/groups from controller to manager * oslo.db implementation * Test \`common.sql\` initialization * Kerberos as method name * test REMOTE\_USER does not authenticate * Document pkiz as provider in config * Only emit disable notifications for project/domain on disable * Fix the typo and reformat the comments for the added option * Updated from global requirements * fix flake8 issues * Update sample keystone.conf file * Fix 500 error if request body is not JSON object * Default to PKIZ tokens * Fix a few typos in the shibboleth doc * pkiz String conversion * Fixes catalog URL formatting to never return None * Updates keystone.catalog.core.format\_url tests * Ignore broken endpoints in get\_catalog * Allow for multiple PKI Style Providers * Add instructions for removing pyc files to docs * Password trunction makes password insecure * enable multiple keystone-all worker processes * Add cloud auditing notification documentation * Block delegation escalation of privilege * Fixes typo error in Keystone * Add missing docstrings and 1 unittest for LDAP utf-8 fixes * Properly invalidate cache for get\_\*\_by\_name methods * Make sure domains are enabled by default * Convert explicit session get/begin to transaction context 2014.2.b1 --------- * remove unnecessary word in docs: 'an' * add docs on v2 & v3 support in the service catalog * Add v3 curl examples * Use code-block for curl examples * Sync service module from oslo-incubator * remove unneeded definitions of Python Source Code Encoding * gitignore etc/keystone/ * Enforce \`\`saml2\`\` protocol in Apache config * install gettext on OS X for msgfmt * Use translation hints * Add v2 & v3 API documentation * Make sure all the auth plugins agree on the shared identity attributes * update release support warning for domain-specific drivers * Catalog driver generates v3 catalog from v2 catalog * Compressed Token Provider * document 
keystone-specs instead of LP blueprints in README * fixed several pep8 issues * Invalid command referenced in federation documentation * Fix curl example refs in docs * pep8: do not test locale files * Consistenly use jsonutils instead of json * Fix type error message in format\_url * Updated from global requirements * remove out of date docs for Fedora 15 * Make sure scoping to the project of a disabled domain result in 401 * document pki\_setup and ssl\_setup in keystone.conf.sample * Fixed wrong behavior when updating tenant or user with LDAP backends * Cleanup openstack-common.conf and sync from olso * recommend excluding 35357 from ephemeral ports * Fixes duplicated DELETE queries on SQL backends * Refactor tests regarding required attributes * Suggest users to remove REMOTE\_USER from shibd conf * Refactor driver\_hints * Imported Translations from Transifex * Code which gets and deletes elements of tree was moved to one method * indicate that sensitive messages can be disabled * Check that the user is dumb moved to the common method * Fix spelling mistakes in docs * Replace magic value 'service/security' in CadfNotificationWrapper * Replace assertTrue and assertFalse with more suitable asserts * replaced unicode() with six.text\_type() * Remove obsolete note from ldap * install from source docs never actually install the keystone service * LDAP fix for get\_roles\_for\_user\_and\_project user=group ID * Cleanup of ldap assignment backend * Remove all mostly untranslated PO files * Mapping engine does not handle regex properly * SQL fix for get\_roles\_for\_user\_and\_project user=group ID * Unimplemented get roles by group for project list * sql migration: ensure using innodb utf8 for assignment table * Update mailmap entry for Brant * Reduce log noise on expired tokens * Add note for v3 API clients using auth plugin docs * Refactor test\_auth trust related tests * Add detailed federation configuration docs * remove a few backslash line continuations * 
Reduce excess LDAP searches * Regenerate sample config * Fix version links to docs.openstack.org * Add mailmap entry * Refactor create\_trust for readability * Adds several more tests to the Python 3 test run * Fixed the policy tests in Python 3 * Fixed the size limit tests in Python 3 * fixed typos found by RETF rules in RST files * Remove the configure portion of extension docs * Ensure token is a string * Fixed some typos throughout the codebase * Allow 'description' in V3 Regions to be optional * More random values for oAuth1 verifier * Add rally performance gate job for keystone * Set proper DB\_INIT\_VERSION on db\_version command * Escape values in LDAP search filters * Migration DB\_INIT\_VERSION in common place * Redundant unique constraint * Correct \`nullable\` values in models and migrations * Move hacking code to a separate fixture * Some methods in ldap were moved to superclass * Sync with oslo-incubator 28fba9c * Use oslo.test mockpatch * Check that all po/pot files are valid * No longer allow listing users by email * Refactor notifications * Add localized response test * Refactor service readiness notification * Make test\_revoke expiry times distinct * Removed duplication with list\_user\_ids\_for\_project * Fix cache configuration checks * setUp must be called on a fixture's parent first * First real Python 3 tests * Make the py33 Jenkins job happy * Fix the "search for sql.py" files for db models * Sync with oslo-incubator 74ae271 * no one uses macports * Updated from global requirements * Compatible server default value in the models * Explicit foreign key indexes * Added statement for ... if ... 
else * Imported Translations from Transifex * Ignore broken endpoints in get\_v3\_catalog * Fix typo on cache backend module * Fix sql\_upgrade tests run by themselves * Discourage use of pki\_setup * add dependencies of keystone dev-enviroment * More efficient DN list for LDAP role delete * Stronger assertion for test\_user\_extra\_attribute\_mapping * Refactor test\_password\_hashed to the backend testers * Remove LDAP password hashing code * More notification unit tests * Add missing import, remove trailing ":" in middleware example * Fixes for in-code documentation * Isolate backend loading * Sync with oslo-incubator 2fd457b * Adding one more check on project\_id * Moves test database setup/teardown into a fixture * Make the LDAP debug option a configurable setting * Remove unnecessary dict copy * More debug output for test * Code which gets elements of tree in ldap moved to a common method * Removed unused code * Don't re-raise instance * Fix catalog Driver signatures * Include extra attributes in list results * Allow any attributes in mapping * Enhance tests for user extra attribute mapping * Fix typo of ANS1 to ASN1 * Updated from global requirements * Refactor: moved flatten function to utils * Collapse SQL Migrations * Treat LDAP attribute names as case-insensitive * replace word 'by' with 'be' * Configurable token hash algorithm * Adds style checks to ease reviewer burden * Adding more descriptive error message * Fixed wrong behavior in method search\_s in BaseLdap class * Fix response for missing attributes in trust * Refactor: move federation functions to federation utils * List all forbidden attributes in the request body * Convert test\_backend\_ldap to config fixture * Add tests for user ID with comma * Fix invalid LDAP filter for user ID with comma * Remove assignment proxy methods/controllers * Remove legacy\_endpoint\_id and enabled from service catalog * Replace all use of mox with mock * Fix assertEqual arguments order(catalog, cert\_setup, etc) 
* Remove common.V3Controller.check\_required\_params() method * Fix parallel unit tests keystoneclient partial checkout * Sync from oslo db.sqlalchemy.migration * Removes unused db\_sync methods * Removes useless wrapper from manager base class * Cleanup of test\_cert\_setup tests * Sanitizes authentication methods received in requests * Fix create\_region\_with\_id raise 500 Error bug * For ldap, API wrongly reports user is in group * support conventional domain name with one or more dot * Remove \_delete\_tokens function from federation controller * Keystone doesn't use pam * Fixed small capitalization issue * Fix Jenkins translation jobs * Removes some duplicate setup from a testcase * Updated from global requirements * Enable concurrent testing by default * Cleanup ldap tests (mox and reset values) * Check domain\_id with equality in assignment kvs * Moves database setup/teardown closer to its usage * Cleanup config.py * Clean up config help text * Imported Translations from Transifex * test\_v3\_token\_id correctly hash token * Safer noqa handling * Remove noqa form import \_s * Fix assertEqual arguments order(auth\_plugin, backend, backend\_sql, etc) * Expand the use of non-ascii values in ldap test * Properly handle unicode & utf-8 in LDAP * Refactor LDAP API * Use in-memory SQLite for sql migration tests * Use in-memory SQLite for testing * Remove extraenous instantiations of managers * Make service catalog include service name * Add placeholders for reserved migrations 2014.1.rc1 ---------- * Open Juno development * Enable lazy translations in httpd/keystone.py * Avoid using .values() on the indexed columns * Imported Translations from Transifex * revert deprecation of v2 API * Remove unnecessary test setUps * code hygiene; use six.text\_type, escape regexp's, use key function * Use CMS to generate sample tokens * Allows override of stdout/stderr/log capturing * exclude disabled services from the catalog * refactor AuthCatalog tests * Rename 
keystone.tests.fixtures * Change the default version discovery URLs * Remove extra cache layer debugging * Updated from global requirements * Fix doc build errors with SQLAlchemy 0.9 * Sync oslo-incubator db.sqlalchemy b9e2499 * Create TMPDIR for tests recursively * Always include 'enabled' field in service response * test tcp\_keepidle only if it's available on the current platform * Add dedicated URL for issuing unscoped federation tokens * Cleanup revocation query * Reduce environment logging * Use assertIsNone when comparing against None * Removes the use of mutables as default args * Add a space after the hash for block comments * Filter SAML2 assertion parameters with certain prefix * Use assertIn in test\_v3\_catalog * Add support for parallel testr workers in Keystone * is\_revoked check all viable subtrees * update sample conf * explicitly import gettext function * expires\_at should be in a tuple not turned into one * Comparisons should account for instantaneous test execution * Start using to oslotest * Uses generator expressions instead of filter * Remove unused db\_sync from extensions * Ability to turn off ldap referral chasing * Add user\_id when calling populate\_roles\_for\_groups * Store groups ids objects list in the OS-FEDERATION object * Make domain\_id immutable by default * Do not expose internal data on UnexpectedError * Use oslo db.sqlalchemy.session.EngineFacade.from\_config * Uses explicit imports for \_ * Rename scope\_to\_bad\_project() to test\_scope\_to\_bad\_project() * Make LIVE Tests configurable with ENV * Filter out nonstring environment variables before rules mapping * Provide option to make domain\_id immutable * Replace httplib.HTTPSConnection in ec2\_token * Move test .conf files to keystone/tests/config\_files * Removal of test .conf files * Don't automatically enable revocation events * Ensure v3policysample correctly limits domain\_admin access * Sync db, db.sqlalchemy from oslo-incubator 0a3436f * Do not use 
keystone.conf.sample in tests * Filter LDAP dumb member when listing role assignments * Updated from global requirements * Remove unnecessary oauth1.Manager constructions * Enforce groups presence for federated authn * Update sample config * Very minor cleanup to default\_fixtures * Cleanup keystoneclient tests * Cleanup fixture data added to test instances * Cleans up test data from limit tests * Cleanup of instance attrs in core tests * Cleanup backends after each test * Fixup region description uniqueness * Add slowest output to tox runs (testr) * Add missing documentation for enabling oauth1 auth plugin * Add missing documentation for enabling federation auth plugin * Use class attribute to represent 'user' and 'group' * Configurable temporary directory for tests * Call an existing method in sync cache for revoke events * Remove unnecessary calls to self.config() * remove the unused variable in test\_sql\_upgrade * remove hardcoded SQL queries in tests * Fix db\_version failed with wrong arguments * Use config fixture * Fix docstrings in federation related modules * Sync db, db.sqlalchemy, gettextutils from oslo-incubator 6ba44fd * V3 xml responses should use v3 namespace * trust creation allowed with empty roles list * Fix test\_provider\_token\_expiration\_validation transient failure * Fix include only enabled endpoints in catalog * Add unit tests for disabled endpoints in catalog 2014.1.b3 --------- * Update ADMIN\_TOKEN description in docs * Mark revoke as experimental * Import order is fixed * Remove unused function from tests * Add OS-OAUTH1 to consumers links section * Don't need session.flush in context managed by session * Imported Translations from Transifex * allow create credential with the system admin token * Stop gating on up-to-date sample config file * Always include 'enabled' field in endpoint response * Add the last of the outstanding helpstrings to config * Token Revocation Extension * Remove vim headers * Removes use of 
timeutils.set\_time\_override * drop key distribution from icehouse * Limited use trusts * Update curl api example to specify tenant * Update Oslo wiki link in README * Properly configure OS-EP-FILTER test backend * Add tests for endpoint enabled * Remove the un-used and non-maintained PAM identity backend * Remove paste\_deploy from test\_overrides.conf * SQLAlchemy Change to support more strict dialect checking * Remove "test-only" pam config options * Imported Translations from Transifex * Fix get project users when no user exists * deprecate XML support in favor of JSON * Lazy gettextutils behavior * Fix the order of assertEqual arguments(keystoneclient, kvs, etc) * Update Oslo wiki link in README * Removes a redundant test * Remove unused variable * Implement V3 Specific Version of EC2 Contrib * revocation\_list only call isotime on datetime objects * Support authentication via SAML 2.0 assertions * Fix table name typo in test\_sql\_upgrade * Cleanup and add more config help strings * Ensure v2 API only returns projects in the default domain * Support for mongo as dogpile cache backend * v3 endpoint create should require url * Fix issue with DB upgrade to assignment table * Remove duplicated cms file * oauth1 extension migration fails with DB2 * Handle exception messages with six.text\_type * Remove common.sql.migration * Unimplemented error on V3 get token * Updated from global requirements * Replace assertEqual(None, \*) with assertIsNone in tests * Fix keystone-manage db\_version * Fix assertEqual arguments order(\_ldap\_tls\_livetest, backend\_kvs, etc) * Fix assertEqual arguments order(backend\_ldap, cache, v3\_protection) * Fix the order of assertEqual arguments(v3\_auth, v3\_identity) * Move \_BaseController to common/controllers.py * Remove oslo rpc * Fix webob.exc.HTTPForbidden parameter miss * Remove redundant default value None for dict.get * Remove oslo notifier * Uses the venv virtualenv for the pep8 command * Sync db.exception from Oslo * Update 
oslo-incubator log.py to a01f79c * Update man pages * Add tests for create grant when no group * Add tests for create grant when no user * Correct a docstring in keystone.common.config * Enable pep8 test against auto-generated configuration * Update config options with helpstrings and generate sample * Keystone doc has wrong keystone-manage command * Fix assertEqual arguments order * strengthen assertion for unscoped tokens * Remove sql.Base * Always hash passwords on their way into the DB * bad config user\_enable\_emulation in mask test * Convert Token Memcache backend to new KeyValueStore Impl * Implement mechanism to provide non-expiring keys in KVS * Rationalize the Assignment Grant Tables * Add version routes to KDS * Keystone team uses #openstack-keystone now * Adds model mixin for {to,from}\_dict functionality * Adds Cloud Audit (CADF) Support for keystone authentication * Use class attribute to represent 'project' * Switch over to oslosphinx * Replace notifier with oslo.messaging * Clean StatsController unnecesary members * Use global to represent OS-TRUST:trust * Additional notifications for revocations * add policy entries for /v3/regions * Use Oslo.db migration * \`find\_migrate\_repo\` improvement * Variable 'domain\_ref' referenced before assignment * Cleanup Dogpile KVS Memcache backend support * Fix test\_provider\_token\_expiration\_validation transient failure * Restructure KDS options to be more like Keystone's options * Setup code for auto-config sample generation * Correct \`find\_migrate\_repo\` usage * Make live LDAP user DN match the default from devstack * Set sensible default for keystone's paste * Treat sphinx warnings as errors * Use WebOb directly in ec2\_token middleware * Add lockfile and kombu as requirements for keystone * Move filter\_limit\_query out of sql.Base * List trusts, incorrect self link * LDAP: document enabled\_emulation * Remove s3\_token functional tests * Provide clearer error when deleting enabled domain * Remove 
copyright from empty files * Syncing policy engine from oslo-incubator * Rename Openstack to OpenStack * Refactor get role for trust * KDS fix documented exception * Cleanup oauth tests * Correctly normalize consumer fields on update * Add tests for oauth consumer normalize fields * Adds a fixture for setting up the cache * Clean up database fixtures * Fixes bug in exception message generation * reverse my preferred mailmap * Notifications upon disable * Move identity logic from controller to manager * Changing testcase name to match our terminology * Allow specifying region ID when creating region * explicitly expect hints in the @truncated signature * list limit doc cleanup * Correct error class in find\_migrate\_repo * Remove unnecessary check to see if trustee exists * Enforce current certificate retrieval behaviour * Use WebOb directly for locale testing * Cleanup KDS doc build errors * Adds rule processing for mapping * Add in functionality to set key\_mangler on dogpile backends * Fix indentation issue * Cleanup invalid token exception text * Limit calls to memcache backend as user token index increases in size * Style the code examples in docs as python * Fixes a misspelling * Doc - Keystone configuration - moving RBAC section * Doc - Detailing objects' attributes available for policy.json * Do not use auth\_info objects for accessing the API * Remove unused method \_get\_domain\_id\_from\_auth * Remove unused method \_get\_domain\_conf * Remove unused method \_store\_protocol * Remove tox locale overrides * Remove unused methods from AuthInfo * Remove unused method \_create\_metadata * Add test for list project users when no user * Fix assignment KVS backend to not use identity * Update kvs assignment backend docs * Don't skip tests for some bugs * Update oslo-incubator fixture to 81c478 * Remove vim header * revise example extension directory structure * Deprecate s3\_token middleware * Update requirements to 661e6 * Implement list limiting support in 
driver backends * Fix misspellings in keystone * Removes use of fake\_notify and fixes notify test * Remove host from per notification options * Document priority level on Keystone notifications * Remove default\_notification\_level from conf * Mock sys.exit in testing * Remove auth\_token middleware doc * Move v3\_to\_v2\_user from manager to controller * Update db.sqlalchemy.session from oslo-incubator 018138 * Adds tcp\_keepalive and tcp\_keepidle config options * Ensure mapping rule has only local and remote properties * clean up keystone-manage man page * Refactor tests move assertValidErrorResponse * fix grammar error in keystone-manage.rst * Add rules to be a required field for mapping schema * Cleanup docstrings * Do not call deprecated functions * Removes useless string * Removes duplicate key from test fixtures * Fixes a Python3 syntax error using raise * Uses six.text\_type instead of unicode * Uses six.iteritems for Python3 compat * Add tests to ensure additional remote properties are not validated * Removes xrange for Python3 compat * Cleanup sample config * Change 'oauth\_extension' to 'oauth1\_extension' * Modified keystone endpoint-create default region * Load the federation manager * Fix indentation errors found by Pep8 1.4.6+ * Mark strings for translation in ldap backends * Remove unused variable assignment * Sync oslo's policy module * Replace urllib/urlparse with six.moves.\* * Change Continuous Integration Project link * Remove legacy diablo and essex test cruft * Refactor Auth plugin configuration options * Use self.opt\_in\_group overrides * Federation IdentityProvider filter fields on update response * Remove unnecessary test methods * Refactor federation controller class hierarchy * Refactor mutable parameter handling * Avoid use of str() with exceptions * Use message when creating Unauthorized exception * Make error strings translatable * Enhancing tests to check project deletion in Active Directory * Add required properties field to 
rules schema * Fix assignment to not require user or group existence * deprecate access log middleware * remove access log middleware from the default paste pipeline * deprecate v2.0 API in multiple choice response * cleaned up extension development docs * Add a docstring and rename mapping tests * Remove versionId, versionInfo, versionList from examples * Tests initialize database * Don't set default for a nullable column * Remove autoincrement from String column * Fix docstrings in federation controller * Change assertTrue(isinstance()) by optimal assert * sync oslo-incubator log.py * turn off eventlet.wsgi debug * Make boolean query filter "False" argument work * Fix list\_projects\_for\_endpoint failed bug * Introduce database functionality into KDS * Update the default\_log\_levels defaults * Correct sample config default log levels * deprecate stats middleware * Use passed filter dict param in core sql filtering * Fix federation documentation reference * build auth context from middleware * correct the document links in man documents * Use six.text\_type to replace unicode * Don't mask the filter built-in * Move sql.Base.transaction * Remove sql.Base.get\_session * renamed extensions development doc * Implement filter support in driver backends * append extension name to trust notifications * Allow event callback registration for arbitrary resource types * Fix test\_auth isolation * Policy sample - Identity v3 resources management * Tests use setUp rather than init * Improve forbidden checks * Tests remove useless config list cleanup code * use assertEqual instead of assertIs for string comparison * Don't configure on import * Fix reading cache-time before configured * Cleanup eventlet setup * Remove unused variables from common.config * Reference dogpile.cache.memcached backend properly * Unify StringIO usage with six.StringIO * Fix typos in documents and comments * Sync oslo strutils.py * Use six.string\_types instead of basestring 2014.1.b2 --------- * Use 
six to make dict work in Python 2 and Python 3 * initialize environment for tests that call popen * Don't duplicate the existing config file list * Implement notifications for trusts * Remove kwargs from trust\_api.create\_trust * Fixup incorrect comment * Simple Certificate Extension * Add mapping function to keystone * Switch from 400 to 403 on ImmutableAttributeError * Identity Providers CRUD operations * Move KDS paths file * Update comments in test\_v3\_protection.py * description is wrong in endpoint filter rst doc * Drop unsused "extras" dependency * LDAP Assignment does not support grant v3 API * Adds run\_tests.sh cli option to stop on failure * Removes option to delete test DB from run\_tests.sh * Removes deprecation warning from run\_tests.sh * v3 credentials, ensure blob response is json * Store ec2 credentials blob as json * remove unused LOG * Store trust\_id for v3/credentials ec2 keypairs * Refactor context trust\_id check to wsgi.Application base class * Implementation of internal notification callbacks within Keystone * Replacing python-oauth2 by oauthlib * Fix using non-default default\_domain\_id * Enhance auth tests for non-default default\_domain\_id * KVS support domain as namespace for users * Remove unused member from KVS assignment * Enhance tests for non-default default\_domain\_id * rename templated.TemplatedCatalog to templated.Catalog * Sync with global requirements * Implements regions resource in 3.2 Catalog API * Reduces memory utilization during test runs * reduce default token duration to one hour * Document running with pdb * Restructure developing.rst * Enable lazy translation * Sync gettextutils from oslo-incubator 997ab277 * derive custom exceptions directly from Exception * Do not append to messages with + * Convert Token KVS backend to new KeyValueStore Impl * Fix sample config external default doc * Documentation cleanup * Make common log import consistent * Remove unused variables * Safe command handling for openssl * Fix 
external auth (REMOTE\_USER) plugin support * Cleanup test\_no\_admin\_token\_auth cleanup code * Subclasses of TestCase don't need to reset conf * Cleanup test\_associate\_project\_endpoint\_extension * Tests use cleanUp rather than tearDown * Remove netifaces requirement * Clean up fakeldap logging * Resolve oauth dependency after paste pipeline is loaded * Change ListOpt default value from str or None to list * Sync oslo-incubator rpc module * Cleanup from business logic refactor * Introduce basic Pecan/WSME framework for KDS * Don't need session.flush in context managed by session * races cause 404 when removing user from project * initialize eventlet for tests * Flush tokens in batches with DB2 * Remove unnecessary line in test\_auth * Clean up docstrings in contrib.oauth1.core * Remove unused test function * Remove 'disable user' logic from \_delete\_domain\_contents * Break dependency of base V3Controller on V2Controller * Move deletion business logic out of controllers * Do not update password when updating grants in Assignment KVS * Cleanup of new credential\_api delete methods * Enhance list\_group\_users in GroupApi * Remove noop code * Remove unused imports * Fix typo in test * Fix IPv6 check * Remove unused code in contrib/ec2/controllers.py * Fix use the fact that empty sequences are false * Imported Translations from Transifex * Synchronized with oslo db and db.sqlalchemy * Fix variable passed to driver module * Updated Keystone development install instructions for Ubuntu * Stops file descriptor leaking in tests * Re-write comment for ADMIN\_TOKEN * Reduced parameters not used in \_populate\_user() * Sync several modules from oslo-incubator * Use oslo.db sessions * Switch to oslo-incubator mask\_password * Replace xrange in for loop with range * Move Assignment Controllers and Routers to be First Class * Remove Identity and Assignment controller interdependancies * Policy based domain isolation can't be defined * Moves keystoneclient master tests in 
a new class * Makes the test git checkout info more declaritive * trustee unable to perform role based operations on trust * Cleanup backend loading * Uses oslo's deprecated decorator; removes ours * Move endpoint\_filter extension documentation * Refactor setup\_logging * Fixes documentation building * Create user returns 400 without a password * Fixes the v2 GET /extensions curl example in the documentation * Add assertSetEqual to base test class * Base Implementation of KVS Dogpile Refactor * Sync db.sqlalchemy from oslo-incubator * Fix errors for create\_endpoint api in version2 * Fix issues handling trust tokens via ec2tokens API * Fix typo in identity:list\_role\_assignments policy * Debug env for tox * Updated from global requirements * Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2 * Add ABCMeta metaclass to token provider * token provider cleanup * Sync versionutils from oslo * Cleanup duplication in test\_backend * replace "global" roles var names with "all" roles * Remove unused token.valid index * Narrow columns used in list\_revoked\_tokens sql * Remove roles from OS-TRUST list responses * Remove deprecated code * Sync rpc fix from oslo-incubator * Don't run non-tests * Formalize deprecation of token\_api.list\_tokens * Add index to cover revoked token list 2014.1.b1 --------- * Refactor assertEqualXML into a testtools matcher * Adds support for username to match the v2 spec * One transaction per call to sql assignment backend * Allow caching to be disabled and tests still pass * Sync From OSLO * Updated from global requirements * Revert "Return a descriptive error message for controllers" * Adds a resource for changing a user's password * Deprecates V2 controllers * Updates .gitignore * Ensure the sample policy file won't diverge * Add pycrypto as a test-requirement * Imported Translations from Transifex * Fix typo in keystone * Added documentation to keystone.common.dependency * Make HACKING.rst DRYer * Allow downgrade for extensions * 
Try decoding string to UTF-8 on error message fail * Import strutils from oslo * Capture debug logging in tests * Easy testing with alternate keystoneclient * Sync log\_handler module from Oslo * refactor test\_catalog * PasteConfigNotFound also raised when keystone.conf not found * Style improvements to logging format strings * Sync the DB2 communication error code change from olso * Skip test\_arbitrary\_attributes\_\* in \_ldap\_livetest * Add documentation for Read Only LDAP configuration option * Remove deprecated auth\_token middleware * Role NoneType object has no attribute setdefault * Utilites for manipulating base64 & PEM * Add memcache options to sample config * UUID vs PKI docs * RST fix for os\_inherit example * Rewrites the serveapp method into a fixture * Allow use of rules Policy driver * Return a descriptive error message for controllers * Proxy Assignment from Identity Deprecated * Remove obsolete redhat-eventlet.patch * AuthInfo use dependency injection * Issue unscoped token if user's default project is invalid * Detangle v3 RestfulTestCase setup * Do not name variables as builtins * Updated from global requirements * Removes unused paste appserver instances from tests * Add WSGI environment to context * trusts raise validation error if expires\_at is invalid * Fix newly discovered H302 * test attribute update edge cases * Return an error when a non-existing tenant is added to a user * use different bind addresses for admin and public * Sync log module from oslo * Change deprecated CLI arguments * UserAuthInfo use dependency injection * fix unparseable JSON * Duplicate delete the user\_project\_metadata * Skip test\_create\_update\_delete\_unicode\_project in \_ldap\_livetest * don't rebind stdlib's os.chdir function * Dependency cleanup * Moves common RestfulTestCase to it's own module * proxy removed from identity and changed to assignment * Uses fixtures for mox and stubs * Adds fixture package from oslo * Fix KVS create\_grant to not raise 
NotFound if no user/group * Enhance tests for assignment create\_grant when no user or group * Clean up duplicate exceptions in docs for assignment.Driver * Remove obsolete driver test module * Change sample policy files to use policy language * Documentation on how-to develop Keystone Extensions * Allow delete user or group at same time as role * Enhance tests for delete\_grant no user/group * Fix issue deleting ec2-credentials as non-admin user * Remove duplicated code on test\_v3\_auth * Removes NoModule from the base testcase * Fixes tox coverage command * Update mailmap for Joe Gordon * Add WWW-Authenticate header in 401 responses * Use abstract base class for endpoint\_filter driver * Use abstract base class for oauth driver * Use abstract base class for policy driver * Use abstract base class for token driver * Document tox instead of run\_tests.sh * Update my mailmap * remove 8888 port in sample\_data.sh * Adds decorator to deprecate functions and methods * Move fakeldap to tests * Fix remove role assignment adds role using LDAP assignment * Enhance tests for deleting a role not assigned * Implementation of opt-out from catalog data during token validation * Add external.Base class to external plugins * Add notifications for groups and roles * add IRC channel & wiki link to README * Add python-six to requirements * Fix v2 token user ref with trust impersonation=True * Changes to testr as the test runner * Fixes error messaging * Handle unicode at the caching layer more elegantly * set user\_update policy to admin\_required * Remove unused DEFAULT\_DOMAIN variable * Remove unused config option auth\_admin\_prefix * Remove unused member * Adds tests for user extra attribute behavior * Adds identity v2 tests to show extra behavior * Treats OS-KSADM:password as password in v2 APIs * Adds more uniformity to identity update\_user calls * Don't use default value in LimitingReader * Use abstract base class for auth handler * Use abstract base class for catalog 
driver * Use abstract base class for credential driver * Use abstract base class for assignment driver * Use abstract base class for trust driver * Use abstract base class for identity driver * remove the nova dependency in the ec2\_token middleware * Catch the socket exception and log it * Fixes broken doc references * Sync db.sqlalchemy * Handle DB2 disconnect * Fix mysql checkout handler AttributeError * Disable lazy gettext 2013.2.rc1 ---------- * Open Icehouse development * Imported Translations from Transifex * Sync with global requirements * Add tests dir to the coverage omit list * Update tox config * Close the cursor for SQLite for 034 upgrade/downgrade on select * Imports oslo policy to fix test issues * Fixes errors logging in as a user with no password * Fix live LDAP tests * Eliminate type error on search\_s * Fix error when create user with LDAP backend * assertEquals is deprecated, use assertEqual (H602) * Validate token calls return 404 on invalid tokens * Protect oauth controller calls and update policy.json * Fix updating attributes with ldap backend * sync oslo policy * Changes v1.1 to v2 for Compute endpoint in sample\_data.sh * Update man pages * Update man page version * Sync gettextutils from oslo * only run flake8 once (bug 1223023) * upgrade to oslo.config 1.2 final * Add user to project if project ID is changed * Ensure any relevant tokens are revoked when a role is deleted * Check token\_format for default token providers only * Modify oauth1 tests to use generated keystone token in a call * Test for backend case sensitivity * Remove ldap identity domain attribute options * Cleanup of tenantId, tenant\_id, and default\_project\_id * Add extra test coverage for unscoped token invalidation * Monkey patch select in environment * Rewrite README.rst * Enclose command args in with\_venv.sh * check for domain existence before doing any ID work * Ensure v2 tokens are correctly invalidated when using BelongsTo * Sync gettextutils from oslo * Use 
localisation for logged warnings * Fix misused assertTrue in unit tests * oauth using optional dependencies * Rationalize list\_user\_projects and get\_projects\_for\_user * Optional dependency injection * Include new notification options in sample config * fix rst syntax in database schema migrations docs * Ignore H803 from Hacking * Test upgrade migration 16->17 * test token revocation list API (bug 1202952) * Imported Translations from Transifex * gate on H304: no relative imports * Move gettextutils installation in tests to core * Cleanup tests imports so not relative * Tests use "from keystone import tests" * Reduce churn of cache on revocation\_list * domain-specific drivers experimental in havana * Fixes for user response with LDAP user\_enabled\_mask * Close each LDAP connection after it is used, following python-ldap docs * Remove CA key password from cert setup * Import core.\* in keystone.tests * Fix incorrect test for list\_users * Changed header from LLC to Foundation based on trademark policies * Changes template header for translation catalogs * Support timezone in memcached token backend 2013.2.b3 --------- * Imported Translations from Transifex * Move CA key from certs directory to private directory * OAuth authorizing user should propose roles to delegate * Need to use \_() to handle i18n string messages * Fix the code miss to show the correct error messages * Move \_generate\_paste\_config to tests.core * add 'project' notifications to docs * Implement basic caching around assignment CRUD * Update keystone wsgi httpd script for oslo logging * Utilities to create directores, set ownership & permissions * Modify default file/directory permissions * Add a oauth1-configuration.rst and extension section to docs * Update keystone-all man page * Cleanup cache layer tests * Implement caching for Tokens and Token Validation * Document usage notifications * Imported Translations from Transifex * Remove kvs backend from oauth1 extension * Use joins instead 
of multiple lookups in groups sql * Add project CRUD to assignment\_api Manager * Add Memory Isolating Cache Proxy * Enable SQL tests for oauth * Implement decorator-based notifications for users * Use common db model class from Oslo * Add common code from Oslo for work with database * Use testtools as base test class * Bump hacking to 0.7 * Removes KVS references from the documentation * Add notifications module * Drop support for diablo to essex migrations * Add 'cn' to attribute\_list for enabled\_users/tenants query * Implement API protection on target entities * Refactor Token Provider to be aware of expired tokens * Implement Caching for Token Revocation List * Keystone Caching Layer for Manager Calls * Create associations between projects and endpoints * Fixes a link in the documentation * Use correct filename for index & serial file when setting permissions * remove flake8 option from run\_tests.sh * Fix role lookup for Active Directory * Clean up keystone-manage man page * change oauth.consumer description into nullable * Use system locale when Accept-Language header is not provided * Fix translate static messages in response * Migrating ec2 credentials to credential * Fix error where consumer is not deleted from sql * add foreign key constraint on oauth tables * Remove a useless arg in range() * Remove enumerate calls * filter in ldap list\_groups\_for\_user * Delete file TODO * use provider to validate tokens * Fix isEnabledFor for compatibility with logging * Ensure username passed by REMOTE\_USER can contain '@' * fix the default values for token and password auth * Remove an enumerate call * Add defense in ldap:get\_roles\_for\_user\_and\_project * remove unused function * Remove Keystone specific logging module * remove refs to keystone.common.logging * Remove User Check from Assignments * Refactor Token Providers for better version interfaces * Remove kwargs from manager calls / general cleanup * Store hash of access as primary key for ec2 type * 
Add delegated\_auth support for keystone * Fix LDAP Identity get user with user\_enabled\_mask * Fix LDAP Identity with non-zero user\_enabled\_default * More validation in test\_user\_enable\_attribute\_mask * Add test test\_deleting\_project\_delete\_grants * Cleaned up a few old crufties from README * Clean hacking errors in advance of hacking update * Add unit test to check non-string password support * Assignment to reserved built-in symbol: filter * Implement domain specific Identity backends * Increase length of username in DB * Cleaned up pluggable auth docs * Fix test\_user\_enable\_attribute\_mask so it actually tests * Do not skip test\_user\_enable\_attribute\_mask in \_ldap\_livetest * Skip test\_create\_unicode\_user\_name in \_ldap\_livetest * Refactor Keystone to use unified logging from Oslo * Revoke user tokens when disabling/delete a project * Move affirm\_unique() in create() to BaseLdap * Move some logic from update() to BaseLdap * Add support for API message localization * Remove unused import * Assignment to reserved built-in symbol: dir * Move 'tests' directory into 'keystone' package * Initial implementation of unified-logging * Sync notifier module from Oslo * Move Babel dependency from test-req to req * Ignore flake issues in build/ directory * update usage in run\_test.sh for flake8 * Drop extra credential indexes * Sync models with migrations * Add memcache to httpd doc * Sync unified logging solution from Oslo * Configurable max password length (bug 1175906) * Fix select n+1 issue in keystone catalog * Make pki\_setup work with OpenSSL 0.9.x * extension migrations * Create default role on demand * Set wsgi startup log level to INFO * Abstract out attribute\_ignore assigning in LDAP driver * Abstract out attribute\_mapping filling in LDAP driver * Imported Translations from Transifex * remove swift dependency of s3 middleware * Raise max header size to accommodate large tokens * Clean up use of token\_provider manager in tests * add 
OS-TRUST to links * Run test\_mask\_password once * Remove kwargs from manager calls where not needed * V3 API need to check mandatory field when creating resources * Use dependency injection for assignment and identity * Handle circular dependencies * Clear out the dependency registry between tests * .gitignore eggs * Handle json data when migrating role metadata * Sync DB models and migrations in keystone.assignment.backends.sql * Remove passwords from LDAP queries * use 'exc\_info=True' instead of import traceback * Fix typo: Tenents -> Tenants * Use keystone.wsgi.Request for RequestClass * Update references with new Mailing List location * Scipped tests don't render as ERROR's * Implement exception module i18n support * Remove vestiges of Assignments from LDAP Identity Backend * Load backends before deploy app in client tests * default token format/provider handling * Fixing broken credential schema in sqlite * Use assignment\_api rather than assignment * Deprecate kvs token backend * Ec2 credentials table not created during testing * Correct Spelling Mistake * Remove an enumerate call * Load app before loading legacy client in tests * Add [assignment].driver to sample config * Deprecation warning for [signing] token\_format * Support token\_format for backward compatibility * sql.Driver:authenticate() signatures should match * update requires to prevent version cap * Return correct link for effective group roles in GET /role\_assignments * Implement Token Binding * Implemented token creation without catalog response * Fix XML rendering with empty auth payload * Pluggable Remote User * grammar fixes in error messages * Implement role assignment inheritance (OS-INHERIT extension) * Implements Pluggable V2 Token Provider * Register Extensions * Implements Pluggable V3 Token Provider * Mixed LDAP/SQL Backend * Clear cached engine when global engine changes * python3: Introduce py33 to tox.ini * Add version so that pre-release versioning works * Sync-up crypto from 
oslo-incubator * Add crypto dependency * Imported Translations from Transifex * Change domain component value to org from com * Move temporary test files into tests/tmp * Use InnoDB for MySQL * Rationalize how we get roles after authentication in the controllers * Python 3.x compatible use of print * Regenerate example PKI after change of defaults * assignment backend * wsgi.BaseApplication and wsgi.Router factories should use \*\*kwargs * Add unittest for keystone.identity.backends.sql Models * Imported Translations from Transifex * Do not create LDAP Domains sub tree * Use oslo.sphinx and remove local copy of doc theme * Move comments in front of dependencies * Remove context from get\_token call in normalize\_domain\_id * Fix issue with v3 tokens and group membership roles * Sync install\_venv\_common from oslo * Remove a useless arg in range() * Remove an enumerate call * Update paths to pem files in keystone.conf.sample * Don't use deprecated BaseException.message * Add callbacks for set\_global\_engine * Work without admin\_token\_auth middleware * Implement GET /role\_assignment API call * rename quantum to neutron in docs * Install locales for httpd * DB2 migration support * Use event.listen() instead of deprecated listeners kwarg * Add 'application' to keystone.py for WSGI * Remove hard tabs and trailing whitespace * Manager instead of direct driver * check for constraint before dropping * Stop passing context to managers (bug 1194938) * \`tox -ecover\` failure. 
Missing entry in tox.ini * Clean up keystone-all.rst * Fix up some trivial license mismatches * Revert environment module usage in middleware * LDAP list group users not fail if user entry deleted * Do not raise NEW exceptions * Move identity ldap backend from directory to file * wsgi.Middleware factory should use \*\*kwargs * Removing LDAP API Shim * Consolidate admin\_or\_owner rule * Isolate eventlet code into environment * Set default 'ou' name for LDAP projects to Projects * Imported Translations from Transifex * Imported Translations from Transifex * Move user fileds type check to identity.Manager * Http 400 when project enabled is not a boolean * Imported Translations from Transifex * Correct the resolving api logic in stat middleware * Remove a stat warning log * Using sql as default driver for tokens * Correct LDAP configuration doc * Force simple Bind for authentication * Initialize logging from HTTPD * LDAP get\_project\_users should not return password * Add checks to test if enabled is bool * Fix link typo in Sphinx doc * python WebOb dependency made unpinned * Remove explicit distribute depend * Version response compatible with Folsom * Adds tests for XML version response * Replace openstack-common with oslo in docs * drop user and group constraints * Correct the default name attribute for role * Allow request headers access in app context * Remove how to contribute section in favor of CONTRIBUTING.rst * Fix token purging for memcache for user token index * add ca\_key to sample configuration * Commit transaction in migration * Fix internal doc links (bug 1176211) * Missing contraction: Its -> It's (bug 1176213) * Pass on arguments on Base.get\_session * Remove bufferedhttp * Move coverage output dir for Jenkins * Check schema when dropping constraints * Import eventlet patch from oslo * Raise key length defaults * Base.get\_engine honor allow\_global\_engine=False * run\_tests.sh should use flake8 (bug 1180609) * Ignore the .update-venv directory * 
Ignore conflict on v2 auto role assignment (bug 1161963) * remove\_role\_from\_user\_and\_project affecting all users (bug 1170649) * Maintain tokens after role assignments (bug 1170186) * split authenticate call * Add db\_version command to keystone-manage * Live SQL migration tests * Fix incorrect role assignment in migration * typo in 'import pydev' statement * Fixes a typo * Imported Translations from Transifex * Improve the performance of tokens deletion for user * Revert "Set EVENTLET\_NO\_GREENDNS=yes in tox.ini." * Disable eventlet monkey-patching of DNS * Fix the debug statement * Document size limits * Add index on valid column of the SQL token Backend * Add KEYSTONE\_LOCALEDIR env variable * Add arg to keystone-manage db\_sync 2013.2.b1 --------- * Add index on expires column of the SQL token Backend * fix error default policy for create\_project * Require keystone-user/-group for pki\_setup * Replace assertDictContainsSubset with stdlib ver * separate paste-deploy configuration from parameters * Add missing oslo module * Convert openstack-common.conf to the nicer multiline format * Rename requires files to standard names * Cleanup docstrings (flake8 H401, H402, H403, H404) * imports not in alphabetical order (flake8 H306) * import only modules (flake8 H302) * one import per line (flake8 H301) * eliminate 'except:' (flake8 H201) * consistent i18n placeholders (flake8 H701, H702, H703) * use the 'not in' operator (flake8 H902) * Use TODO(NAME) (flake8 H101) * Remove unnecessary commented out code * Enumerate ignored flake8 H\* rules * Migrate to pbr * Remove unused variables (flake8 F841) * Satisfy flake8 import rules F401 and F403 * Test 403 error title * Imported Translations from Transifex * Remove useless private method * Consolidate eventlet code * Use webtest for v2 and v3 API testing * Add missing space to error msg * Imported Translations from Transifex * Read-only default domain for LDAP (bug 1168726) * Add assertNotEmpty to tests and use it * 
Implement Token Flush via keystone-manage * get SQL refs from session (bp sql-query-get) * extracting credentials * Move auth\_token middleware from admin user to an RBAC policy * Accept env variables to override default passwords * Http 400 when user enabled is not a boolean * Migrate to flake8 * Fix pyflakes and pep8 in prep for flake8 * Allow backend & client SQL tests on mysql and pg * Revert "Disable eventlet monkey-patching of DNS" * Set EVENTLET\_NO\_GREENDNS=yes in tox.ini * Disable eventlet monkey-patching of DNS * Revoke tokens on user delete (bug 1166670) * A minor refactor in wsgi.py * Skip IPv6 tests for eventlet dns * LDAP list groups with missing member entry * Fix 403 status response * Remove unused CONF.pam.url * Mark LDAP password and admin\_token secret * HACKING LDAP * Make migration tests postgres & mysql friendly * Documentation about the initial configuration file and sample data * Add rule for list\_groups\_for\_user in policy.json * Test listing of tokens with a null tenant * fix duplicate option error * Delete extra dict in token controller * What is this for? 
* Removed unused imports * Remove non-production middleware from sample pipelines * Replace password to "\*\*\*" in the debug message * Fixed logging usage instead of LOG * Remove new constraint from migration downgrade * Allow additional attribute mappings in ldap * Enable unicode error message * Sync with oslo-incubator copy of setup.py * Set empty element to "" * Fixed unicode username user creation error * Fix token ids for memcached * Use is\_enabled() in folsom->grizzly upgrade (bug 1167421) * Generate HTTPS certificates with ssl\_setup * Fix for configuring non-default auth plugins properly * test duplicate name * Add TLS Support for LDAP * fix undefined variable * clean up invalid variable reference * Clean up duplicate methods * stop using time.sleep in tests * don't migrate as often * use the openstack test runner * Fix 401 status response * Fix example in documentation * Fix IBM copyright strings * Share one engine for more than just sqlite in-memory * Add missing colon for documentation build steps * Mark sql connection with secret flag 2013.1.rc2 ---------- * Fix test coverage for v2 scoped auth xml response (bug 1160504) * Fix test coverage for v2 scoped auth xml response (bug 1160504) * close db migration session * Use string for port in default endpoints (bug 1160573) * keystone commands don't print any version information * bug 1159888 broken links in rst doc * use the roles in the token when recreating * Sync with oslo-incubator * Rename trust extension (bug 1158980) * Rename trust extension * keystone commands don't print any version information * Imported Translations from Transifex 2013.1.rc1 ---------- * Add a dereference option for ldap * Make versions aware of enabled pipelines * Move trusts to extension * Move trusts to extension * Version bump to 2013.2 * Add a dereference option for ldap * Allow trusts to be optional * Enable emulation for domains * Wrap config module and require manual setup (bug 1143998) * Correct spacing in warning msg 
* Prohibit V3 V2 token intermix for resource in non-default domain (bug 1157430) * Properly handle emulated ldap enablement * Support for LDAP groups (bug #1092187) * Validate domains unconditionally (bug 1130236) * Fix live ldap tests * V2, V3 token intermix for unscoped tokens (bug 1156913) * Pass project membership as dict in migration 015 * Ensure delete domain removes all owned entities * Utilize legacy\_endpoint\_id column (bug 1154918) * Test default\_project\_id scoping (bug 1023502) * Fix XML handling of member links (bug 1156594) * Discard null endpoints (bug 1152632) * extracting user and trust ids into normalized fields * No parent exception to wrap * Remove duplicate password/token opts * xml\_body returns backtrace on XMLSyntaxError * duplicated trust tests * Migrate roles from metadata to user\_project\_metadata * Fixes bug 1151747: broken XML translation for resource collections * Revise docs to use keystoneclient.middleware.auth\_token * quiet route logging on skipped tests * Ensure tokens are revoked for relevant v3 api calls * Remove un-needed LimitingReader read() function * Catch and log server exceptions * Added test cases to improve LDAP project testing * Switch to final 1.1.0 oslo.config release * Filter out legacy\_endpoint\_id (bug 1152635) * Improve tests for api protection and filtering * add belongs\_to check * Revert "update tests/\_\_init\_\_.py to verify openssl version" * Revert "from tests import" * Make Keystone return v3 as part of the version api * Run keystone server in debug mode * remove spurious roles check * bug 1133526 * Fix folsom -> grizzly role table migration issues (bug 1119789) * Delete tokens for user * from tests import * v3 endpoints won't have legacy ID's (bug 1150930) * return 201 Created on POST request (bug1131119) * add missing attributes for group/project tables (bug1126021) * Remove unused methods from LDAP backed * Move get\_by\_name to LdapBase * fix typo in kvs backend * mark 2.0 API as stable * unable 
to load certificate should abort request * Move auth plugins to 'keystone.auth.plugins' (bug 1136967) * Change exception raised to Forbidden on trust\_id * cleanup trusts in controllers * remove unused import * ports should be ints in config (bug 1137696) * Expand v3 trust test coverage * Trusts * bug 1134802: fix inconsistent format for expires\_at and issued\_at * Sync timeutils with oslo * Straighten out NotFound raising in LDAP backend * residual grants after delete action (bug1125637) * Remove TODO that didn't land in grizzly * Make getting user-domain roles backend independant * Explain LDAP page\_size & default value * Imported Translations from Transifex * Enable a parameters on ldap to allow paged\_search of ldap queries This fixes bug 1083463 * update tests/\_\_init\_\_.py to verify openssl version * command line switch for short pep8 output * Convert api to controller * bug 1131840: fix auth and token data for XML translation * flatten payload for policy * Unpin pam dependency version * keystone : Use Ec2Signer utility class from keystoneclient * Move handle\_conflicts decorator into sql * domain\_id\_attributes in config.py have wrong default value * Rework S3Token middleware tests * Remove obsolete \*page[\_marker] methods from LDAP backend * Setup logging in keystone-manage command * Ensure keystone unittests do not leave CONF.policyfile in bad state * catch errors in wsgi.Middleware * Fix id\_to\_dn for creating objects * Tests for domain-scoped tokens * domain-scoping * Pass query filter attributes to policy engine * Removed redundant assertion * v3 token API * Update oslo-config version * Correct SQL migration 017 column name * merging in fix from oslo upstream * enabled attribute emulation support * Change the default LDAP mapping for description * Ensure user and tenant enabled in EC2 * Disable XML entity parsing * Remove old, outdated keystone devref docs * Update the Keystone policy engine to the latest openstack common * Implement name space 
for domains * Update sample\_data.sh to match docs * project membership to role conversion * Remove test\_auth\_token\_middleware * Workaround Migration issue with PostgreSQL * make LDAP query scope configurable * make fakeldap.\_match\_query work for an arbitrary number of groups * Use oslo-config-2013.1b3 * Remove usage of UserRoleAssociation.id in LDAP * Add an update option to run\_tests.sh * Add pysqlite as explicit test dep * fix unit test when memcache middleware is not configured * add missing kvs functionality (bug1119770) * Update to oslo version code * adding additional backend tests (bug1101244) * Fix spelling mistakes * Cleaned up keystone-all --help output * Keystone backend preparation for domain-scoping * Use install\_venv\_common.py from oslo * Spell accommodate correctly * Missed import for IPv6 tests skip * Add missing log\_format, log\_file, log\_dir opts * Fix normalize identity sql ugrade for Mysql and postgresql * remove duplicate model declaration/attribution * simplify query building logic * Fix test\_contrib\_s3\_core unit test * Expand dependency injection test coverage * remove unneeded config reloading (it's already done during setUp) * allow unauthenticated connections to an LDAP server * Relational API links * return 400 Bad Request if invalid params supplied (bug1061738) * UserApi.update not to require all fields in arg * Tenant update on LDAP breaks if there is no update to apply * Query only attributes strictly required for keystone when using it with existing LDAP servers * Update .coveragerc * Add size validations to token controller * add check for config-dir parameter (bug1101129) * Silence routes internal debug logging * Imported Translations from Transifex * Delete Roles for User and Project LDAP * Why .pop()'ing urls first is important * don't create a new, copied list in get\_project\_users * Fixes 'not in' operator usage * Add --keystone-user/group to keystone-manage pki\_setup * Adds png versions of all svg image files. 
Changes reference * Updates migration 008 to work on PostgreSQL * Create a default domain (bp default-domain) * Generate apache-style common access logs * import tools/flakes from oslo * tenant to project in the apis * Tenant to Project in Back ends * Fix bugs with set ldap password * Enable/disable domains (bug 1100145) * Readme: use 'doc' directory not 'docs' * rename tenant to project in sql * Update to requests>=1.0.0 for keystoneclient * Fix pep8 error * Document user group LDAP options * Sync latest cfg from oslo-incubator * Limit the size of HTTP requests * Fix role delete method in LDAP backend * public\_endpoint & admin\_endpoint configuration * Skip IPv6 tests if IPv6 is not supported * Allow running of sql against the live DB * Test that you can undo & re-apply all migrations * downgrade user and tenant normalized tables downgraded such that sqlite is supported, too * Auto-detect max SQL migration * Safer data migrations * Sync base identity Driver defs with SQL driver * Fix i18n of string templates * Enhance wsgi to listen on ipv6 address * add database string field length check * Autoload schema before creating FK's (bug 1098174) * Enable exception format checking in the tests * reorder tables for delete * Validated URLs in v2 endpoint creation API * Fixes import order nits * Cleanup keystoneclient testing requirements * Fix issue in test\_forbidden\_action\_exposure * Correct spelling errors / typos in test names * Update ldap exceptions to pass correct kwargs * Add \_FATAL\_EXCEPTION\_FORMAT\_ERRORS global * Keystone server support for user groups * Add missing .po files to tarball * Imported Translations from Transifex * adds keyring to test-requires * Revert "shorten pep8 output" * Upgrade WebOb to 1.2.3 * il8n some strings * Imported Translations from Transifex * Removed unused variables * Removed unused imports * Add pyflakes to tox.ini * Fix spelling typo * shorten pep8 output * Driver registry * Adding a means to connect back to a pydevd 
debugger * add in pip requires for requests * Split endpoint records in SQL by interface * Fix typo s/interalurl/internalurl/ * module refactoring * Test for content-type appropriate 404 (bug 1089987) * Imported Translations from Transifex * fixing bug 1046862 * Expand default time delta (bug 1089988) * Add tests for contrib.s3.core * Test drivers return HTTP 501 Not Implemented * Support non-default role\_id\_attribute * Remove swift auth * Move token controller into keystone.token * Import pysqlite2 if sqlite3 is not available * Remove mentions of essex in docs (bug 1085247) * Ensure serviceCatalog is list when empty, not dict * Adding downgrade steps for migration scripts * Port to argparse based cfg * Only 'import \*' from 'core' modules * use keystone test and change config during setUp * Bug 1075090 -- Fixing log messages in python source code to support internationalization * Added documentation for the external auth support * check the redirected path on the request, not the response * Validate password type (bug 1081861) * split identities module into logical parts remove unneeded imports from core * Ensure token expiration is maintained (bug 1079216) * normalize identity * Fixes typo in keystone setup doc * Imported Translations from Transifex * Stop using cfg's internal implementation details * syncing run\_tests to match tox * Expose auth failure details in debug mode * Utilize policy.json by default (bug 1043758) * Wrap v3 API with RBAC (bug 1023943) * v3 Identity * v3 Catalog * v3 Policies * Import auth\_token middleware from keystoneclient * Imported Translations from Transifex * Refix transient test failures * Make the controller addresses configurable * Expose authn/z failure info to API in debug mode * Refactor TokenController.authenticate() method * Fix error un fixtures * Ensures User is member of tenant in ec2 validation * Properly list tokens with a null tenant * Reduce total number of fixtures * Provide config file fields for enable users in 
LDAP backend (bug1067516) * populate table check * Run test\_keystoneclient\_sql in-memory * Make tox.ini run pep8 checks on bin * tweaking docs to fix link to wiki Keystone page * Various pep8 fixes for keystone * Use the right subprocess based on os monkeypatch * Fix transient test failures (bug 1077065, bug 1045962) * Rewrite initial migration * Fix default port for identity.internalURL * Improve feedback on test failure * fixes bug 1074172 * SQL upgrade test * Include 'extra' attributes twice (bug 1076120) * Return non-indexed attrs, not 'extra' (bug 1075376) * bug 1069945: generate certs for the tests in one place * monkeypatch cms Popen * HACKING compliance: consistent use of 'except' * auth\_token hash pki key PKI tokens on hash in memcached when accessed by auth\_token middelware * key all backends off of hash of pki token * don't import filter\_user name, use it from the identity module * don't modify the passed in dict to from\_dict * move hashing user password functions to common/utils * ignore .tox directory for pep8 in runtests * Imported Translations from Transifex * Implements REMOTE\_USER authentication support * pin sqlalchemy to 0.7 * Move 'opentack.context' and 'openstack.params' definitions to keystone.common.wsgi * Removes duplicate flag for token\_format * Raise exception if openssl stderr indicates one * Ignore keystone.openstack for PEP8 * Fixed typo in log message * Fixes 500 err on authentication for invalid body * Enable Deletion of Services with Endpoints * Exception.message deprecated in py26 (bug 1070890) * Utilize logging instead of print() * stop LdapIdentity.create\_user from returning the user's password * Compare token expiry without seconds * Moved SQL backend tests into memory * Add trove classifiers for PyPI * Adding handling for get user/tenant by name * Fixed bug 1068851. 
Refreshed new crypto for the SSL tests * move filter\_user function to keystone.identity.core * Fixes response for missing credentials in auth * making PKI default token type * Fixes Bug 1063852 * bug 1068674 * Update common * Extract hardcoded configuration in ldap backend (bug 1052111) * Fix Not Found error, when router not match * add --config-dir=DIR for keystone-all option * Add --config-dir=DIR in OPTIONS * Delete role does not delete role assignments in tenants (bug 1057436) * replacing PKI token detection from content length to content prefix. (bug 1060389) * Document PKI configuration and management * Raise if we see incorrect keyword args "condition" or "methods" * Filter users in LDAP backend (bug 1052925) * Use setup.py develop to insert code into venv * Raise 400 if credentials not provided (bug 1044032) * Fix catalog when services have no URL * Unparseable endpoint URL's should raise friendly error * Configurable actions on LDAP backend in users Active Directory (bug 1052929) * Unable to delete tenant if contains roles in LDAP backend (bug 1057407) * Replaced underscores with dashes * fixes bug 1058429 * Command line switch for standard threads * Remove run\_test.py in favor of stock nose * utf-8 encode user keys in memcache (bug 1056373) * Convert database schemas to use utf8 character set * Return a meaningful Error when token\_id is missing * Backslash continuation cleanup * notify calling process we are ready to serve * add Swift endpoint in sample data * Updated Fix for duplicated entries on LDAP backend for get\_tenant\_users * Fix wsgi config file access for HTTPD * Bump version to 2013.1 * Limit token revocation to tenant (bug 1050025) * Fixed trivally true tests (bug 983304) * add Quantum endpoint in sample data * Add XML namespace support for OSADM service api * Delete user tokens after role grant/revoke * LDAP backend attribute fixes * Document memcached host system time configuration * Implementation of tenant,user,role list functions for 
ldap * Initialize Metadata variable * Cleanup PEP8 errors from Common * List tokens for memcached backend * Implement token endpoint list (bug 1006777) * Ignore eclipse files * Identity API v3 Config, Routers, Controllers * Sync some misc changes from openstack-common * Sync latest cfg from openstack-common * Remove id\_hash column * LOG.warn all exception.Unauthorized authentication failures * Fixed: test\_default\_tenant\_uuid\_token not running * Upgrade PEP8 to 1.3.3 (bug 1037303) * Expand PEP8 coverage to include docs & tests * Removed/fixed unused variable references * HACKING compliance & staticly init module vars * PEP8 fix E251 * PEP8 fix * Removed unused imports * Check for expected cfg impl (bug 1043479) * Fixed typos in comment * HACKING: Import by full module path * HACKING: Use single quotes * mistake in doc string * pep8 1.3.3 cleanup removing unused imports * Removed dead code * Fix auth\_token middleware to fetch revocation list as admin * Require authz to update user's tenant (bug 1040626) * Code cleanup in doc/source/conf.py * Typo fix in keystone: existant => existent * allow middleware configuration from app config * PEP8 fix for PAM test * change verbose and debug to Fasle in keystone.conf.sample * add token\_format=UUID to keystone.conf.sample * Demonstrate that authenticate() returns roles * Add nosehtmloutput as a test dependency * Less information returned with IntegrityError * Support running the tests in the debugger * Removed stray print statement (bug 1038131) * Remove unused variables * PKI Token revocation * Remove unused imports * Adding missing files to MANIFEST.in * Simplify the sql backend deletion of users and tenants * Add tests for PAM authentication * Allow overloading of username and tenant name in the config files * Enabling SQL Catalog tests (bug 958950) * Use user home dir as default for cache * Set example key\_size to 1024 * Log errors when signing/verifying * Implement python version of migration 002 * Set default 
signing\_dir based on os USER * Assert adminness on token validation (bug 1030968) * Test for Cert by name * Typo error in keystone/doc/source/configuration.rst * fix broken link * Cryptographically Signed tokens * Sync jsonutils from openstack-common * Added user name validation. Fixes bug 966251 * Import ec2 credentials from old keystone db * Debug output may include passwords (bug 1004114) * Raise unauthorized if tenant disabled (bug 988920) * Files for Apache-HTTPD * Implementation of LDAP functions * Fix the wrong infomation in keystone-manage.rst * Webob needs body to calc Content-Length (bug 1016171) * Prevent service catalog injection in auth\_token * Admin Auth URI prefix * updating testing documentation * adding keystoneclient test * Removed redundant / excessively verbose debug * Making docs pretty! * Adding user password setting api call * Fixing pep8 errors in tests/\*py * Make sure user dict has id key before checking against it * pep8 for openssl * Run pep8 for tests * Move monkey patch to keystone-all startup * Use sdist tarball instead of zipball * Return a 409 error when adding a second time a role to user/tenant * notify calling process we are ready to serve * Set iso8601 module as default dependence * Fixed user-only role deletion error * Use PyPI for keystoneclient * keystone\_manage certificate generation * documenting models * Reorder test imports by full import path * pep8 v1.3.3 compliance (bug 1019498) * Correct Tree DN * don't assume that the LDAP server require authentication * fix variable names to coincide with the ones in common.ldap * Keystone should use openstack.common.timeutils * Fixed marker & limit computation (bug 1006055) * Do not crash when trying to remove a user role (without a tenant) * Keystone should use openstack.common.jsonutils * Refactor 404's into managers & drivers (bug 968519) * fix sphinx warnings * fix man page build * Utilize newer changes in openstack-common * Add .mailmap file * setting up babel for i18n work 
blueprint start-keystone-i18n * Removed unused import * Fix order of returned tuple elements in pam authenticate * Reorder imports by full module path * Pass serviceCatalog in auth\_token middleware * Fixed typo in routing conditions (bug 1006793) * 400 on unrecognized content type (bug 1012282) * Basic request stats monitoring & reporting * Monkey patching 'thread' * Speed up SQL unit tests * PEP8 fixes * Clean up test requires a bit * Use cfg's new global CONF object * Add s3 extension in keystone.conf sample * Tweak for easier, safer subclassing * Revert file mode to be non-executable * fix importing of optional modules in auth\_token * Carrying over token expiry time when token chaining * Keystone should use openstack.common.importutils * Require authz for user role list (bug 1006815) * Require authz for service CRUD (bug 1006822) * PEP8 fixes * Use cfg's new behavior of reset() clearing overrides * Use cfg's new group autocreation feature * Sync with latest version of openstack.common.cfg * blueprint 2-way-ssl * Fixes some pep8 warning/errors * Update swift\_auth documentation * Add ACL check using : format * Use X\_USER\_NAME and X\_ROLES headers * Allow other middleware overriding authentication * Backslash continuation removal (Keystone folsom-1) * Remove service\_\* from authtoken examples * Nail prettytable test dependency at 0.5.0 * Invalidate user tokens when a user is disabled * Fix depricated /users/{user-id}/roles * Changed arguments in keystone CLI for consistency * Add validations of 'name' field for roles, users and tenants * Added 'NormalizingFilter' middleware * One 'ctrl-c' kills keystone * Make sure we parse delay\_auth\_decision as boolean * Flush tenant membership deletion before user * notify calling process we are ready to serve * Invalidate user tokens when password is changed * Added tenant name validation. 
Fixes bug 966249 * Corrects url conversion in export\_legacy\_catalog * Truly handle mailmap entries for all combinations * fix pam admin user case * Improve the sample keystone.conf * Add defaults for ldap options * Sync to newer openstack-common * Set defaults for sql options * Set defaults for port options * Add defaults for driver options * Use ConfigOpts.find\_file() to locate catalog template * Use ConfigOpts.find\_file() to locate policy.json * Policy doc updates; RST syntax consistency * Removed SimpleMatch 'shim'; updated readme * Removed old sections; improved syntax consistency * cleanup dependent data upon user/tenant deletion * Update tests to run servers on 127.0.0.1 * Switch to 1000 rounds during unit tests * Fix argument name referred in the document * Exit on error in a S3 way * Auto generate AUTHORS file for keystone component * Misnamed exception attribute (bug 991936) * Avoid ValueError in 12.04 essex pkg (bug 988523) * Non-nullable User, Tenant, Role names (bug 987121) * Fix expired token tests * Make run\_tests.py non-executable * Add distribute to test-requires * Makes the ldap backend return proper role metadata * cleanup no\_meta user in live LDAP test * Add ChangeLog to tarball * Fix "it's" grammar errors * Rename keystone.conf to .sample * Import latest openstack-common * Stub out swift log configuration during testing * Remove tenant membership during user deletion * Add a \_ at the end of reseller\_prefix default * additional logging to support debugging auth issue * Add support to swift\_auth for tokenless authz * Make import\_nova\_auth only create roles which don't already exist * don't duplicate the extra dict in extra * Fix looking for config files * endpoint-crud 404 (bug 963056) * user-role-crud 404 (bug 963056) * ec2-credential-crud 404 (bug 963056) * service-crud 404 (bug 963056) * user-crud 404 (bug 963056) * tenant-crud 404 (bug 963056) * Add build artifacts missing from .gitignore * Switch keystone.test.TestCase to use 
unittest2 * Raise keystone.exception for HTTP 401 (bug 962563) * Fixed misc errors in configuration.rst * Docs: SQL-based vs File-based Service Catalog * Improve service CRUD test coverage * Change default catalog driver to SQL; doc the options * Replace tabs with spaces * role-crud 404 (bug 963056) * Improve swift\_auth test coverage + Minor fixes * Open Folsom * S3 tokens cleanups * Check values for EC2 * Fix critical typo in endpoint\_create (bug 961412) * updating docs to include creating service accts * unique role name constraint * Add test for swift middleware * Spring cleaning, fix PEP8 violations * Rename tokenauth to authtoken * pass the arguments in when starting keystone-all * fix keystone-all's usage of options vs conf * Wrapped unexpected exceptions (bug 955411) * Changing belongsTo validation back to ID * Clean up sql connection args * Improved file logging example (bug 959610) * Swift middleware doc update * Fixes LP #954089 - Service list templated catalog * Remove nova-specific middlewares * Add check for MAX\_PASSWORD\_LENGTH to utils * Remove glance\_auth\_token middleware * Support PyPAM in pam backend, update to latest API * Fix default port for identity.internalURL * Installing keystone docs * Update username -> name in token response * Refactor keystone.common.logging use (bug 948224) * Add automatically generated code docs * Properly return 501 for unsupported Catalog calls * docstring cleanup to remove sphinx warnings * updating documentation for rewrite of auth\_token * Allow connect to another tenant * Update docs for keystone client cli args * Raising unauthorized instead of 500 (bug 954547) * Failing to update tenants (bug 953678, bug 954673) * added LDAP section to architecture and architecture * Bug #943031 MySQL Server has gone away added docnotes of error messages caught for mysql and reference * making all use of time follow datetime.utcnow() fixes bug 954057 * Improved legacy tenancy resolution (bug 951933) * sample\_data.sh: 
check file paths for packaged installations * Fix iso8601 import/use and date comparaison * Fix double-quoted service names * Remove Nova Diablo reference from migrate docs * Fixes the cli documentation of user/tenant/roles * Add simple set of tests for auth\_token middleware * update documention on changing user password * enables run\_test option to skip integration * Add token caching via memcache * Update get\_metadata to return {} * Diablo to Essex migration docs (bug 934328) * Added license header (bug 929663) * Add AUTHORS to the tarball * create service endpoints in sample data * Fix EC2 credentials crud after policy backend change * port common policy code to keystone * rename belongs\_to to belongsTo as per the API spec * Make sure we have a port number before int it * fixes lp#949648 change belongsTo validate to name * HTTP\_AUTHORIZATION was used in proxy mode * fix Nova Volume Service in sample data * fixes bug lp#948439 belongs\_to and serviceCatalog behavior \* removing belongs\_to as a kwarg and getting from the context \* adding a serviceCatalog for belongs\_to calls to tokens \* adding test to validate belongs\_to behavior in tokens * Make bind host configurable * add more default catalog templates * Fix coverage jobs for Jenkins * Improve auth\_str\_equal() * Set default identity driver to sql (bug 934332) * Renamed sqlite files (bug 944951) * Isolating backtraces to DEBUG (bug 947060) * updating readme to point to developer setup docs \* fixes bug 945274 * Add reseller admin capability * Remove trailing whitespaces in regular file * LDAP get\_user\_by\_name * Added missing import (bug 944905) * add git commit date / sha1 to sphinx html docs * gitignore follow up for docs/ rename * improve auth\_token middleware * Add service accounts to sample\_data.sh * standardize ldap and related tests * Align with project configs * Fixes doc typo s/SERVIVE/SERVICE/ * Use constant time string comparisons for auth * Unpythonic code in redux in auth\_token.py * 
fix pep8 * GET /v2.0 (bug 930321) * LDAP member defaults * Handle KeyError in \_get\_admin\_auth\_token * Align tox jobs with project standards * renaming pip-requires-test to test-requires * Provide request to Middleware.process\_response() * Add Vary header (bug 928057) * Implement a Catalog SQL backend * Set tenantName to 'admin' in get\_admin\_auth\_token * LDAP Identity backend * Implements extension discovery (bug 928054) * Support unicode in the keystone database * Add HEAD /tokens/{token\_id} (bug 933587) * XML de/serialization (bug 928058) * fleshing out architecture docs * Update auth\_token middleware so it sets X\_USER\_ID * Adds AUTHORS file generated from git log (and de-duplicated) * The default nova compute port is 8774 * Fix case of admin role in middleware * Fix MANIFEST.in to include missing files * Remove extraneous \_validate\_claims() arg * Create tools/sample\_data.sh * Backslash continuations (Keystone) * Correct config name for max\_pool\_size * Use cfg's new print\_help() method * Move cfg to keystone.openstack.common * Remove cfg dict mixin * Update cfg from openstack-common * Fix copyright dates and remove duplicate Apache licenses * some additional style bits * Add migration path for Nova auth * fix the style guide to match the code * Re-adds admin\_pass/user to auth\_tok middleware * Fix thinko in keystone-all sys.path hack * Removing broken & redundant code (bug 933555) * Return HTTP 401 bad user/password is specified * cli now returns an exit status cmd is invalid * Ignore sqlite.db files * Implements admin logic for tenant\_list call * Implemented get\_tenant\_users. 
Fixed bug 933721 * Removing unused imports from keystone.cli * Set include\_package\_data=True in setup.py * Remove data\_files section from setup.py * Update Manifest.in * Add migrate.cfg to data\_files in setup.py * Should return 300 Multiple Choice (bug 925548) * Admin version pipeline not utilized (bug 925548) * fixes #934459 * Fix logging.config import * backport some asserts * remove pycli * Adds missing argument to add\_user\_to\_tenant in create\_user * Fixes a failure caused by a recent change to user update in the client * remove executable bit from setup.py * Raising 'NotImplmented' results in TypeError * Update docs for Swift and S3 middlewares * Added Apache 2.0 License information * Add docs on keystone\_old -> ksl migration * Add token expiration * Update docs to for current keystone-manage usage * add catalog export * Handle unicode keys in memcache token backend * make sure passwords work after migration * add legacy diablo import tests * change password hash * add essex test as well * add sql for import legacy tests * add import legacy cli command * add migration from legacy db * remove keystoneclient-based manage commands * Remove executable bit from auth\_token.py * Update swift token middleware * Add s3\_token * Add pagination to GET /tokens * Fixes role checking for admin check * Fix webob exceptions in test\_middlware * Add tests for core middleware * Add version description to root path * Add TokenNotFound exception * remove diablo tests, they aren't doing much * Fix largest memory leak in ksl tests * Add memcache token backend * Friendly JSON exceptions (bug 928061, bug 928062) * Fix comment on bcrypt and avoid hard-coding 29 as the salt length * Add SQL token backend * Add content-type to responses * Cope with unicode passwords or None * Add auth checks to ec2 credential crud operations * termie all the things * example in hacking was incorrect * Ensures duplicate users and tenants can't be made * make pip requires match nova * fixes 
lp:925721 adds .gitreview for redux branch * remove novaclient, fix python syntax * We don't need all the deps to check pep8 * remove extra line * Make ec2 auth actually work * fixing grammar, noting broken enable, adding hacking with prefs for project * Removed unused reference * adding a token service Driver to define the interface * Added support for DELETE /tokens/{token\_id} * Fixes bug 924391 * ran through all commands to verify keywords against current (master) keystonelight * updating docs: * Fix "KeyError: 'service-header-mappings'" * updating tox.ini with test pip requirements * use our own logging module * Update auth\_token middleware to support creds * Removes nova middleware and config from keystone * minor docstring update for new locations * Missed one more keystone-server * Renamed keystone-server to keystone-all based on comments in LP: #910484 * be more safe with getting json aprams * skip the two tests where testing code is failing * accept POST or PUT for tenant update * deal with reparsing the config files * don't automatically parse sys.argv for cfg * deal with tags in git checkout * fix keystoneclient tests * add tests for essex and fix the testing framework * Update docs/source/developing.rst * Change the name of keystone to keystone-server so the binaries dont conflict with python-keystoneclient * Normalize build files with current jenkins * Use gerrit instead of github * Fix pep8 violations * Add .gitreview file * Added keystone-manage list\_role\_grants (bug 923933) * removing unused images, cleaning up RST in docstrings from sphinx warnings * pep8 cleanup * shifting contents from \_static to static * adding in testing details * moved notes from README.rst into docs/architecture.rst * updating formating for configuration page * format tweaks and moving old docs * shifting older docs into old/ directory * doc updates * moving in all the original docs from keystone * adding python keystoneclient to setup.py deps * fixing up PIP 
requirements for testing and virtualenv * indents * Make it as a subclass * Added shortcut for id=NULL queries (bug 916386) * fix style and termie's comments about comments * invalid params for roles.delete * initial stab at requiring adminness * Simplify code * add tests that auth with tenant user isn't member of * Add s3tokens validation * Test coverage for issue described in bug 919335 * Removing \_\_init\_\_ from non-packages (bug 921054) * add instructions for setting up a devenv on openSUSE 11.4 and 12.1 * Documented race condition (bug 921634) * Fix race in TestCreateTokenCommand (bug 921634) * Forgot to update models (bug 885426) * Updating example glance paste config * add a bunch of basic tests for the cli * Migrated 'enabled' int columns to bool for postgres (bug 885426) * remove this useless catalog * move cli code into a module for testing * Updated bp keystone-configuration for bp keystone-manage2 * Return Version and Tenant in Endpoints * Updated error message for keystone-manage2 * allow class names to be different from attr names * add ec2 credentials to the cli * fix middleware * Added: "UserWithPassword" Added: "UserWithOnlyEnabled" Removed: "UserWithOnlyPassword" * Update Extended Credentials (EC2, S3) * Fix for bug 921126 * Adds keystone auth-n/auth-z for Swift S3 API * Implement cfg.py * bcrypt the passwords * fix token vs auth\_token * Implement Secure Token Auth * some quick fixes to cli, tests incoming * fix pep8 * fix some more pass-by-reference bugs * strip password before checking output * flip actual and expected to match common api * don't allow disabled users to authenticate * turn off echo * fix invalid\_password, skip ec2 tests * Suppressed backtraces in tests causes sweaty eyes * strip password from sql backend * raise and catch correct authenticate error * rely on internal \_get\_user for update calls * Fixed: Inserting URLs into endpoint version attr * strip password from kvs backend * fix user\_get/user\_list tests * Release 
Notes for E3 * Addresses bug 918608 * Restore Console Info Logging - bp keystone-logging * removing the sphinx\_build from setup.py, adding how to run the docs into the README * Added Vary header to support caching (bug 913895) * Implemented subparsers (bp keystone-manage2) * Handle EC2 Credentials on /tokens * ec2 docs * simple docstrings for ec2 crud * Fixed PEP8 violations and disallowed them * Implemented bp keystone-manage2 * Fixes 918535: time not properly parsed in auth\_token middleware * Use dateutil 1.5 * get docs working * some cli improvements * add checks for no password attribute * Prestage fix - fixed requirement name; python-dateutil, not dateutil * users with correct credentials but disabled are forbidden not unauthorized * Pre-staging pip requires * shimming in basics from original keystone * test login fails with invalid password or disabled user * doctry * use token\_client in token tests * remove duplicate pycli from pip-requires * fix ec2 sql config * get\_client lets you send user and tenant * update how user is specified in tests * rename ec2 tests to be more explicit * use the sql backend for ec2 tests * more failing ec2 tests * add METADATA for boo * add (failing) tests for scoping ec2 crud * add some docs that got overwritten last night * Bug #916199: keystone-manage service list fails with AttributeError on Service.description * Exception raise error * Updates to middleware to deprecate X\_USER * Revert "Exception raise error" * fix pep8 * update tests * update some names * fix some imports * split up sql backends too * split up the services and kvs backends * establish basic structure * add docs for various service managers * expect sphinx sources to be autogenned * some tiny docs * fix sphinx * testing rst on github * updating dependencies for ksl * needed to do more for cli opts * make a main in keystone-manage * fix pep8 error * rename apidoc to autodoc * Fix typo * Fix LDAP Schema Syntax (bug 904380) * return to starting directory 
after git work * spacing * tests for ec2 crud * add keystoneclient expected format * add sql backend, too * add an ec2 extension * update readme * Exception raise error * re-indent * re-indent * re-indent * re-indent kvs.py * re-indent test.py * remove models.py * add some docs to manager * dynamic manager classes for now * add a couple more tests * Bug #915544: keystone-manage version 1 commands broken when using flags * add some more todos * strip newlines * TODO * add role refs to validate token * fix token auth * check for membership * flush that sht * add more middleware * fixing WatchedFileHandler * logging to debugging by default for now * add a noop controller * woops * add glance middleware ?? * add legacy middleware * fix setup.py * adding #vim to file with changed indent * add id-only flag to return IDs * rename ks to keystone-manage * fixing imports for syslog handlers and gettext * adding gettext * adding logging from configuration files, default logging per common * cli using keystoneclient * add a db\_sync command to bin/ks, remove others * merge test and default configs * adding project to keystone config to find default config files * some more config in bin/keystone * in the bin config too * rename many service parts to public * keystone\_compat -> service * remove keystone from names, remove service * remove default configuration * basic service running again * rename extras to metadata * version number in setup.py * add basic sphinx doc bits * remove references to keystone light * renaming keystonelight to keystone * keystoneclient tests working against sql backend * run all teh keystoneclient tests against sql too * move everything over to the default config * config system overhaul * add nova's cfg framework * fix pep8 * missed a file * most tests working again * still wip, got migration mostly working * get the sql ball rolling, still wip * add sql backend, WIP * Show useful traceback if manage command fails * Fix minor typo * Add 'tenants' 
to Auth & Validate Response * Fixed Test Coverage Handling * Adding prettytable dependency * Front-end logging * tweaking for running regular tests in jenkins * Implement Role Model * xsd fixes * Added decorators for admin and service\_admin checks * Initial keystone-manage rewrite (bp keystone-manage2) * Correct endpoint template URLs in docs * fix bug lp:843064 * finished up services stuff * add the various role tests * add list users * get user tests working * Remove install\_requires processing * get endpoints test working * get tenant\_add\_and\_remove\_user test working * tenant test working again * copy over the os-ksadm extension * Implement Endpoint, Endpoint Template, and Credential Managers * PEP8 keystone cleanup * Changes run\_tests.sh to also run pep8 by default * example crud extension for create\_tenant * Updates to Tests/Testing * Un-pythonic methods lp:911311 Fixed pep8 problems Changed comments to docstrings * get some tests working again * merge fixes * fixup * Made tests use both service and admin endpoints * All tests but create\_tenant pass * Split keystone compat by admin and service endpoints * Install a good version of pip in the venv * fix bug lp:910491 option "service\_host" in keystone.conf not works * Added broken tests to show compatibility gaps * Added tox.ini file * Split keystone compat by admin and service endpoints * Implement Service Manager * Implement Tenant Manager * Fixes bug lp:910169 - Tests are using too much memory Added super() call to tearDown() method * Changed the call to create the KeystoneContextMiddleware object to pass the correct glance ConfigOpts object * Added logging on core modules * Adding logging to Auth-Token Middleware * Implement Role Manager * Refactor models and backends * Add HP-IDM extension to fix Bug 890411 * Move URL Normalizer to Frontends * move novaclient tests over also * clean up test\_identity\_api * clean up keystoneclient setup * Move Global Role variables out of backendutils * Bug 
#909255: Endpoint handling broken on SQL backend by portable-identifiers changes * add role crud * speed up tests * add basic fixture functionality * documentation driven development * novaclient now requires prettytable * Return Endpoint IDs * Correct Handling of Default Tenant * Fix duplicate logging * Added global endpoints response in XML as well * Fix: Client and Unit Tests not correctly failing a build * Bug #907521. Changes to support get roles by service * Always Return Global Endpoints * Added release notes * Fixed error with database initialization * Tests use free TCP/IP ports * Testing Refactor - this is a squash of 6 commits - original commits are vailable for cherry-picking here: https://github.com/ziadsawalha/keystone/commits/tests * Added HP-IDM documentation artifacts * whitespace * whitespace * make create\_tenant work for keystone api * common ks client creation * Fixed version response (bug 891555 and bug 843052) * Implement Multiple Choices Response (bug 843051) * updating of docs * Fix LDAP schema (bug 904815) * working on a tenant\_create test * standardize spacing * novaclient uses password instead of apikey * update to use the correct repo for python-novaclient * fix tenant auth tests * Updated namespace * Fixes the catalog return in d5\_compat calls * Added: ./keystone-manage database goto * Added databased version check on startup w/ docs * Revised in-memory sql connection path for sqlalchemy * Clarify 'test not found' error message * Contract fix: change IDs from xsd:ID to xsd:string * Tenants - asserted all the things (bug 887844) * Support for unscoped admin tokens * LDAP: fix to keystone.ldif * Contract fix: IDs are not Ints, they are ID or string types * Contract fix: description optional * Update tracer excludes for Linux * Fixed bug 905422. Swift caching should work again. 
Also fixed a few other minor syntactical stuff * Update test\_keystone\_manage to use unittest2 * Python 2.6 subprocess.check\_output doesn't exist * No more python path changes * Clarified language on migration instructions * Refactor: Workaround for python build\_sphinx failure * Fixed some skipped tests * Format keystone-manage output better * Added instructions to git clone from github * Refactor: Computing api/model module paths dynamically * Introduces UID's & domain models (bp portable-identifiers) * Improved test coverage of d5 compat * Fixed: Tests returning successful (0) on failure * D5 Compatibility Support * Added original tenants blueprint to docs * Fixed broken import of version info (bug 902316) * Added missing import preventing keystone from starting (bug 901453) * Fix some issues with new version module * quantum\_auth\_token.py middleware fails on roles * Removed Server class from \_\_init\_\_.py * Fix auth\_token middleware: make \_verify\_claims not static. Fixes bug #901049 * Pylint fixes to auth\_token.py * Split version code into its own file * Change is\_global == 1 to is\_global == True * Bug 897496: Remove tenant id from Glance URLs * Refactor: move initialization code to class * Add missing json validation * Refactor: get rid of keystone/config.py * Fixes missed tests and subsequently introduced bugs * Rename .keystone-venv to .venv * Refactor: Rename auth controller to token controller * Added documentation * Added SSL and memcache sample config files * Updated auth\_token middleware caching to support memcache * Deprecating RAX-KEY middleware * Added argparse to support python 2.3 - 2.6 * Make bin/keystone use port settings in the config file. Fixes bug #898935 * Bug#899116: use correct module when building docs * Minor RST changes * Revised extension documentation * Added documentation for SQL tables * Remove pysqlite deps. 
Fixes bug #898343 * Pretty-printed JSON samples * Added option to pretty-print JSON * Implements blueprint keystone-swift-acls * Updated docstring to match auth\_token.py (bug 898211) * Bug #890801 Changes to support /extensions call. - Introduced a new extension reader to read static extension content. - Added additional rst files explaining extensions. - Removed functionality from additional middleware that used to support /extensions call.ie RAX-KEY-extension - Removed service extension test as it was no more relavent. - Added unit test that checks toggling of extensions. - Additional notes on the conf file * Added JSON validator; fixed samples (bug 898353) * Fixes a number of configuration/startup bugs * Fixed RST syntax (bug 898211) * Revised schema migration docs * Improved doc formatting consistency (bug 898211) * Fixed RST syntax in doc strings (bug 898211) * Added ssl docs to index; fixed rst syntax (bug 898211) * Bug-897724: Added method to list endpoints specific to a service and related tests * Eliminated debug output from sphinx\_build (bug 898211) * Updated testing * Fixes bug lp:897819 * Check that endpointTemplate ID is valid in endpoint add cmd (#897749) * Added Endpoint and Endpoint Template documentation * Bug #854104 - Changes to allow admin url to be shown only for admin users. 
- Additional test asserts to verify * Fixed memcache tests * Update documentation and examples following API 1.1 removal * Fixes bug 843065 * Additional middleware test coverage * Enforce service ownership * Add keystone\_tenant\_user\_admin option and fixes * Make owner the user named same as tenant/account * Restored developer default log dir * Add default for log directory and log filenames * Added wadls, pdfs, samples and functional test confs (bug 891093) * Additional documentation * ./keystone-manage endpointTemplates list missing arg (bug 891843) * Bug #890399 * Bug #891451: Changes to support update endpointTemplates call in the WADL * add an example for capability rbac * make readme use code style * add the policy code * describe and add a policy backend * policty stub * re-indent * Added timeout to bufferedhttp class and timeout setting for middleware - bug 891687 * Refactoring master to match stable/diablo fix for bug 891710 * Refactor auth\_token.py to only call out to Keystone once * Added files missing from dist packaging (bug 891093) * pylintrc should not be hidden (bug 891093) * Simplified gitignore (in pursuit of bug 891093) * Fixes typo in setup document * Adding middleware tests * Remove executable bit on template * change array syntax * updates to make compatible with middleware * mergeish dolph's port change * fix tests * handle unscoped requests * adjust default port * Revised version status response (bug 890807) * Refactored headers produced by middleware (bug 835087) * move noop to identity controller * Ignoring db migrate mgmt module to workaround bug 889287 * 'text/json' should be 'application/json' (bug 843226) * Revised curl examples (bug 884789) * allow setting user\_id on create * users require a name * pep8 * update test conf too * cli for adding users, tenants, extras * adjust paths and use composite apps * add tests for extras * add tenant crud * oops, forgot update in crud * add crud tests * add crud tests * add crud tests * add 
test for create user and get user * add test for create user and get user * re-indent identity.py * don't pep8 swp files * accept data as kwargs for crud * use the keystone app in the conf * reorg * re-indent service.py * Bug 888448: - Changes to allow validate token call return user name as per contract. - Additional test assertions to test the same. - Changes to middleware * more dyanmic client * get some initial identity api tests working * update service to middleware in confs * move around middleware * make a composite app * add crud methods to identity manager * Add a new swift auth middleware * Use TENANT\_ID if it exists, but still support X\_TENANT * cli beginnings * Bug 888170: Fixing references to incorrect schema * add admin port * add an etc dir * Bug #888210: Changes to fix calls to use the right path * bug 878431: Minor changes to auth\_token middleware * add a default handler for / * Bug #886046 Add Quantum auth middleware to Keystone source code tree * add a stubby setup.py * use paste for the binary * add a trivial admin-only middleware * update keystone sample tests, skip one * Bug #887236: - Changes to allow extensions to be configured. - Introduced a new property that holds list of extensions that are to be enabled * add crud info to readme * get novaclient tests working * add novaclient, intermediate * add run\_tests.sh and pep8 stuff * remove italics on Light * modify requirements * link diagrams * Track post-Diablo database evolution using migrations (BP: database-migrations) * Changed blatant hack (fixed spelling also) to 5 second timout as tests were not completing * Use TENANT\_ID instead of TENANT for project\_id * X.509 client authentication with Keystone. 
Implements blueprint 2-way-ssl * whitespace * added catalog tests * added tests for tokens * test the other methods too * add some tests and get others to pass * add some failing tests * add a default conf * minor whitespace cleanup * add some todo * fixed the output message error on granting user a role * Bug #884930 Support/Remove additional calls for for Tenant. - Supported call to get users for a tenant for a specific role. - Removed calls to get specific role for a user and to get all the roles for a specific tenant as they are not useful. - Fixed LDAP backend call to get users for a tenant. - Disabling Invalid pylint check * adding docs to test classes, updating run\_tests.sh to match reality adding debug middleware factory adding docs on enabling debug middleware resolving pep8 issues * Fixes LP Bug#885434 - Documentation showing multiple tenants misleading * add example * rst blah blah * updated readme * authenticate and tenants working * working authenticate in keystoneclient * remove test\_keystone\_compat's catalog tests * add templated catalog backend * Use pure version number ("2012.1") in tarball name * Set run\_tests.sh so pep8 runs in the virtualenv * bug 885364 * bug:884518 Changes to support passwordcredentials calls as per API contract. 
Minor LDAP code change to support tests * Fixed spelling of 'Resources' (Resoruces) * pep8 cleanup * everything but the catalog * Remove execute bit on keystone.conf * Fixes LP882760.Changes to return TenantId properly as part of roles.Additional tests to support the same * Moving contributor docs into rst (bug #843056) * fixing search sequence to not include directory structure from os.walk() * bug lp:882371 Standardize Json pagination structures * get a checkout of keystoneclient * bug lp:882233 Code changes to support API calls to fetch services/roles by name * Removed contributor doc build info from project README (bug #843056) * Revised documentation build process (bug #843056) * updates to keystone documentation - install & conf bug 843056 blueprint keystone-documentation * Specific LDAP version causing hiccups installing on latest ubuntu & fedora * Adding the concept of creating a Keystone HTTP client in Python which can be used in Keystone and imported from Keystone to allow for easier Keystone integration * Add .gitreview config file for gerrit * updating keystone developer documentation updating docstrings to remove errors in automodule generation updating setup.py to generate source documentation blueprint keystone-documentation bug 843056 * Changes to support getuser by name and gettenant by name calls * Changes to support get endpoints for token call * Additional changes to support endpointtemplates operations.Disabling pylint msgs that dont fit * Github markdown doens't seem to like irc:// links * Removed 'under construction' docs provided elsewhere * Updated self-documentation to point to docs.openstack.org * Revised documentation * Changes to endpoint operations as per OSKSCATALOG contract. 
Adding couple of pylint fixes * Refactored version attributes * Changes to support endpointTemplate operations as per new API.Fixed issues with command line manage stuff * Updated Secret Q&A to extend CredentialType * Changes to support API calls as per OS-KSCATALOG extension * Improved CLI error feedback (bug 877504) * authenticate working, too * base tests on keystone-diablo/stable * get tenants passing, yay * flow working, added debugging * add context to calls * move diagram into docs dir * refactor keystone compat and add catalog service * added sequence diagrams for keystone compat * Resubmitting change. Fixing issue #843226. Changes to throw appropriate faults during token validation * bug lp:865448 change abspath to dirname in controllers/version.py to correct path problems * Moving non core users and tenants calls to appropriate extensions * Fix issues in the ec2 middleware * Adding calls to get roles for user as per new format.Cleaning references to old code * Fixes LP844959, typo in Authors file * Changes to support roles and services calls via extensions. Change-Id: I1316633b30c2be07353dacdffb321791a4e2e231 * Simplified README * First commit for Secret Question and Answer Extension: RAX-KSQA * Fixing issue 854425.ie chaning token table name to tokens. Fixing issue 863667.Changes to support updation of user/tenant name as well using api calls. Fixing LDAP backend to have id independent of name.Fixing getuser call to also return name * Fixing bug 859937. 
Removing incorrect atom feed references from roles.xsd * Minor corrections to the middleware and wadl * Changes to show name also for the user list * Changes to show admin URL also as a part of json in endpoints listing * getting closer, need to match api now * tests running through, still failing * add a test client * added a test, need to get it working now * Use the tenant name for X\_TENANT * Fix possible\_topdir computing * Change roleId to role.id for swift middleware * adding in doc and setup to cover existing scripts adding doc around credentials command usage (for EC2) 2011.3 ------ * Updating legacy auth translation to 2.0 (bug #863661) * Shouldn't look in /etc/init/ for config files * Changing default admin port from 5001 to 35357, per IANA/IETF (bug #843054) * Organizing and documenting pypi requirements * sample data updates to remove -service from image and identity * Refactor and unit test json auth parsing * Error message expecting 'e' in local scope * Do not return identical error messages twice * Update auth examples in README * README.md changes to point to openstack repo * updating docs for Mac source install, no docs for mac package install relevant * POST /tokens: Added tenant id & name to scoped tokens in XML (#862752) * Updated guides.Have recompiled to use the latest examples * Fix bug 861546 * Fix swift middleware with regard to latest changes * Changes to support getTenants to behave differntly for admin users when invoked as a service api or admin api * Changes to stored hashed password in backends. Using passlib a password hashing library. Using sha512. 
Setting hashing to be the default behavior * Changes to WADLs to refer actual types * Revised docstring * Added /etc/init/keystone.conf to list of known configuration paths * Revising tenant IDs & Names in samples (#854228) * Authenticating against non-existent tenant (fixed #859927) * Adds list of dependencies to dev install * Fixed Anne's email address & list position (alphabetical) * Added support for scoping by tenantName * Changes to return groups as a part of RAXKSGRP extension.Also fixed incorrect schema version references in wadls and examples * Changes to support authenticate call to accept token as per agreed format * Minor changes to wadl * Making type mandatory as per sandy's request and minor fixes to wadl examples. Adding Ann as an author * Changes to structures to support authenticate using token. Minor wadl fixes. Adding Anne as an author * Removing token element from token.xsd * Update to token.xsd to allow element token as a root element in relation tu bug: https://bugs.launchpad.net/keystone/+bug/855216 - apiKeyCredentials Samples casing apiKey update * Changes to support endpoint template addition/listing by service names. 
Changes to list service details as well * Modified apiKeyCredentials to extend single entity and use restriction * Reorder params in User() constructor * Fix for bug 856857 - add user.name to User() constructor to re-align param * Fix for bug 856846 - cast ints to string in users\_get\_by\_tenant\_get\_page so that they can be joined * POST /tokens: A chronicle of missing features * Fixes issues with ldap tests * Get Service Catalog from token * Fixes auth\_token middleware to allow admin users in nova * Initial set of changes to move role operations to extensions * Updating guide wrt wadl changes * Minor Changes to extension WADL * Changes to support auth catalog as per new format * Changes to docs * Adding tenantid to user roles and endpoints * Fixes bug 855823 * Add code removed in https://code.launchpad.net/~vishvananda/nova/remove-keystone-middleware/+merge/76297 to keystone * Added support for HEAD /tokens/{token\_id} Changed POST /tokens response container from 'auth' to 'access' * Making identity-admin.wadl well-formed * Converting to new doc format for included code samples * Changing authenticate request content xml as well as json * GET /tokens/{token\_id}: Exposing both role ID's and Name's * Renaming 'roleRef' container to 'role' * Renaming 'roleRefs' container to 'roles' * Renaming GET /tokens/{token\_id} response container to 'access' * Revised samples * Fixed path issues with keystone-import * Update validate\_service\_or\_keystone\_admin\_token so that it doesn't cause exceptions if the admin or service admin haven't been configured * Changing/introducing actual extension json/xml snippets. Adding updated documents * Backend-managed role & service ID's (bug #834683) * Initial Changes to move service operations to extensions * Docs,wadls,samples,initial code to support RAX-KSKEY and OS-KSEC2 extensions. 
Removed tenant id from being part of endpoints * Glance Auth Token Middleware fix * Sorted AUTHORS list * adding imports from Nova for roles, tenants, users and credentials * Update keystone-manage commands to convert tenant name to id. Fixes #lp849007 * 1.Changed all Json paginated collection structure. 2.Introduced a type for credential type (path param) and change wadls and xsds. 3.Added List Users call. 4.Changed Endpoint creation example * Don't import keystone.test unless we are in testing. Fixes #lp848267 * Add toggle to run tests in-process, w/ realtime progress feedback * Add ability to run fakeldap in memory * Added backend-managed primary key to User and Tenant model * Introducing doc to support OS-KSCATALOG extensions.Adding new calls to OS-KSADM extension document * Adding initial document for OS-KSADM-admin extension.Related changes on wadl,json,xsd etc * Fixing sample content * Adding new doc.Changes to sample xmls and jsons * Validation content and relavant changes * Minor fixes on xsds and sample xmls * Fixing existing wadl.Completing wadl for extension OS-KSADM * Fix invocations of TemplateError. This exception takes precisely three parameters, so I've added a fake location (0, 0) to keep it happy * Adding wadl for OS-KSCATALOG extension.Fixing existing xsds.Fixing service wadls. Merging changes. Change-Id: Id29dc19cbc89f47e21329e531fc33bd66c14cf61 * Update Nova and Glance paste config examples * Various documentation-related changes * Consolidating xsds. Splitting contrib to admin and service * Adding guides for groups extension * Fix host/port split code in authenticate\_ec2. Resolves an AttributeError: 'Ec2Credentials' object has no attribute 'partition' exception that can occur for EC2 auth validations * Adding guide for RAX-KSKEY-service extension. Adding guide for OS-KSEC2-service extension * Fix NameError exceptions in add\_credentials. Adds test case on creating credentials * Redefining credential types. 
Defining additional extensions and renaming extensions. Removed wadls that are not needed * Fix for duplicate tag on credentials.xsd * Move tools/tracer into the keystone code. Fixes ImportError's when running keystone as a .deb package * Fixed error where endpoints returned for tenant instead of token * Updated the AUTHORS file to test the new rpc script and workflow * Update rfc.sh to use 'true' * Made it possible to integrate with external LDAP * Dev guide rebuild and minor fixes * Updates to samples, XSDs, and WADLs * Added AUTHORS, .mailmap and generate\_authors.sh * Changes to support endpoint template updates * Fixes bug 831574. Adds missing sys import * Updated schema to reflect id and name changes to Users and Tenants * Updated guides and samples * Additional contract changes * Sample changes * Atom links on Token * Cleanup service it endpoint catalog * Removed redundant function from base user api * Updated samples * Fixed reference to unassigned variable * Reworked XSDs and WADL to support auth and access elements * Remove more group stuff * Removed OSX files that shouldn't be in git * Documentation cleanups * Banished .DS\_Store * Add rfc.sh for git review * Wrong common namespace * XSD & sample updates * Added more missing files to MANIFEST.in * hanges to allow test to work on python 2.6.\* * Cleaned up come issues with python2.6 * Refactored manage.py to be both testable and useful for testing * Sample changes to support v2.0 api * Sample changes to support v2.0 api * Admin WADL Revisions * Add the files in keystone/test/etc * Add run\_tests.\* to the MANIFEST.in * Keystone manage.py cleanup * Tests running on in-memory sqlite db * Additional changes to fix minor service support stuff and increase test coverage. Also making validate token call available using service admin tokens * Made all sample data loading in one script * Minor fix to run\_tests * Contract changes * Admin WADL updates * Port of glance-control to keystone. 
This will make writing certain keystone integration functional tests a little easier to do * Updates to XML and JSON changes for validateToken * Added pylint message count as run\_tests.sh -l * Added reponse handling for xsd static file rendering III Extra extension tests (for RS-KEY) * Creating an artificial whitespace merge conflict * Moved run\_test logic into abstract class * Git-ignore python coverage data * Added reponse handling for xsd static file rendering * Additional tests and minor changes to support services CRUD * Added reponse handling for xsd static file rendering * Schema updates. Split WADLs and extensions and got xsds to compile * Ziads changes and fixes for them * Added check\_password to abstract backend user API * Doc changes, including service catalog xsd * Fixed service-bound roles implementation in LDAP backend * Removed ldap names import from fakeldap module * fix ec2 and add keystone-manage command for creating credentials * Legacy auth fix and doc, wadl, and xsd updates * Replacing tokens with the dummy tokens from sampledata.sh * Add option for running coverage with unit2 * Adding curl documentation and additional installation doc. Also updated man documentation for keystone-manage * Changes to improve performance * Removed the need to set PYTHONPATH before tests * Back to zero PEP8 violations * Schema and WADL updates * Adding documentation to WADL * Correct 401, 305, and www-authenticate responses * Correct 401, 305, and www-authenticate responses * Correct 401, 305, and www-authenticate responses * Added xsd content, update static controller, and static tests * Updated wadl * Fix LDAP requires to compatible version * Moved password check logic to backend * Changes to delete dependencies when services,endpoint\_templates,roles are being deleted. 
PEP8 and Pylint fixes.Also do ldap related changes * Add LDAP schema * Add wrapper for real LDAP connection with logging and type converting * Fix console and debug logging * Redux: Add proper simple\_bind\_s to fakeldap * Adds support for authenticating via ec2 signatures * Changes to allow additional calls to support endpoint template CRUD and additional checks on existing method * Committer: Joe Savak * Refactoring business logic behind GET /tenants to make it less convoluted * Moved run\_tests.py to match other projects * Revert "Add proper simple\_bind\_s to fakeldap, removed all imports from ldap." * Add proper simple\_bind\_s to fakeldap, removed all imports from ldap * Gets Keystone a bit more inline with the way that other OpenStack projects run tests. Basically, adds the standard run\_tests.sh script, modifies the run\_tests.py script to do the following: * Changes to support CRUD on services/roles * Issue #115: Added support for testing multiple keystone configurations (sql-only, memcache, ldap) * Added automatic test discovery to unit tests and removed all dead tests * PEP8 fixes... all of them * Small licensing change to test Gerrit * Small change to test Gerrit * Fix brain-o--we may not need project\_ref, but we do need to create the project! 
* updated README with more accurate swift info * Determine is\_admin based on 'Admin' role; remove dead project\_ref code; pass auth\_token into request context; pass user\_id/project\_id into request context instead of their refs * Added support for versioned openstack MIME types * #16 Changes to remove unused group clls * Add unittest2 to pip requires for testing * #66 Change in variable cases * #66 Change in variable cases * Changes to make cache time configurable * Changes to store tokens using memcache #66 * Changes suggested by Ziad.Adding validateToken operation * Flow diagram to support keystone service registration * Restored identity.wadl w/ system test * pylint fixes for role api * Removing attribute duplicated from superclass; causes an issue in py 2.7 * pylint fixes for tenant-group unit tests * pylint fixes for server unit tests * Making the API version configurable per API request * PEP8 fixes for system tests * Issue #13: Added support for Accept-appropriate 404 responses w/ tests for json & xml * Simple change to test gerrit * Document how to allow anonymous access * Sigh. Proofreading.. * Update README with instructions to fix segfault * These changes make no sense--I didn't do them, and I'm in sync! * Add middleware for glance integration * #3 Preventing creation of users with empty user id and pwds * Fixing naming conflict with builtin function next() * This makes the use of set\_enabled more clear * Fixes failing test introduced after disabled check remove * Changes to allow password updates even when the user is disabled.Also fixed failing tests * Disabled users should now be returned by GET /users/{user\_id} * Updating a disabled user (via xml) should now succeed * Updating a disabled user should now succeed * Noted potential issue, but I'm not sure if this is dead code or not anyway? 
* Assigned Base API classes so downstream code knows what to expect * Adding missing class variable declaration * Cleaning up unit tests * Removes disabled checks from get\_user and update\_user * Fixing module-level variable naming issues * Improving variable naming consistency * Avoiding overloading of built-in: type() * Fixing indentation * Specified python-ldap version, which appears to avoid the packaging issues we've experienced * Added missing import * More LDAP tweaks * LDAP backend updates * More test fixes * Fixed deprecation warning * Updated test to allow for additional role * Restored UnauthorizedFaults to token validation requests * Fix for issue #85 * - System test framework can now assert specific response codes automatically - Revised system test for issue #85 based on clarification from Ziad - Added system test to attempt admin action using a service token * Adds the member role to sampledata, gives it to joeuser * PEP8 fixes * Formatting * Merged duplicate code * Add first implementation of LDAP backend * Added (failing) system test for issue #13 * Minor cleanup * Made all API methods raise NotImplementedError if they are not implemented in backend * Made delete\_all\_endpoint calm if there is nothing to do * Fixed bug causing request body setting to fail * Add check to sqlalchemy backed to prevent loud crush * Tweaked import\_module to clearly import module if it can * Removed hardcoded references to sql backends * Add exception throwing and logging to keystone-manage * Merging keystone.auth\_protocols package into keystone.middleware * - Added 'automatic' admin authentication to KeystoneTestCase using bootstrapped user - Added system tests for admin & service authentication - Abstracted '/v2.0' path prefix away from system tests - Added simple uuid function to generate data for system tests (random number gen w/ seeds might work better?) 
- Refactored issue #85 tests with setUp & tearDown methods * Clarifying test case * Fixed minor pylint issues * Removed tenant id from admin user * Move dev guide to OpenStack * Commented out failing request, until it's review * Wrote test case for github issue #85 * Formatting change * Was this a typo or an incredibly lame joke? * Added missing imports and fixed a few pylint issues * Improved dict formatting * Improved readability a bit * Abstracted underlying HTTP behavior away from RestfulTestCase Added 'automatic' JSON body encoding (TODO: automatic XML encoding) Improved user-feedback on automatic response status assertion * Added run\_tests.py to keystone.test.system, which uses bootstrap db script * Added bootstrap configuration script (with admin user assigned an Admin role) * Added 'automatic' token auth for each API * Refactored port configuration strategy to allow a single test case to address both the admin and service API's * Added automatic json/xml parsing to system test framework * Added system test discovery to run\_tests.py * Added system tests for content type handling and url rewriting * Updated tests to reflect last bug fix * Extracted sample test from framework and moved system test framework into \_\_init\_\_ * Converted system test framework to use httplib * Initial system test approach, using urllib2 * Fixed bug: traceback thrown when the path '/' is requested * Updated \*unused\* tests to reflect refactored API's * Removed some useless/dead code * Cleaned up authentication tests * Improved readability slightly * Moved db imports to config module Removed useless try/except blocks * Organized imports * Simplified a few util functions * Fixed line length * Renamed service API configuration options * Renamed ServiceApi router module * Renamed ServiceApi router * Cleaned up keystone.logic * Removed unused logger * Refactored routers and controllers into their own modules (issue #44) * Fixed doc string * Improved PEP8 compliance * Fixed spelling 
* Removed unused import * Slightly simplified base wsgi router * Added note about run\_tests.py to readme * Organized imports * Improved readme consistency * pep8 * Pylint an pep8 fixes * Fixing bug reported using with swift * Fixed default content type behavior (was defaulting to XML) * Removed redundant action mappings (for version controller) * Renamed exthandler to urlrewritefilter to better illustrate it's purpose * Minor comment change * Refactored URL extensions handling (for .json/.xml) Added universal support for optional trailing slashes * Return users in a tenant as part of a many-to-many relationship * Added import, autoformatting * Removed unused imports * Moved exthandler to keystone.middleware * \*\* keystone.conf refactoring \*\* * Fixed 'is\_xml\_response' function, which had no clear intention * Removed unused function * Rewrote .json/.xml extension handler with additional unit test * Added links to readme * Added python-ldap to pip-requires * Initialized LDAP backend * Various fixes for test running * Commented out suspicious unit tests..... 
* Added test automation script * Cleaned up file * Added missing test files to test collection * Made unit tests executable from the cmd line * Added test\_auth to list of unit tests * Update auth test to account for generic service names * Changes to make Admin for keystone configurable.#27 * Remove old initializers * Changes to introduce BaseAPI to support multiple back ends * Changes to support dynamic loading of models * Adding list of todos * Initial changes to support multiple backends * Fixed identity.wadl response - issue #71# * Recompiled devguide with endpoints and templates * Removed unnecessary symlink * Changes to support endpoints and endpointemplates (renaming BaseUrls and BaseURLRefs) * Make swift middleware live where it should * Remove swift-y bits from generic token auth * Changes on Sample data * Code changes to support global endpointTemplates * Swift-specific middleware * Issue 31: Switching default ports to 5000/5001 (public/admin) * Fixed readme instructions for Nova - Issue #55 * Fixed requires for development and in readme * Bringing back the changes to support endpointTemplates and endpoints * Readme fix * Edited keystone/auth\_protocols/nova\_auth\_token.py via GitHub * Issue 32: Updated readme to reflect fix for issue 32 (removed 'cd bin' prefixes before several commands) * (Related to) Issue 32: bin/sampledata.sh cannot be executed outside of bin/ * Issue 32: ./bin/keystone cannot be executed outside of bin/ * Issue 31: Reverted ports to 8080/8081 while the issue is under discussion * Adding endpoint related files * Updated readme to reflect docs/ -> doc/ change Added tools/pip-requires-dev for depelopment dependencies * Basic authorization for swift * Republished developer guide for Jun 21, 2011 * Updated token validation sample xml (dev guide) * Updated dev guide publish date * Added developer guide build folder to git ignore list * Auto-formatted and syntacically validated every JSON example in the doc guide * working with dashboard 
* add get\_tenants * rudimentary login working * most bits working * initial * Reverting change thats not needed * Fixing some of the failing tests * Merging changes from trunk * demo of membership using keystone in sampledata * Name changes BaseURLRefs to EndPoints and BaseURLs to EndpointTemplates * Fixed formatting, imports * Issue 31: Updated docs and examples * Committing unit test configuration for issue 31 * Issue 31: Changed default ports to 80/8080 * Issue #8: Renamed primary key of Token to 'id' * Name changes BaseURLRefs to EndPoints and BaseURLs to EndpointTemplates * Changes to hash password * Restored tools.tracer to bin/ scripts; included fix for empty frames * Merging changes * Removed unused import * Removed redundant sentence in dev guide * Removed unused imports in bin/ * Fix for keystone issue 41: https://github.com/rackspace/keystone/issues/41 * Merging changes from rackspace * Fixed spelling error * Changes to include support for paginations * Fixing existing methods on wadl * Fixed broken unit test code * Refactored api function names to avoid redundancy with new module names * Changes to wadl to support user operations * Refactored DB API into modules by model * Pep8 changes * Changes to allow user creation without a tenant * for got to change a 1.1 to 1.0 * dash needs both 1.0 and 1.1 compatability - need to fix that! 
* nova needs 1.0 api currently * Some field validations * Merged docs * make sampledata executable again * Admin for nova doesn't take a tenant * add keystone to its own service catalog * Fixed error on UrlExtensionFilterTest * Fixed imports; improved PEP8 formatting compliance * Fixed imports in keystone.common * Removed unused imports and denoted unused variables * Fixed imports in auth\_protocols * Removed duplicated function * Added coverage to pip development requirements * Fixed relative & unused imports * Adding py init to functional tests * Created pip requirements file for development env (added sphinx python doc generation to start) * Added pydev files to gitignore * Added py init files to directories already being referenced as modules * Users must have tenants or nova breaks * Doc updates and dev requires * Resolved conflicts * To PUT or to POST * Fixed v1.0 auth test to account for cdn baseURL order * Support for GET /v2.0/users and add cdn back to sampledata for v1.0 support * Update the baseURL data pushed into glance * Fix symlinks after docs -> doc rename * Adding call to modify tenant.Adding more tests and fixing minor issue * Added pip requirements file for testing environments * Grammar corrections * Adds Sphinx build ability and RST documentation * Removing unused references to UserTenantAssociation * Introduced a method to get all users @Users resource.Also moved the method to get user groups out of tenant scope * Changed BaseURLs to OpenStack names * Test fixes * Seperating user calls from tenants * Improved README formatting/consistency * Updated paths to unit/function tests in README * Updated docs: sampledata.sh can't be executed outside of bin/ * Added Routes and httplib2 to production dependencies * Correcting typo * Setup.py fix * Readd test folder * Forgot to add doc file * Moved tests to keystone folder and removed old management tools - issue #26 * Updated SWIFT endpoint default * Update to dev guide explaining admin call auth 
requirements * Update sample data and keystone-manage for local install of OpenStack * Put updated Swift Quickstart into README.md * API v2.0 Proposal * Doc updates.Minor keyston-manage changes * Doc updates * Doc updates * set nova admin role if keystone user has "Admin" role * keystone repo is now at github.com/rackspace/keystone * Add success test for GET /v2.0/tokens/ in json and xml * Add Admin API tests for v2 authentication * Add test verifying a missing tenantId key in the password creds works properly in JSON * Rename file.Ziad suggestion * Name changes suggested by Ziad * Minor fixes * Code cleanup * PEP8 changes * Removing redundant files * Changing to legacy auth to standard wsgi middleware.Name change of some of the files * Changing to legacy auth to standard wsgi middleware * Introducing new frontend component to handle rackspace legacy calls * Introducing new frontend component to handle rackspace legacy calls * keystone repo is now at github.com/rackspace/keystone * Add success test for GET /v2.0/tokens/ in json and xml * Add Admin API tests for v2 authentication * Add test verifying a missing tenantId key in the password creds works properly in JSON * Removing debug print * Changes to return service urls for Auth1.0 style calls * Changes to return service urls for Auth1.0 style calls * Updating tests and sample data * Merging changes from rackspace * Changes to support service catalog * pep8 * Added URLs to sampledata * Support for listing BaseURL refs in keystone-manage * Support transforming service catalog * Removing remerged comments * Adding roles as comma seperated values on a single header * Changes to support getTenants call for user with admin privelage and regular user * Add more test cases for v2 authentication for bad requests and unauthorized results * Add test case for verifying GET /v2.0/tokens returns 404 Not Found * It's possible to authenticate through the Admin API * Changes on auth basic middleware component to return roles.Also 
changes on the application to return roles not tied to a tenant * Update the sample to reflect some minor enhancements to the base framework * Add test for validate\_token * Save expiration data for later comparison * Don't need to fiddle around with user tokens here, just admin tokens * Get and revoke both admin and user tokens.. * Merging changes * Bah, somehow my sample data failed to include Admin as admin's role * Merging changes * Merging changes * Merging changes * Meging changes * Changes to also return role references as a part of user when get token call is made for a specific tenant * Use un-spaced exception names.. * Try to use an admin credential to revoke the token * Split the Keystone service from the Admin service so we can test both * The API is a moving target; update the test * Support for listing roles in keystone-manage * Adds unit testing base class that takes care of much of the tedium around setting up test fixtures. This first commit just demoes the new test case functionality with a new test case /test/unit/test\_authn\_v2.py * pep8 * Fixed issue #6 * Support POST /tokens only - issue #5 * Added quick start guide to integrating Swift and Keystone; fixed setup.py tokenauth filter installation * Added role and user data to sampledata.sh * Additional unit tests for base url refs.Minor code refactorings * Changes to support baseurlrefs operations * MD cleanup * md futzing * More readme cleanup * Merged DTest tests and moved ini file to examples/paste * moved paste example to examples * Readme updates * Just making sure leading whitespace is stripped if automated * to->too * Updated dev guide * Add a sample to document how to create tests * Add a test for authenticate/revoke\_token * Ensure that --username, --password, and --keystone are given * Build base classes for tests * Documentation fixes to versions * Build the skeleton necessary to run tests * Add x\_auth\_token header to most methods * Make sure we don't lose the body completely if we 
can't json.load() it * Add debugging messages * Add a property to get the RESTClient instance * Fix up get()/put()/post()/delete() calls to make\_req() * Deal with the case that no headers are provided * Deal more intelligently with empty strings * Listing technologies to integrate * Um, queries are supposed to be optional, all others required * Properly join relative paths * Apparently "/token" is actually spelled "/tokens" * Accidentally left out the reqwrapper argument * Sketch in a basis for the Keystone API 2.0 * Make argument order a little more natural * Fixing unit tests.Introduced support for global roles * Don't let self.\_path be the empty string * self.\_scheme isn't set yet * Don't add a field if there isn't one.. * Create a simple means of building a REST-based API * Fixing unit tests for user and groups * Docs * Link fix * API Spec updates * More /token -> /tokens fixes * /tokens instead of /token * Prep for move to git@github.com:rackspace/keystone.git * Made URL relative * pep-8 and minor mapping fix * Dev guide update - BaseURLs and Roles * Update docs on how to use nova.sh to deploy openstack on cloud servers * Changes to support calls to getBaseUrls * Changes to support /tokens on docbook and minor roleref changes * Changes to support roleref calls * Updated to use X\_USER as decided in Issue 49 * Updated with feedback from https://github.com/khussein/keystone/issues/49#issuecomment-1237312 * Fix for issue 49 - parse X\_AUTHORIZATION header for user\_id * Fixed issue where user tenant not returned in GET /token - related to issue #49 * user should be what keystone returns * Fixed issue #54 * Updated to use X\_USER as decided in Issue 49 * Updated with feedback from https://github.com/khussein/keystone/issues/49#issuecomment-1237312 * Fix for issue 49 - parse X\_AUTHORIZATION header for user\_id * Minor changes to the document * Changes to unique relationship definition * Adding more tests for roleref operations * Fixed issue where user tenant 
not returned in GET /token - related to issue #49 * Changes to support /tokens on docbook and minor roleref changes * Changes to support roleref calls * user should be what keystone returns * midnight typo * Added examples readme * Fixed issue #54 * Link to latest dev guide in readme * Instructions to run with Nova * Documentation update and new API spec * Updates to README * Updates to README * Updates to README * Updates to README * Updates to README * Updates to README * Fix up broken setup.py scripts list * -Removed .project file from project and added it to .gitignore -Moved pylintrc -> .pylintrc, personal preference that this file should be available, but not seen -Moved echo to examples directory, seemed a bit odd to be in the top level -Moved management directory to tools, seemed a bit odd to be in the top level -Moved pip-requires to tools/, and updated the reference to it in README.md * Fix the identity.wadl symlink * keystone src directory needs symlinked * remove copy&paste ware from nova\_auth\_token and use auth\_token middleware * Flow diagrams * simple flow diagrams * Multi-tenant token fixes * Fixed invalid tenant authentication * Fix error in tenant\_is\_empty (model has changed) * Fixed debug/verbose flag processing * update readme * keep nova\_auth\_token in keystone * Changes to support /Roles calls.Removing create call from being exposed as of now * Changes to support /Roles calls.Description included * Changes to support /Roles calls * Readme merge * Readme updaes for load testing * hack nova\_auth\_token to work * removing unused library * Changes to support roles and baseurls on wadl * Changes to support roles and baseurls on wadl * Changes to support roles and baseURLs * missed some nova reqs * information on using nova\_auth\_token * lazy provisioning for nova * readme fixes * Merged in anotherjesse's changes * New model working with echo\_client.py * Missed a file * Added tracing and modified model * echo\_client should be executable * 
move nova's path injection to management scripts * server.py/version.py shouldn't be executable while cli tools should * spacing for readme * Add keystone-manage to support bootstrapping Keystone with add user command * Setup.py update * Updated logging and parameterization for bin scripts * Minor readme fixes * Simplified running Keystone and Updated readme * v1 compatibility and Service/Admin API split * DocBook Changes * Merging HCL changes - pull 40 * Changes to support baseurls and roles on the document.Adding sample files * Changes to support baseurls and roles on the document * Adding xsds to support roles and baseurls * More version fixes * Initial commit * Make config compatible with legacy * Move to v2.0 * Changes to move the db settings to conf file * removing bottle * Adding Accept header to is\_xml\_response logic * Removing bottle dependencies * Mae Pylintrc, reordered imports made pep8 of the files * Foundation for some server and auth unit tests * Added as per HACKING Files * pylint fixes * fixes * fixed test cases * Merged api,service,server,test\_common * Added test cases for add user to a tenanat * multi token test cases and bug fixes * Moved all Server functions to utils.py * Fixed failing test - bug introduced in cleanup * Added pylint and cleanup from last commit * Merged pull 37. 
Removes bottle, adds configuration, and adds daemonization * fixed pylint * fixed bugs * fixes * fixes * removed backslashes * Added functionality add user to a tenant * fixes * Pep8 test\_users.py * checking SSLv3 problems * checking SSLv3 problems * checking SSLv3 problems * checking git push problems * Optimised test\_users.py * Modified the README and README.md * fixed bug raised when included exthandler * Removed unwanted file * removed unused run method * Added PEP8 to test cases * Removed importing objects from keystone * pylintrc optimization * optimization of test cases and handling multi token * fixes * Nochanges * Modified the README for keystone-control issue * Modified the README * Added PEP8 for remaining test cases * PEP8 for test cases by praveena * renamed test\_identity.py to test\_keystone * added pidfile and removed print statement from test\_common * fixes * removed print statement * Added keystone.log to ignore list * Modified server.py tenant group URL to fix failing test cases * Added \*.log to gitignore * neglect changes * Added new script to run all tests * Modified and tests. 
Tests groups throwing some minor errors still * Modified and commented the code * Split the test cases into individual files Fixed Bugs of api * Made PEP8 of server * Too much of duplication and incomplete conflict resolution in test\_identity.py * Sisirhs changes * Sai and Praveena's Changes * Added missing tests, mad e enable and disable password work * merged conflicts * test cases modfications and bug fixes * Renamed to server.py and added top dir in config * Added the keystone top dir in configuration * Modified the README * latest updates * latest updates * new merge with installation fixes * A brief README for the auth-server * Added keystone-control * chasing tenant group bug * Added tests for the URL extension middleware * modified keystone-control and reshuffling of file names * Adding unit test for the URL extension handler * Modified test cases * Yes, I modified, but I wont commit * merged Sai changes * Installation of keystone done * corrects charset=utf=8 * Working on echo server * one more push * move the template code from bottle into a separate file: * modified auth\_server.py * Added echod and renamed echo.py to server.py * Minor cleanup + pep8 * merging changes from sai branch * saving changes to auth\_server.py * get version implementation s Please enter the commit message for your changes. 
Lines starting * get\_version\_info is still not working * in the middle of get\_version\_info * Modified test\_identity * removed .auth.serve.py.swp * Added some more functions through Routes and mapper * Update for Abdul * My Changes part 2 * modified Resposne to resp=Response() * My Changes * minor tweak * Some more cleaning up of git merges * Cleaning up of git merges * Added glance type of eventlet, because of its plug and play which meets the need of running everything independently if needed * pep8 and fixes * Readme updates * Removed keystone.db - should be generated by ORM * Removed extra files from last commit * Removed Global groups tests, which still needs to be tested. Updated README on how to run unit test * Deleted keystone.db * Merged pagination * Git problems - lingering commit * Renamed identity.py to server.py and added bin directory * Adding router to requires. Updating standards in HACKING. Removing schema (generated from ORM) * Added pagination functionality and tenant\_group functionality with unit tests * Removing unused imports * Removing unused function * unwanted file * added the code that would go to hussein repo * Added tenant groups in identity, created test cases for tenant groups * Added latest changes to sirish branch with pagination for get tenants * Annotate TODOs * argument handling in echo.py * getting pep8-y with it * Merged conflicts * Basic auth and refactor * more pep8 * testing merging * get \_tenants pagination updates * Merging keystone code * Basic Auth support * 17: query extension works * Issue 17: Adding tests * removed \r chararcter from unit directory * removed windows newline characters from management folder * removed unwanted files * Adding First kestone repo * Add Description File * sai added by sai * Foo2 * Foo * Initial * Minor changes + call using WSGI instead of bottle * Restored remoteauth * Reverted accidental(?) 
WADL deletion >:-( * Renamed protocol modules to auth\_[type] Renamed PAPIAuth to RemoteAuth - better documented it and added redirect to auth\_token (to stop using this) Cleaned up ini files and ini file handling (removed hard-coded defaults) * simple json cleanups for tests * pep8-ize * Added protocol stubs (openid and basic auth) * Renamed delegated to 'delay\_auth\_decision' Remove PAPIAuth Rename folder to Auth\_protocols (that is where we add protocol components)Get\_request -> get\_content Make protocol module more generic (prepare for superclassing and multiple protocol support Refactor Auth\_protocol\_token If no token, bail out quick (clearer) same with if app Break out headers: - here is what is coming in - here is what we add - explain the X in headers: extended header * Updated Readme, and added TODO * Added XML/Json tests to the identity and updated the README * Fixed issue with standalone install * Updated readme * Fixed remote proxy issue * draft remote proxy: needs fixing * Updated readme and echo\_client * Adding remote echo ini file * Fixes to middleware, ini parameters, and support for running echo remotely * replaced localhost with config * modifide middleware; echo\_client works * Fixing and documenting middleware * Merged pull request #30 from cloudbuilders/master * Updated management scripts to use SQLAlchemy * Fixed SQLAlchemy db location to keystone directory * Added unit tests and updated the README.md on how to run it * made echo test work * get\_request is actually init model from request contents * missed simplejson assumption * finish removing simplejson * pythonizing * update fault to be pythonic * remove unpythonic properties from atom and tenant * error decorator and logging unhandled errors * missed auth\_data * fix typos * more pythonic * we don't need properties yet * use string formating * use relative import in init * fixed paste configs to run without eggs * Fixed mistake in port for echo service * Added echo\_client.py * 
keystone.db should be in keystone dir * pep8 / whitespace * gitignore pyc files * split out running and installing sections in readme * allow apps to be run without setup.py * add command for test database to readme * echo has a separate setup.py * httplib2 isn't used * spacing * add httplib2 to deps and sort them * Added pip-requires and updated readme to include missing deps * explict installs for python libraries * update readme formating * update readme to be markdown * Updated readme * Doc fixes * Friendly error message if a user is not associated with a tenant * Ensure schema complience assertion is on in all tests * Whoops, details element is optional in faults * Remove identity (1) stuff and renamed identity2 to identity * Added wadl and xsd contract links * Adjust reletive links in schema * Comment seperators * Init version links * Initial version support * Initial extensions support * Initial update tenant * Make sure we don't delete non-empty tenants * Initial delete tenant * Initial getTenant * Minor updates to tests * Initial implementation of get tenants * added unit tests in test/unit/test\_keystone.py * Initial create tenant * Minor bug when serializing tenant to JSON * Schema update * Whoops forgot 409 in JSON as well! * Whoops missed 409 on create tenant * setup.py fix * Minor fixes * pep-8 cleanup of model * More pep-8 cleanup * Minor fixes * Some pep-8 cleanup * Initial revoke token * Initial support for authenticate * Whoops, bad user data * Initial working validate token * Whoops need to convert datetimes to iso format * Test updates * tokenId should not be a string! 
* Cleaned up validate token call * Full check admin token with soap ui tests * Some SQL testing scripts * Initial check admin token from db * made identity.py pep8 compliant * Better error handling * Initial full response to authenticate token, still having issues with errors * Stubb for token calls * Initial prototype of default token based auth protocol * Initial deserialization of tenant * Initial deserialization of password credentials * SQL Alchemy additions: Token * SQL Alchemy additions * Whoops pep8 * Output serialization of faults * XML and JSON rendering on tenant/s * Translations of auth to XML and JSON * Sample service.py with sqlalchemy * Fixed relative path issue * sqlalchemy draft * Initial service.py * Cleaned up setup.py * Added collections * Initial atom link type * Initial fault type * Initial tenant type * PEP-8 for echo.py * Initial auth types * Readme update * Fixed identity.py and some styling * Minor updates * Keystone WSGI and eventlet * Corrected how to run echo service * Replaced paster with eventlet for echo service * Added create tables in README and modified keystone.db to reflect the new schema * Merged identity functions second time * Sync * Whoops should have never checked this in * all management files except user add and delete from group * Management files except for add/delete user from group * Updated README * Setup PasteDeploy and configured PAPIAuth * reorganization of files * Add SOAPUI projects * Resolved Conflicts * Removed Conflicts * dos2unix * Deleted IDE files * Importing from DevTeam * Import from DevTeam * updates DevTeam * Code by Dev Team * Added Power API Auth Middleware * removed unused libraries * Dev Team: validate\_token , create\_user ( created for test purpose) and update\_tenant * Added to README * Fixed bug in echo.py * Whoops forgot auth header * Instructions for soapUI * Add WADL links for convenience * Initial work into paste deploy...commen out for now * Added echo.wadl * Fixed for case with missing 
accept header * Added content nagotiation * Use XSL to convert * Better quote handling * Add JSON transform * Whoops samples don't match * XSD for echo service * Initial echo service * Updates to identity.py and README * Added X-Auth-Token * Added extensions * Updated errors for extension requests * Added getTenant, updateTenant, deleteTenant * Added get and create tenants * Initial WADL with token operations * Added faults * Remove refrences to usernameConflict and groupConflict * Added common extensions * Added api.xsd schema index * Added XSD 1.1 and atom linking support * Made the tenant xsd extensible * Initial tenant xsd * Made the token schema extensible * Initial token schema * Groups should have ids instead of names? * Added Creating Tenants, JSON only * Remove mention of service catalog * Updated samples * Updated pubdate * Updates to intro section * Updated concepts * Better entities in document * Removed init section from docs, we'll get to them later * Added Dependencies section * Added License & Create/Delete user management CLI * Initial docs import * Created DB with users table, simple schema * first commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/HACKING.rst0000664000175000017500000000315500000000000015265 0ustar00zuulzuul00000000000000Keystone Style Commandments =========================== - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Keystone Specific Commandments ------------------------------ - Avoid using "double quotes" where you can reasonably use 'single quotes' TODO vs FIXME ------------- - TODO(name): implies that something should be done (cleanup, refactoring, etc), but is expected to be functional. - FIXME(name): implies that the method/function/etc shouldn't be used until that code is resolved and bug fixed. 
Logging ------- Use the common logging module, and ensure you ``getLogger``:: from oslo_log import log LOG = log.getLogger(__name__) LOG.debug('Foobar') AssertEqual argument order -------------------------- assertEqual method's arguments should be in ('expected', 'actual') order. Properly Calling Callables -------------------------- Methods, functions and classes can specify optional parameters (with default values) using Python's keyword arg syntax. When providing a value to such a callable we prefer that the call also uses keyword arg syntax. For example:: def f(required, optional=None): pass # GOOD f(0, optional=True) # BAD f(0, True) This gives us the flexibility to re-order arguments and more importantly to add new required arguments. It's also more explicit and easier to read. Testing ------- keystone uses testtools and stestr for its unittest suite and its test runner. If you'd like to learn more in depth: https://testtools.readthedocs.io/en/latest/ https://stestr.readthedocs.io/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/LICENSE0000664000175000017500000002363700000000000014503 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6221123 keystone-26.0.0/PKG-INFO0000664000175000017500000000620400000000000014562 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: keystone Version: 26.0.0 Summary: OpenStack Identity Home-page: https://docs.openstack.org/keystone/latest Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ================== OpenStack Keystone ================== .. image:: https://governance.openstack.org/tc/badges/keystone.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on OpenStack Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. Developer documentation, the source of which is in ``doc/source/``, is published at: https://docs.openstack.org/keystone/latest The API reference and documentation are available at: https://docs.openstack.org/api-ref/identity The canonical client library is available at: https://opendev.org/openstack/python-keystoneclient Documentation for cloud administrators is available at: https://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://opendev.org/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Release notes is available at: https://docs.openstack.org/releasenotes/keystone Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: https://specs.openstack.org/openstack/keystone-specs Contributors are encouraged to join IRC (``#openstack-keystone`` on OFTC): https://wiki.openstack.org/wiki/IRC Source for the project: https://opendev.org/openstack/keystone For information on contributing to Keystone, see ``CONTRIBUTING.rst``. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 Provides-Extra: ldap Provides-Extra: memcache Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/README.rst0000664000175000017500000000323400000000000015154 0ustar00zuulzuul00000000000000================== OpenStack Keystone ================== .. image:: https://governance.openstack.org/tc/badges/keystone.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on OpenStack Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. 
Developer documentation, the source of which is in ``doc/source/``, is published at: https://docs.openstack.org/keystone/latest The API reference and documentation are available at: https://docs.openstack.org/api-ref/identity The canonical client library is available at: https://opendev.org/openstack/python-keystoneclient Documentation for cloud administrators is available at: https://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://opendev.org/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Release notes is available at: https://docs.openstack.org/releasenotes/keystone Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: https://specs.openstack.org/openstack/keystone-specs Contributors are encouraged to join IRC (``#openstack-keystone`` on OFTC): https://wiki.openstack.org/wiki/IRC Source for the project: https://opendev.org/openstack/keystone For information on contributing to Keystone, see ``CONTRIBUTING.rst``. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/0000775000175000017500000000000000000000000015006 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.434115 keystone-26.0.0/api-ref/source/0000775000175000017500000000000000000000000016306 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/conf.py0000664000175000017500000001433300000000000017611 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # keystone documentation build configuration file, created by # sphinx-quickstart on Mon May 23 07:54:13 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. html_theme = 'openstackdocs' html_theme_options = { "sidebar_dropdown": "api_ref", "sidebar_mode": "toc", } extensions = [ 'os_api_ref', 'openstackdocstheme', ] # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones.[] # Add any paths that contain templates here, relative to this directory. # templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. 
copyright = '2010-present, OpenStack Foundation' # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_repo_name = 'openstack/keystone' openstackdocs_bug_project = 'keystone' openstackdocs_bug_tag = 'api-ref' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. 
# html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'keystonedoc' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/index.rst0000664000175000017500000000035600000000000020153 0ustar00zuulzuul00000000000000Welcome to keystone's documentation! ==================================== Contents: .. toctree:: :maxdepth: 2 v2-ext/index v3/index v3-ext/index Indices and tables ================== * :ref:`genindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.434115 keystone-26.0.0/api-ref/source/v2-ext/0000775000175000017500000000000000000000000017433 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/index.rst0000664000175000017500000000030500000000000021272 0ustar00zuulzuul00000000000000:tocdepth: 3 ------------------------------------------- Identity API v2.0 extensions (DEPRECATED) ------------------------------------------- .. rest_expand_all:: .. include:: ksec2-admin.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/ksec2-admin.inc0000664000175000017500000000550400000000000022227 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================== OS-KSEC2 admin extension ======================== Supports Amazon Elastic Compute (EC2) style authentication. EC2 Authentication ================== .. rest_method:: POST /v2.0/ec2tokens Authenticate for token. Request ------- Example ~~~~~~~ .. 
literalinclude:: samples/OS-KSEC2/authenticate-request.json :language: javascript Parameters ~~~~~~~~~~ Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 Example ~~~~~~~ .. literalinclude:: samples/OS-KSEC2/authenticate-response.json :language: javascript Grant credential to user ======================== .. rest_method:: POST /v2.0/users/{userId}/credentials/OS-EC2 Grants a credential to a user. Request ------- .. rest_parameters:: parameters.yaml - userId: user_id_path Example ~~~~~~~ .. literalinclude:: samples/OS-KSEC2/ec2Credentials-create-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 List credentials (EC2 extension) ================================ .. rest_method:: GET /v2.0/users/{userId}/credentials/OS-EC2 Lists credentials. Request ------- .. rest_parameters:: parameters.yaml - userId: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: samples/OS-KSEC2/credentialswithec2-list-response.json :language: javascript Delete user credentials ======================= .. rest_method:: DELETE /v2.0/users/{userId}/credentials/OS-EC2/{credentialId} Deletes user credentials. Request ------- .. rest_parameters:: parameters.yaml - userId: user_id_path - credentialId: credential_id Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Get user credentials ==================== .. 
rest_method:: GET /v2.0/users/{userId}/credentials/OS-EC2/{credentialId} Gets user credentials. Request ------- .. rest_parameters:: parameters.yaml - userId: user_id_path - credentialId: credential_id Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: samples/OS-KSEC2/ec2Credentials-show-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/parameters.yaml0000664000175000017500000000356400000000000022472 0ustar00zuulzuul00000000000000# variables in path credential_id: description: | The credential id. in: path required: true type: string credential_type: description: | The credential type. in: path required: true type: string user_id_path: description: | The user ID. in: path required: true type: string # variables in query # variables in body role_description: description: | The role description. in: body required: true type: string role_id: description: | The role ID. in: body required: true type: integer role_links: description: | Role links. in: body required: true type: object role_name: description: | The role name. in: body required: true type: string roles: description: | A ``roles`` object. in: body required: true type: string service_description: description: | Description about the service. in: body required: true type: string service_id: description: | The ID of the service. in: body required: true type: string service_name: description: | The service name. in: body required: true type: string service_type: description: | The type of the service. in: body required: true type: string tenant: description: | The tenant object. in: body required: true type: object user: description: | The ``user`` object. 
in: body required: true type: string user_email: description: | The user email. in: body required: true type: string user_name: description: | The user in-real-life name. in: body required: true type: string user_new_password: description: | User's new password in: body required: true type: string user_original_password: description: | User's old password in: body required: true type: string users: description: | The ``users`` object. in: body required: true type: array ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/source/v2-ext/samples/0000775000175000017500000000000000000000000021077 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.434115 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/0000775000175000017500000000000000000000000022225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/authenticate-request.json0000664000175000017500000000072700000000000027272 0ustar00zuulzuul00000000000000{ "credentials": { "access": "8cff51dc66594df4a2ae121f796df36c", "host": "localhost", "params": { "Action": "Test", "SignatureMethod": "HmacSHA256", "SignatureVersion": "2", "Timestamp": "2007-01-31T23:59:59Z" }, "path": "/", "secret": "df8daeaa981b40cea1217fead123bc64", "signature": "Fra2UBKKtqy3GQ0mj+JqzR8GTGsbWQW+yN5Nih9ThfI=", "verb": "GET" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/authenticate-response.json0000664000175000017500000000206700000000000027437 0ustar00zuulzuul00000000000000{ "access": { "metadata": { "is_admin": 0, "roles": ["9fe2ff9ee4384b1894a90878d3e92bab"] }, "serviceCatalog": [], "token": { "audit_ids": 
["AVUi_tN8SFWnHYaYpCcMEQ"], "expires": "2018-02-02T21:13:19.000000Z", "id": "gAAAAABadMZfZa_PZNOSi5iQoqPZ1b-VIo2Gnlf4Z_oJotw8qTh1Yv8_CFysOnoxRIrfFI-EcrErdJM2CrP7o6aDNR5AbfnFtSN_zgzzesEWaOaQkoLpk9X1lLada2KcQpWAeNafjNYSiP7JFjW6N4ngAm9U7egUW6MwUPPxi5e8igR5DtNc0FU", "issued_at": "2018-02-02T20:13:19.000000Z", "tenant": { "description": "description", "enabled": true, "id": "6b85a6ff4e0b4040a81708d6e063e4e7", "name": "BAR", "tags": [] } }, "user": { "id": "b245550742cf4c2fb9cf37aa1eda866e", "name": "foo", "roles": [ {"name": "_member_"} ], "roles_links": [], "username": "foo" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/credentials-show-response.json0000664000175000017500000000032700000000000030231 0ustar00zuulzuul00000000000000{ "credentials": [ { "passwordCredentials": { "username": "test_user", "password": "secretsecret" } } ], "credentials_links": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/credentialswithec2-list-response.json0000664000175000017500000000063100000000000031510 0ustar00zuulzuul00000000000000{ "credentials": [ { "passwordCredentials": { "username": "test_user", "password": "secretsecret" } }, { "OS-KSEC2-ec2Credentials": { "username": "test_user", "secret": "secretsecret", "signature": "bbb" } } ], "credentials_links": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/ec2Credentials-create-request.json0000664000175000017500000000021100000000000030670 0ustar00zuulzuul00000000000000{ "OS-KSEC2-ec2Credentials": { "username": "test_user", "secret": "secretsecret", "signature": "bbb" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v2-ext/samples/OS-KSEC2/ec2Credentials-show-response.json0000664000175000017500000000021100000000000030553 0ustar00zuulzuul00000000000000{ "OS-KSEC2-ec2Credentials": { "username": "test_user", "secret": "secretsecret", "signature": "bbb" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3/0000775000175000017500000000000000000000000016636 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/application-credentials.inc0000664000175000017500000003245600000000000024141 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================= Application Credentials ======================= Application credentials provide a way to delegate a user's authorization to an application without sharing the user's password authentication. This is a useful security measure, especially for situations where the user's identification is provided by an external source, such as LDAP or a single-sign-on service. Instead of storing user passwords in config files, a user creates an application credential for a specific project, with all or a subset of the role assignments they have on that project, and then stores the application credential identifier and secret in the config file. Multiple application credentials may be active at once, so you can easily rotate application credentials by creating a second one, converting your applications to use it one by one, and finally deleting the first one. Application credentials are limited by the lifespan of the user that created them. If the user is deleted, disabled, or loses a role assignment on a project, the application credential is deleted. Application credentials can have their privileges limited in two ways. 
First, the owner may specify a subset of their own roles that the application credential may assume when getting a token for a project. For example, if a user has the ``member`` role on a project, they also have the implied role ``reader`` and can grant the application credential only the ``reader`` role for the project: :: "roles": [ {"name": "reader"} ] Users also have the option of delegating more fine-grained access control to their application credentials by using access rules. For example, to create an application credential that is constricted to creating servers in nova, the user can add the following access rules: :: "access_rules": [ { "path": "/v2.1/servers", "method": "POST", "service": "compute" } ] The ``"path"`` attribute of application credential access rules uses a wildcard syntax to make it more flexible. For example, to create an application credential that is constricted to listing server IP addresses, you could use either of the following access rules: :: "access_rules": [ { "path": "/v2.1/servers/*/ips", "method": "GET", "service": "compute" } ] or equivalently: :: "access_rules": [ { "path": "/v2.1/servers/{server_id}/ips", "method": "GET", "service": "compute" } ] In both cases, a request path containing any server ID will match the access rule. For even more flexibility, the recursive wildcard ``**`` indicates that request paths containing any number of ``/`` will be matched. For example: :: "access_rules": [ { "path": "/v2.1/**", "method": "GET", "service": "compute" } ] will match any nova API for version 2.1. An access rule created for one application credential can be re-used by providing its ID to another application credential, for example: :: "access_rules": [ { "id": "abcdef" } ] Authenticating with an Application Credential ============================================= .. rest_method:: POST /v3/auth/tokens To authenticate with an application credential, specify "application_credential" as the auth method. 
You are not allowed to request a scope, as the scope is retrieved from the application credential. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - identity: identity - methods: auth_methods_application_credential - application_credential: request_application_credential_body_required - id: request_application_credential_auth_id_body_not_required - name: request_application_credential_auth_name_body_not_required - secret: request_application_credential_auth_secret_body_required - user: request_application_credential_user_body_not_required Example ~~~~~~~ An application credential can be identified by an ID: .. literalinclude:: samples/admin/auth-application-credential-id-request.json :language: javascript It can also be identified by its name and a user object: .. literalinclude:: samples/admin/auth-application-credential-name-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - token: token - application_credential: auth_application_credential_body - application_credential.id: response_application_credential_id_body - application_credential.name: response_application_credential_name_body - application_credential.restricted: response_application_credential_unrestricted_body - audit_ids: audit_ids - catalog: catalog - expires_at: expires_at - issued_at: issued_at - methods: auth_methods - project: project - roles: roles - user: user - user.id: user_id - user.name: user_name Example ~~~~~~~ .. literalinclude:: samples/admin/auth-application-credential-response.json :language: javascript A token created with an application credential will have the scope and roles designated by the application credential. Create application credential ============================= .. 
rest_method:: POST /v3/users/{user_id}/application_credentials Creates an application credential for a user on the project to which the current token is scoped. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/application_credentials`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_application_credential_user_id_path_required - application_credential: request_application_credential_body_required - name: request_application_credential_name_body_required - secret: request_application_credential_secret_body_not_required - description: request_application_credential_description_body_not_required - expires_at: request_application_credential_expires_at_body_not_required - roles: request_application_credential_roles_body_not_required - unrestricted: request_application_credential_unrestricted_body_not_required - access_rules: request_application_credential_access_rules_body_not_required Example ~~~~~~~ .. literalinclude:: samples/admin/application-credential-create-request.json :language: javascript Response -------- .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - application_credential: response_application_credential_body - id: response_application_credential_id_body - name: response_application_credential_name_body - secret: response_application_credential_secret_body - description: response_application_credential_description_body - expires_at: response_application_credential_expires_at_body - project_id: response_application_credential_project_id_body - roles: response_application_credential_roles_body - access_rules: response_application_credential_access_rules_body - unrestricted: response_application_credential_unrestricted_body - links: link_response_body Example ~~~~~~~ .. 
literalinclude:: samples/admin/application-credential-create-response.json :language: javascript List application credentials ============================= .. rest_method:: GET /v3/users/{user_id}/application_credentials List all application credentials for a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/application_credentials`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_application_credential_user_id_path_required - name: request_application_credential_name_query_not_required Response -------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - application_credential: response_application_credential_body - id: response_application_credential_id_body - name: response_application_credential_name_body - description: response_application_credential_description_body - expires_at: response_application_credential_expires_at_body - project_id: response_application_credential_project_id_body - roles: response_application_credential_roles_body - access_rules: response_application_credential_access_rules_body - unrestricted: response_application_credential_unrestricted_body - links: link_collection Example ~~~~~~~ .. literalinclude:: samples/admin/application-credential-list-response.json :language: javascript Show application credential details =================================== .. rest_method:: GET /v3/users/{user_id}/application_credentials/{application_credential_id} Show details of an application credential. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/application_credentials`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_application_credential_user_id_path_required - application_credential_id: request_application_credential_id_path_required Response -------- .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - application_credential: response_application_credential_body - id: response_application_credential_id_body - name: response_application_credential_name_body - description: response_application_credential_description_body - expires_at: response_application_credential_expires_at_body - project_id: response_application_credential_project_id_body - roles: response_application_credential_roles_body - access_rules: response_application_credential_access_rules_body - unrestricted: response_application_credential_unrestricted_body - links: link_response_body Example ~~~~~~~ .. literalinclude:: samples/admin/application-credential-get-response.json :language: javascript Delete application credential ============================= .. rest_method:: DELETE /v3/users/{user_id}/application_credentials/{application_credential_id} Delete an application credential. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/application_credentials`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_application_credential_user_id_path_required - application_credential_id: request_application_credential_id_path_required Response -------- .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 List access rules ================= .. rest_method:: GET /v3/users/{user_id}/access_rules List all access rules for a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/access_rules`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_access_rule_user_id_path_required Response -------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - access_rules: response_access_rules_body - id: response_access_rules_id_body - path: response_access_rules_path_body - method: response_access_rules_method_body - service: response_access_rules_service_body - links: link_collection Example ~~~~~~~ .. literalinclude:: samples/admin/access-rules-list-response.json :language: javascript Show access rule details ======================== .. rest_method:: GET /v3/users/{user_id}/access_rules/{access_rule_id} Show details of an access rule. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/access_rules`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_access_rule_user_id_path_required - access_rule_id: request_access_rule_id_path_required Response -------- .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - access_rules: response_access_rules_body - id: response_access_rules_id_body - path: response_access_rules_path_body - method: response_access_rules_method_body - service: response_access_rules_service_body - links: link_collection Example ~~~~~~~ .. literalinclude:: samples/admin/access-rule-get-response.json :language: javascript Delete access rule ================== .. rest_method:: DELETE /v3/users/{user_id}/access_rules/{access_rule_id} Delete an access rule. An access rule that is still in use by an application credential cannot be deleted. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/access_rules`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: request_access_rule_user_id_path_required - access_rule_id: request_access_rule_id_path_required Response -------- .. rest_status_code:: success status.yaml - 204 .. 
rest_status_code:: error status.yaml - 401 - 403 - 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/authenticate-v3.inc0000664000175000017500000007212600000000000022345 0ustar00zuulzuul00000000000000.. -*- rst -*- ===================================== Authentication and token management ===================================== The Identity service generates tokens in exchange for authentication credentials. A token represents the authenticated identity of a user and, optionally, grants authorization on a specific project, domain, or the deployment system. The body of an authentication request must include a payload that specifies the authentication methods, which are normally just ``password`` or ``token``, the credentials, and, optionally, the authorization scope. You can scope a token to a project, domain, the deployment system, or the token can be unscoped. You cannot scope a token to multiple scope targets. Tokens have IDs, which the Identity API returns in the ``X-Subject-Token`` response header. In the case of multi-factor authentication (MFA) more than one authentication method needs to be supplied to authenticate. As of v3.12 a failure due to MFA rules only partially being met will result in an auth receipt ID being returned in the response header ``Openstack-Auth-Receipt``, and a response body that details the receipt itself and the missing authentication methods. Supplying the auth receipt ID in the ``Openstack-Auth-Receipt`` header in a follow-up authentication request, with the missing authentication methods, will result in a valid token by reusing the successful methods from the first request. This allows MFA authentication to be a multi-step process. After you obtain an authentication token, you can: - Make REST API requests to other OpenStack services. You supply the ID of your authentication token in the ``X-Auth-Token`` request header. 
- Validate your authentication token and list the domains, projects, roles, and endpoints that your token gives you access to. - Use your token to request another token scoped for a different domain and project. - Force the immediate revocation of a token. - List revoked public key infrastructure (PKI) tokens. In v3.7 of the Identity API service, two new configuration options were added: ``[resource] admin_project_name`` and ``[resource] admin_project_domain_name``. The options represent the project that only the cloud administrator should be able to access. When an authentication request for a token scoped to the admin project is processed, it will have an additional field in the token ``{is_admin_project: True}``. The additional field can be used when writing policy rules that evaluate access control to APIs. Alternatively, in v3.10 the Identity API service introduced the concept of system role assignments and system-scoped tokens. APIs that affect the deployment system require system-scoped tokens. The Identity API considers expired tokens as invalid, which is determined by the deployment's configuration. These authentication errors can occur: **Authentication errors** +------------------------+----------------------------------------------------------------------+ | Response code | Description | +------------------------+----------------------------------------------------------------------+ | ``Bad Request (400)`` | The Identity service failed to parse the request as expected. One | | | of the following errors occurred: | | | | | | - A required attribute was missing. | | | | | | - An attribute that is not allowed was specified, such as an ID on a | | | POST request in a basic CRUD operation. | | | | | | - An attribute of an unexpected data type was specified. 
| +------------------------+----------------------------------------------------------------------+ | ``Unauthorized (401)`` | One of the following errors occurred: | | | | | | - Authentication was not performed. | | | | | | - The specified ``X-Auth-Token`` header is not valid. | | | | | | - The authentication credentials are not valid. | | | | | | - Not all MFA rules were satisfied. | | | | | | - The specified ``Openstack-Auth-Receipt`` header is not valid. | +------------------------+----------------------------------------------------------------------+ | ``Forbidden (403)`` | The identity was successfully authenticated but it is not | | | authorized to perform the requested action. | +------------------------+----------------------------------------------------------------------+ | ``Not Found (404)`` | An operation failed because a referenced entity cannot be found by | | | ID. For a POST request, the referenced entity might be specified in | | | the request body rather than in the resource path. | +------------------------+----------------------------------------------------------------------+ | ``Conflict (409)`` | A POST or PATCH operation failed. For example, a client tried to | | | update a unique attribute for an entity, which conflicts with that | | | of another entity in the same collection. | | | | | | Or, a client issued a create operation twice on a collection with a | | | user-defined, unique attribute. For example, a client made a POST | | | ``/users`` request two times for the unique, user-defined name | | | attribute for a user entity. | +------------------------+----------------------------------------------------------------------+ Password authentication with unscoped authorization =================================================== .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the password authentication method. Authorization is unscoped. 
The request body must include a payload that specifies the authentication method, which is ``password``, and the user, by ID or name, and password credentials. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: request_nocatalog_unscoped_path_not_required - domain: domain - name: user_name - auth: auth - user: user - password: password - id: user_id - identity: identity - methods: auth_methods_passwd Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-password-unscoped-request-with-domain.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - domain: domain - methods: auth_methods_passwd - expires_at: expires_at - token: token - user: user - audit_ids: audit_ids - issued_at: issued_at - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-password-unscoped-response.json :language: javascript Password authentication with scoped authorization ================================================= .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the password authentication method and scopes authorization to a project, domain, or the system. The request body must include a payload that specifies the ``password`` authentication method which includes the credentials in addition to a ``project``, ``domain``, or ``system`` authorization scope. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - nocatalog: nocatalog - name: user_name - auth: auth - user: user - scope: scope_string - password: password - id: user_id - identity: identity - methods: auth_methods_passwd System-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/system-password.json :language: javascript Domain-Scoped with Domain ID Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/domain-id-password.json :language: javascript Domain-Scoped with Domain Name Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/domain-name-password.json :language: javascript Project-Scoped with Project ID Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-id-password.json :language: javascript Project-Scoped with Project Name Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-name-password.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - region_id: region_id_required - methods: auth_methods_passwd - roles: roles - url: endpoint_url - region: endpoint_region - token: token - expires_at: expires_at - system: system_scope_response_body_optional - domain: domain_scope_response_body_optional - project: project_scope_response_body_optional - issued_at: issued_at - catalog: catalog - user: user - audit_ids: audit_ids - interface: endpoint_interface - endpoints: endpoints - type: endpoint_type - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 System-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/system-scoped-password.json :language: javascript Domain-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. 
literalinclude:: ./samples/auth/responses/domain-scoped-password.json :language: javascript Project-Scoped Example ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/project-scoped-password.json :language: javascript Password authentication with explicit unscoped authorization ============================================================ .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the password authentication method with explicit unscoped authorization. The request body must include a payload that specifies the ``password`` authentication method, the credentials, and the ``unscoped`` authorization scope. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: request_nocatalog_unscoped_path_not_required - name: user_name - auth: auth - user: user - scope: explicit_unscoped_string - password: password - id: user_id - identity: identity - methods: auth_methods_passwd Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-password-explicit-unscoped-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - domain: domain - methods: auth_methods_passwd - roles: roles - expires_at: expires_at - token: token - user: user - audit_ids: audit_ids - issued_at: issued_at - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-password-explicit-unscoped-response.json :language: javascript Token authentication with unscoped authorization ================================================ .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the token authentication method. Authorization is unscoped. 
In the request body, provide the token ID. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: request_nocatalog_unscoped_path_not_required - identity: identity - token: auth_token - id: auth_token_id - auth: auth - methods: auth_methods_token Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-token-unscoped-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-token-unscoped-response.json :language: javascript Token authentication with scoped authorization ============================================== .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the token authentication method and scopes authorization to a project, domain, or the system. The request body must include a payload that specifies the ``token`` authentication method which includes the token in addition to a ``project``, ``domain``, or ``system`` authorization scope. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: nocatalog - methods: auth_methods_token - auth: auth - token: auth_token - audit_ids: audit_ids - scope: scope_string - id: auth_token_id - identity: identity System-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/system-token.json :language: javascript Domain-Scoped with Domain ID Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. 
literalinclude:: ./samples/auth/requests/domain-id-token.json :language: javascript Domain-Scoped with Domain Name Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/domain-name-token.json :language: javascript Project-Scoped with Project ID Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-id-token.json :language: javascript Project-Scoped with Project Name Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-name-token.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - region_id: region_id_required - methods: auth_methods_passwd - roles: roles - url: endpoint_url - region: endpoint_region - token: token - expires_at: expires_at - system: system_scope_response_body_optional - domain: domain_scope_response_body_optional - project: project_scope_response_body_optional - issued_at: issued_at - catalog: catalog - user: user - audit_ids: audit_ids - interface: endpoint_interface - endpoints: endpoints - type: endpoint_type - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 System-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/system-scoped-token.json :language: javascript Domain-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/domain-scoped-token.json :language: javascript Project-Scoped Example ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/project-scoped-token.json :language: javascript Token authentication with explicit unscoped authorization ========================================================= .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. 
Uses the token authentication method with explicit unscoped authorization. In the request body, provide the token ID and the ``unscoped`` authorization scope. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: request_nocatalog_unscoped_path_not_required - methods: auth_methods_token - auth: auth - token: auth_token - audit_ids: audit_ids - scope: explicit_unscoped_string - id: auth_token_id - identity: identity Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-token-explicit-unscoped-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/auth-token-unscoped-response.json :language: javascript Multi-Step authentication (2-Factor Password and TOTP example) ============================================================== .. rest_method:: POST /v3/auth/tokens Authenticates an identity and generates a token. Uses the password authentication method, then the totp method, with an auth receipt in between. This assumes that MFA has been enabled for the user, and a rule has been defined requiring authentication with both password and totp. The first request body must at least include a payload that specifies one of ``password`` or ``totp`` authentication methods which includes the credentials in addition to an optional scope. If only one method is supplied then an auth receipt will be returned. Scope is not retained in the receipt and must be resupplied in subsequent requests. While it is very possible to supply all the required auth methods at once, this example shows the multi-step process which is likely to be more common. 
More than 2 factors can be used but the same process applies to those as well; either all auth methods are supplied at once, or in steps with one or more auth receipts in between. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` First Request ------------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - nocatalog: nocatalog - name: user_name - auth: auth - user: user - scope: scope_string - password: password - id: user_id - identity: identity - methods: auth_methods_passwd Example ~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-id-password.json :language: javascript Response -------- Here we are expecting a 401 status, and a returned auth receipt. Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - Openstack-Auth-Receipt: Openstack-Auth-Receipt - methods: auth_methods_receipt - expires_at: receipt_expires_at - issued_at: receipt_issued_at - user: user - required_auth_methods: required_auth_methods Status Code ~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 401: auth_receipt .. rest_status_code:: error status.yaml - 400 - 401: auth_failed - 403 - 404 Auth Receipt Example ~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/auth-receipt-password.json :language: javascript Second Request -------------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - Openstack-Auth-Receipt: Openstack-Auth-Receipt - nocatalog: nocatalog - name: user_name - auth: auth - user: user - scope: scope_string - totp: totp - id: user_id - identity: identity - methods: auth_methods_totp Example ~~~~~~~ .. literalinclude:: ./samples/auth/requests/project-id-totp.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - region_id: region_id_required - methods: auth_methods_passwd - roles: roles - url: endpoint_url - region: endpoint_region - token: token - expires_at: expires_at - system: system_scope_response_body_optional - domain: domain_scope_response_body_optional - project: project_scope_response_body_optional - issued_at: issued_at - catalog: catalog - user: user - audit_ids: audit_ids - interface: endpoint_interface - endpoints: endpoints - type: endpoint_type - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401: auth_receipt_failure - 403 - 404 Project-Scoped Password and TOTP Example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/project-scoped-password-totp.json :language: javascript Validate and show information for token ======================================= .. rest_method:: GET /v3/auth/tokens Validates and shows information for a token, including its expiration date and authorization scope. Pass your own token in the ``X-Auth-Token`` request header. Pass the token that you want to validate in the ``X-Subject-Token`` request header. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token - X-Subject-Token: X-Subject-Token - nocatalog: nocatalog - allow_expired: allow_expired Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - X-Subject-Token: X-Subject-Token - methods: auth_methods - links: domain_link_response_body - user: user - token: token - expires_at: expires_at - catalog: catalog_response_body_optional - system: system_scope_response_body_optional - domain: domain_scope_response_body_optional - project: project_scope_response_body_optional - roles: roles - audit_ids: audit_ids - issued_at: issued_at - id: user_id - name: user_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Unscoped Example ~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/unscoped-password.json :language: javascript System-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/system-scoped-password.json :language: javascript Domain-Scoped Example ~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/domain-scoped-password.json :language: javascript Project-Scoped Example ~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/auth/responses/project-scoped-password.json :language: javascript Check token =========== .. rest_method:: HEAD /v3/auth/tokens Validates a token. This call is similar to ``GET /auth/tokens`` but no response body is provided even in the ``X-Subject-Token`` header. The Identity API returns the same response as when the subject token was issued by ``POST /auth/tokens`` even if an error occurs because the token is not valid. An HTTP ``204`` response code indicates that the ``X-Subject-Token`` is valid. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token - X-Subject-Token: X-Subject-Token - allow_expired: allow_expired Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Revoke token ============ .. rest_method:: DELETE /v3/auth/tokens Revokes a token. This call is similar to the HEAD ``/auth/tokens`` call except that the ``X-Subject-Token`` token is immediately not valid, regardless of the ``expires_at`` attribute value. An additional ``X-Auth-Token`` is not required. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token - X-Subject-Token: X-Subject-Token Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Get service catalog =================== .. rest_method:: GET /v3/auth/catalog New in version 3.3 This call returns a service catalog for the X-Auth-Token provided in the request, even if the token does not contain a catalog itself (for example, if it was generated using ?nocatalog). The structure of the catalog object is identical to that contained in a token. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_catalog`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoints: endpoints - id: service_id - type: service_type - name: service_name Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/get-service-catalog-response.json :language: javascript Get available project scopes ============================ .. rest_method:: GET /v3/auth/projects New in version 3.3 This call returns the list of projects that are available to be scoped to based on the X-Auth-Token provided in the request. 
The structure of the response is exactly the same as listing projects for a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: project_domain_id_response_body - enabled: project_enabled_response_body - id: project_id - links: links_project - name: project_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/get-available-project-scopes-response.json :language: javascript Get available domain scopes =========================== .. rest_method:: GET /v3/auth/domains New in version 3.3 This call returns the list of domains that are available to be scoped to based on the X-Auth-Token provided in the request. The structure is the same as listing domains. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_domains`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - description: domain_description_response_body - enabled: domain_enabled_response_body - id: domain_id_response_body - links: domain_link_response_body - name: domain_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/get-available-domain-scopes-response.json :language: javascript Get available system scopes =========================== .. 
rest_method:: GET /v3/auth/system New in version 3.10 This call returns the list of systems that are available to be scoped to based on the X-Auth-Token provided in the request. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_system`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - X-Auth-Token: X-Auth-Token Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: domain_link_response_body - system: response_body_system_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 400 Example ~~~~~~~ .. literalinclude:: ./samples/admin/get-available-system-scopes-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/credentials.inc0000664000175000017500000001267500000000000021641 0ustar00zuulzuul00000000000000.. -*- rst -*- ============= Credentials ============= In exchange for a set of authentication credentials that the user submits, the Identity service generates and returns a token. A token represents the authenticated identity of a user and, optionally, grants authorization on a specific project or domain. You can list all credentials, and create, show details for, update, and delete a credential. Create credential ================= .. rest_method:: POST /v3/credentials Creates a credential. The following example shows how to create an EC2-style credential. The credential blob is a string that contains a JSON-serialized dictionary with the ``access`` and ``secret`` keys. This format is required when you specify the ``ec2`` type. To specify other credentials, such as ``access_key``, change the type and contents of the data blob. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/credentials`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - credential: credential - project_id: project_id - type: credential_type - blob: credential_blob - user_id: credential_user_id Example ~~~~~~~ .. literalinclude:: ./samples/admin/credential-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential: credential - user_id: credential_user_id - links: credential_links - blob: credential_blob - project_id: project_id - type: credential_type - id: credential_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/credential-create-response.json :language: javascript List credentials ================ .. rest_method:: GET /v3/credentials Lists all credentials. Optionally, you can include the ``user_id`` or ``type`` query parameter in the URI to filter the response by a user or credential type. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/credentials`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_query - type: credential_type_not_required Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: credential_user_id - links: credentials_links - blob: credential_blob - credentials: credentials - project_id: project_id - type: credential_type - id: credential_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/credentials-list-response.json :language: javascript Show credential details ======================= .. rest_method:: GET /v3/credentials/{credential_id} Shows details for a credential. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/credential`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential_id: credential_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential: credential - user_id: credential_user_id - links: credential_links - blob: credential_blob - project_id: project_id - type: credential_type - id: credential_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/credential-show-response.json :language: javascript Update credential ================= .. rest_method:: PATCH /v3/credentials/{credential_id} Updates a credential. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/credential`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential_id: credential_id_path - credential: credential - project_id: project_id - type: credential_type_not_required - blob: credential_blob_not_required - user_id: credential_user_id_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/credential-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential: credential - user_id: credential_user_id - links: credential_links - blob: credential_blob - project_id: project_id - type: credential_type - id: credential_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/credential-update-response.json :language: javascript Delete credential ================= .. rest_method:: DELETE /v3/credentials/{credential_id} Deletes a credential. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/credential`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - credential_id: credential_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/domains-config-v3.inc0000664000175000017500000003431700000000000022564 0ustar00zuulzuul00000000000000.. -*- rst -*- ====================== Domain configuration ====================== You can manage domain-specific configuration options. Domain-specific configuration options are structured within their group objects. The API supports only the ``identity`` and ``ldap`` groups. These groups override the default configuration settings for the storage of users and groups by the Identity server. You can create, update, and delete domain-specific configuration options by using the HTTP PUT , PATCH , and DELETE methods. When updating, it is only necessary to include those options that are being updated. To create an option, use the PUT method. The Identity API does not return options that are considered sensitive, although you can create and update these options. The only option currently considered sensitive is the ``password`` option within the ``ldap`` group. The API enables you to include sensitive options as part of non- sensitive options. For example, you can include the password as part of the ``url`` option. If you try to create or update configuration options for groups other than the ``identity`` or ``ldap`` groups, the ``Forbidden (403)`` response code is returned. For information about how to integrate the Identity service with LDAP, see `Integrate Identity with LDAP `_. Show default configuration settings =================================== .. 
rest_method:: GET /v3/domains/config/default The default configuration settings for the options that can be overridden can be retrieved. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - config: domain_config - ldap: domain_ldap - url: domain_url - user_tree_dn: domain_user_tree_dn - identity: identity - driver: domain_driver Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-default-response.json :language: javascript Show default configuration for a group ====================================== .. rest_method:: GET /v3/domains/config/{group}/default Reads the default configuration settings for a specific group. The API supports only the ``identity`` and ``ldap`` groups. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - ldap: domain_ldap - url: domain_url - user_tree_dn: domain_user_tree_dn - identity: identity - driver: domain_driver Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-default-response.json :language: javascript Show default option for a group =============================== .. rest_method:: GET /v3/domains/config/{group}/{option}/default Reads the default configuration setting for an option within a group. The API supports only the ``identity`` and ``ldap`` groups. For the ``ldap`` group, a valid value is ``url`` or ``user_tree_dn``. For the ``identity`` group, a valid value is ``driver``. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group_id_path - option: option Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - user_tree_dn: domain_user_tree_dn Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-option-default-response.json :language: javascript Show domain group option configuration ====================================== .. rest_method:: GET /v3/domains/{domain_id}/config/{group}/{option} Shows details for a domain group option configuration. The API supports only the ``identity`` and ``ldap`` groups. For the ``ldap`` group, a valid value is ``url`` or ``user_tree_dn``. For the ``identity`` group, a valid value is ``driver``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path - option: option Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-option-show-response.json :language: javascript Update domain group option configuration ======================================== .. rest_method:: PATCH /v3/domains/{domain_id}/config/{group}/{option} Updates a domain group option configuration. 
The API supports only the ``identity`` and ``ldap`` groups. For the ``ldap`` group, a valid value is ``url`` or ``user_tree_dn``. For the ``identity`` group, a valid value is ``driver``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path - option: option - url: domain_url - driver: domain_driver - user_tree_dn: domain_user_tree_dn Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-option-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-option-update-response.json :language: javascript Delete domain group option configuration ======================================== .. rest_method:: DELETE /v3/domains/{domain_id}/config/{group}/{option} Deletes a domain group option configuration. The API supports only the ``identity`` and ``ldap`` groups. For the ``ldap`` group, a valid value is ``url`` or ``user_tree_dn``. For the ``identity`` group, a valid value is ``driver``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path - option: option Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Show domain group configuration =============================== .. 
rest_method:: GET /v3/domains/{domain_id}/config/{group} Shows details for a domain group configuration. The API supports only the ``identity`` and ``ldap`` groups. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-show-response.json :language: javascript Update domain group configuration ================================= .. rest_method:: PATCH /v3/domains/{domain_id}/config/{group} Updates a domain group configuration. The API supports only the ``identity`` and ``ldap`` groups. If you try to set configuration options for other groups, this call fails with the ``Forbidden (403)`` response code. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-group-update-response.json :language: javascript Delete domain group configuration ================================= .. rest_method:: DELETE /v3/domains/{domain_id}/config/{group} Deletes a domain group configuration. The API supports only the ``identity`` and ``ldap`` groups. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config_default`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Create domain configuration =========================== .. rest_method:: PUT /v3/domains/{domain_id}/config Creates a domain configuration. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-create-response.json :language: javascript Show domain configuration ========================= .. 
rest_method:: GET /v3/domains/{domain_id}/config Shows details for a domain configuration. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-show-response.json :language: javascript Update domain configuration =========================== .. rest_method:: PATCH /v3/domains/{domain_id}/config Updates a domain configuration. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - url: domain_url - driver: domain_driver - ldap: domain_ldap - config: domain_config - user_tree_dn: domain_user_tree_dn - identity: identity Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-config-update-response.json :language: javascript Delete domain configuration =========================== .. rest_method:: DELETE /v3/domains/{domain_id}/config Deletes a domain configuration. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_config`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/domains.inc0000664000175000017500000001336300000000000020771 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Domains ========= A domain is a collection of users, groups, and projects. Each group and project is owned by exactly one domain. Each domain defines a namespace where certain API-visible name attributes exist, which affects whether those names must be globally unique or unique within that domain. In the Identity API, the uniqueness of these attributes is as follows: - *Domain name*. Globally unique across all domains. - *Role name*. Unique within the owning domain. - *User name*. Unique within the owning domain. - *Project name*. Unique within the owning domain. - *Group name*. Unique within the owning domain. List domains ============ .. rest_method:: GET /v3/domains Lists all domains. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domains`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: domain_name_query - enabled: domain_enabled_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domains: domains - description: domain_description_response_body - enabled: domain_enabled_response_body - id: domain_id_response_body - links: domain_link_response_body - name: domain_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domains-list-response.json :language: javascript Create domain ============= .. rest_method:: POST /v3/domains Creates a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domains`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain: domain - explicit_domain_id: request_explicit_domain_id_body_not_required - enabled: domain_enabled_request_body - description: domain_description_request_body - name: domain_name_request_body - options: request_domain_options_body_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain: domain - description: domain_description_response_body - enabled: domain_enabled_response_body - id: domain_id_response_body - links: domain_link_response_body - name: domain_name_response_body - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Show domain details =================== .. rest_method:: GET /v3/domains/{domain_id} Shows details for a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domains`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain: domain - description: domain_description_response_body - enabled: domain_enabled_response_body - id: domain_id_response_body - links: domain_link_response_body - name: domain_name_response_body - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-show-response.json :language: javascript Update domain ============= .. rest_method:: PATCH /v3/domains/{domain_id} Updates a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - domain: domain - enabled: domain_enabled_update_request_body - description: domain_description_update_request_body - name: domain_name_update_request_body - options: request_domain_options_body_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain: domain - description: domain_description_response_body - enabled: domain_enabled_response_body - id: domain_id_response_body - links: domain_link_response_body - name: domain_name_response_body - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-update-response.json :language: javascript Delete domain ============= .. rest_method:: DELETE /v3/domains/{domain_id} Deletes a domain. To minimize the risk of accidentally deleting a domain, you must first disable the domain by using the update domain method. When you delete a domain, this call also deletes all entities owned by it, such as users, groups, and projects, and any credentials and granted roles that relate to those entities. If you try to delete an enabled domain, this call returns the ``Forbidden (403)`` response code. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - domain_id: domain_id_path Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/groups.inc0000664000175000017500000001674200000000000020662 0ustar00zuulzuul00000000000000.. -*- rst -*- ======== Groups ======== A group is a collection of users. Each group is owned by a domain. You can use groups to ease the task of managing role assignments for users. Assigning a role to a group on a project or domain is equivalent to assigning the role to each group member on that project or domain. When you unassign a role from a group, that role is automatically unassigned from any user that is a member of the group. Any tokens that authenticates those users to the relevant project or domain are revoked. As with users, a group without any role assignments is useless from the perspective of an OpenStack service and has no access to resources. However, a group without role assignments is permitted as a way of acquiring or loading users and groups from external sources before mapping them to projects and domains. List groups =========== .. rest_method:: GET /v3/groups Lists groups. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: group_name_query - domain_id: domain_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - groups: groups - description: group_description_response_body - domain_id: group_domain_id_response_body - id: group_id_response_body - links: link_response_body - name: group_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/groups-list-response.json :language: javascript Create group ============ .. rest_method:: POST /v3/groups Creates a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group - description: group_description_request_body - domain_id: group_domain_id_request_body - name: group_name_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group - description: group_description_response_body - domain_id: group_domain_id_response_body - id: group_id_response_body - links: link_response_body - name: group_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-show-response.json :language: javascript Show group details ================== .. rest_method:: GET /v3/groups/{group_id} Shows details for a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group - description: group_description_response_body - domain_id: group_domain_id_response_body - id: group_id_response_body - links: link_response_body - name: group_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-show-response.json :language: javascript Update group ============ .. 
rest_method:: PATCH /v3/groups/{group_id} Updates a group. If the back-end driver does not support this functionality, the call returns the ``Not Implemented (501)`` response code. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - group: group - description: group_description_update_request_body - domain_id: group_domain_id_update_request_body - name: group_name_update_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group: group - description: group_description_response_body - domain_id: group_domain_id_response_body - id: group_id_response_body - links: link_response_body - name: group_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 501 Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-update-response.json :language: javascript Delete group ============ .. rest_method:: DELETE /v3/groups/{group_id} Deletes a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List users in group =================== .. rest_method:: GET /v3/groups/{group_id}/users Lists the users that belong to a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group_users`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - password_expires_at: password_expires_at_query Response -------- Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/group-users-list-response.json :language: javascript Add user to group ================= .. rest_method:: PUT /v3/groups/{group_id}/users/{user_id} Adds a user to a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group_user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Check whether user belongs to group =================================== .. rest_method:: HEAD /v3/groups/{group_id}/users/{user_id} Validates that a user belongs to a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group_user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Remove user from group ====================== .. rest_method:: DELETE /v3/groups/{group_id}/users/{user_id} Removes a user from a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/group_user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/index.rst0000664000175000017500000002270000000000000020500 0ustar00zuulzuul00000000000000:tocdepth: 3 --------------------------- Identity API v3 (CURRENT) --------------------------- The Identity service generates authentication tokens that permit access to the OpenStack services REST APIs. Clients obtain this token and the URL endpoints for other service APIs by supplying their valid credentials to the authentication service. Each time you make a REST API request to an OpenStack service, you supply your authentication token in the X-Auth-Token request header. Like most OpenStack projects, OpenStack Identity protects its APIs by defining policy rules based on a role-based access control (RBAC) approach. The Identity service configuration file sets the name and location of a JSON policy file that stores these rules. Note that the V3 API implements HEAD for all GET requests. Each HEAD request contains the same headers and HTTP status code as the corresponding GET API. For information about Identity API protection, see `Identity API protection with role-based access control (RBAC) `_ in the OpenStack Cloud Administrator Guide. 
=================================== What's New in Version 3.14 (Ussuri) =================================== - New attribute ``authorization_ttl`` for identity providers - New attribute ``membership_expires_at`` when listing groups for a user - Ability to persist group memberships carried through mapping for a federated user - Added the ability to create, update and delete federated attributes for a user ================================== What's New in Version 3.13 (Train) ================================== - New parameter access_rules for application credentials - New read-only API /v3/users/{user_id}/access_rules for viewing access rules ================================== What's New in Version 3.12 (Stein) ================================== - New optional multi-factor auth process involving auth receipts ================================== What's New in Version 3.11 (Rocky) ================================== - New endpoint /v3/limits-model for discovering the limit model in effect - New description field in registered and project limits - New project_id filters for project limits - New parameter include_limits for project detail query =================================== What's New in Version 3.10 (Queens) =================================== - Introduction of the Application Credentials API. - Introduction of an experimental Unified Limits API. - Ability to grant system role assignments and obtain system-scoped tokens. ================================== What's New in Version 3.9 (Queens) ================================== - Addition of ``tags`` attribute to project. - New APIs to interact with the ``tags`` attribute. ================================= What's New in Version 3.8 (Ocata) ================================= - Allow a service user to fetch a token that has expired. - Add a ``password_expires_at`` query parameter to user list and users in group list. 
================================== What's New in Version 3.7 (Newton) ================================== - Addition of the ``password_expires_at`` field to the user response object. - Introduce a flag to bypass expiration and revocation checking. ================================== What's New in Version 3.6 (Mitaka) ================================== - Listing role assignments for a tree of projects. - Setting the project ``is_domain`` attribute enables a project to behave as a domain. - Addition of the ``is_domain`` field to project scoped token response that represents whether a project is acting as a domain. - Enable or disable a subtree in the project hierarchy. - Delete a subtree in the project hierarchy. - Additional identifier for tokens scoped to the designated ``admin project``. - Addition of ``domain_id`` filter to list user projects - One role can imply another via role_inference rules. - Enhance list role assignment to optionally provide names of entities. - The defaults for domain-specific configuration options can be retrieved. - Assignments can be specified as inherited, causing the assignment to be placed on any sub-projects. - Support for domain specific roles. - Support ``enabled`` and ``id`` as optional attributes to filter identity providers when listing. =================================== What's New in Version 3.5 (Liberty) =================================== - Addition of ``type`` optional attribute to list credentials. - Addition of ``region_id`` optional attribute to list endpoints. - Addition of ``is_domain`` optional attribute to projects. Setting this currently has no effect, it is reserved for future use. ================================ What's New in Version 3.4 (Kilo) ================================ - For tokenless authorization, the scope information may be set in the request headers. - Addition of ``parent_id`` optional attribute to projects. This enables the construction of a hierarchy of projects. 
- Addition of domain specific configuration management for a domain entity. - Removal of ``url`` optional attribute for ``regions``. This attribute was only used for the experimental phase of keystone-to-keystone federation and has been superseded by making service provider entries have its own entry in the service catalog. - The JSON Home support now will indicate the status of resource if it is not stable and current. ================================ What's New in Version 3.3 (Juno) ================================ These features are considered stable as of September 4th, 2014. - Addition of ``name`` optional variable to be included from service definition into the service catalog. - Introduced a stand alone call to retrieve a service catalog. - Introduced support for JSON Home. - Introduced a standard call to retrieve possible project and domain scope targets for a token. - Addition of ``url`` optional attribute for ``regions``. ==================================== What's New in Version 3.2 (Icehouse) ==================================== These features are considered stable as of January 23, 2014. - Introduced a mechanism to opt-out from catalog information during token validation - Introduced a region resource for constructing a hierarchical container of groups of service endpoints - Inexact filtering is supported on string attributes - Listing collections may indicate only a subset of the data has been provided if a particular deployment has limited the number of entries a query may return ================================== What's New in Version 3.1 (Havana) ================================== These features are considered stable as of July 18, 2013. 
- A token without an explicit scope of authorization is issued if the user does not specify a project and does not have authorization on the project specified by their default project attribute - Introduced a generalized call for getting role assignments, with filtering for user, group, project, domain and role - Introduced a mechanism to opt-out from catalog information during token creation - Added optional bind information to token structure =================================== What's New in Version 3.0 (Grizzly) =================================== These features are considered stable as of February 20, 2013. - Former "Service" and "Admin" APIs (including CRUD operations previously defined in the v2 OS-KSADM extension) are consolidated into a single core API - "Tenants" are now known as "projects" - "Groups": a container representing a collection of users - "Domains": a high-level container for projects, users and groups - "Policies": a centralized repository for policy engine rule sets - "Credentials": generic credential storage per user (e.g. EC2, PKI, SSH, etc.) - Roles can be granted at either the domain or project level - User, group and project names only have to be unique within their owning domain - Retrieving your list of projects (previously ``GET /tenants``) is now explicitly based on your user ID: ``GET /users/{user_id}/projects`` - Tokens explicitly represent user+project or user+domain pairs - Partial updates are performed using the HTTP ``PATCH`` method - Token ID values no longer appear in URLs ============= Relationships ============= The entries within the operations below contain a relationship link, which appears as a valid URI, however these are actually URN (Uniform Resource Name), which are similar to GUID except it uses a URI syntax so that it is easier to be read. These links do not resolve to anything valid, but exist to show a relationship. 
======================= Identity API Operations ======================= This page lists the Identity API operations in the following order: * `Authentication and token management`_ * `Application Credentials`_ * `Credentials`_ * `Domains`_ * `Domain configuration`_ * `Groups`_ * `Policies`_ * `Projects`_ * `Project Tags`_ * `Regions`_ * `Roles`_ * `System Role Assignments`_ * `Service catalog and endpoints`_ * `Unified Limits`_ * `Users`_ * `OS-INHERIT`_ * `OS-PKI (DEPRECATED)`_ .. rest_expand_all:: .. include:: authenticate-v3.inc .. include:: application-credentials.inc .. include:: credentials.inc .. include:: domains.inc .. include:: domains-config-v3.inc .. include:: groups.inc .. include:: inherit.inc .. include:: os-pki.inc .. include:: policies.inc .. include:: projects.inc .. include:: project-tags.inc .. include:: regions-v3.inc .. include:: roles.inc .. include:: system-roles.inc .. include:: service-catalog.inc .. include:: unified_limits.inc .. include:: users.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/inherit.inc0000664000175000017500000003667700000000000021016 0ustar00zuulzuul00000000000000.. -*- rst -*- ================ OS-INHERIT ================ Enables projects to inherit role assignments from either their owning domain or projects that are higher in the hierarchy. (Since API v3.4) The OS-INHERIT extension allows inheritance from both projects and domains. To access project inheritance, the Identity service server must run at least API v3.4. Assign role to user on projects owned by domain =============================================== .. rest_method:: PUT /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects Assigns a role to a user in projects owned by a domain. The inherited role is only applied to the owned projects (both existing and future projects), and will not appear as a role in a domain scoped token. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Assign role to group on projects owned by a domain ================================================== .. rest_method:: PUT /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects The inherited role is only applied to the owned projects (both existing and future projects), and will not appear as a role in a domain scoped token. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 List user's inherited project roles on a domain =============================================== .. rest_method:: GET /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/inherited_to_projects The list only contains those role assignments to the domain that were specified as being inherited to projects within that domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_user_roles_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 Example ~~~~~~~ ..
literalinclude:: samples/admin/user-roles-domain-list-response.json :language: javascript List group's inherited project roles on domain ============================================== .. rest_method:: GET /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects The list only contains those role assignments to the domain that were specified as being inherited to projects within that domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_group_roles_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 Example ~~~~~~~ .. literalinclude:: samples/admin/group-roles-domain-list-response.json :language: javascript Check if user has an inherited project role on domain ===================================================== .. rest_method:: HEAD /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects Checks whether a user has an inherited project role in a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Check if group has an inherited project role on domain ====================================================== .. rest_method:: HEAD /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects Checks whether a group has an inherited project role in a domain.
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Revoke an inherited project role from user on domain ==================================================== .. rest_method:: DELETE /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects Revokes an inherited project role from a user in a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Revoke an inherited project role from group on domain ===================================================== .. rest_method:: DELETE /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects Revokes an inherited project role from a group in a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/domain_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Assign role to user on projects in a subtree ============================================ .. 
rest_method:: PUT /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects The inherited role assignment is anchored to a project and applied to its subtree in the projects hierarchy (both existing and future projects). * Note: The inherited role is not applied to the project itself, and only applied to its subtree projects. * Note: It is possible for a user to have both a regular (non-inherited) and an inherited role assignment on the same project. * Note: The request doesn't require a body, which will be ignored if provided. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Assign role to group on projects in a subtree ============================================= .. rest_method:: PUT /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects The inherited role assignment is anchored to a project and applied to its subtree in the projects hierarchy (both existing and future projects). * Note: The inherited role is not applied to the project itself, and only applied to its subtree projects. * Note: It is possible for a group to have both a regular (non-inherited) and an inherited role assignment on the same project. * Note: The request doesn't require a body, which will be ignored if provided. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - project_id: project_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success status.yaml - 204 Check if user has an inherited project role on project ====================================================== .. rest_method:: HEAD /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects Checks whether a user has a role assignment with the ``inherited_to_projects`` flag in a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Check if group has an inherited project role on project ======================================================= .. rest_method:: HEAD /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects Checks whether a group has a role assignment with the ``inherited_to_projects`` flag in a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - project_id: project_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Revoke an inherited project role from user on project ===================================================== .. rest_method:: DELETE /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_user_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - role_id: role_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 Revoke an inherited project role from group on project ====================================================== .. rest_method:: DELETE /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-INHERIT/1.0/rel/project_group_role_inherited_to_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - project_id: project_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 List role assignments ===================== .. rest_method:: GET /v3/role_assignments Get a list of role assignments. If no query parameters are specified, then this API will return a list of all role assignments. .. literalinclude:: samples/admin/role-assignments-list-response.json :language: javascript Since this list is likely to be very long, this API would typically always be used with one or more of the filter queries. Some typical examples are: ``GET /v3/role_assignments?user.id={user_id}`` would list all role assignments involving the specified user. ``GET /v3/role_assignments?scope.project.id={project_id}`` would list all role assignments involving the specified project. It is also possible to list all role assignments within a tree of projects: ``GET /v3/role_assignments?scope.project.id={project_id}&include_subtree=true`` would list all role assignments involving the specified project and all sub-projects. ``include_subtree=true`` can only be specified in conjunction with ``scope.project.id``, specifying it without this will result in an HTTP 400 Bad Request being returned.
Each role assignment entity in the collection contains a link to the assignment that gave rise to this entity. The scope section in the list response is extended to allow the representation of role assignments that are inherited to projects. .. literalinclude:: samples/admin/role-assignments-list-include-subtree-response.json :language: javascript The query filter ``scope.OS-INHERIT:inherited_to`` can be used to filter based on role assignments that are inherited. The only value of ``scope.OS-INHERIT:inherited_to`` that is currently supported is ``projects``, indicating that this role is inherited to all projects of the owning domain or parent project. If the query parameter ``effective`` is specified, rather than simply returning a list of role assignments that have been made, the API returns a list of effective assignments at the user, project and domain level, having allowed for the effects of group membership, role inference rules as well as inheritance from the parent domain or project. Since the effects of group membership have already been allowed for, the group role assignment entities themselves will not be returned in the collection. Likewise, since the effects of inheritance have already been allowed for, the role assignment entities themselves that specify the inheritance will also not be returned in the collection. This represents the effective role assignments that would be included in a scoped token. The same set of query parameters can also be used in combination with the ``effective`` parameter. For example: ``GET /v3/role_assignments?user.id={user_id}&effective`` would, in other words, answer the question "what can this user actually do?". ``GET /v3/role_assignments?user.id={user_id}&scope.project.id={project_id}&effective`` would return the equivalent set of role assignments that would be included in the token response of a project scoped token. 
An example response for an API call with the query parameter ``effective`` specified is given below: .. literalinclude:: samples/admin/role-assignments-effective-list-response.json :language: javascript The entity ``links`` section of a response using the ``effective`` query parameter also contains, for entities that are included by virtue of group membership, a url that can be used to access the membership of the group. If the query parameter ``include_names`` is specified, rather than simply returning the entity IDs in the role assignments, the collection will additionally include the names of the entities. For example: ``GET /v3/role_assignments?user.id={user_id}&effective&include_names=true`` would return: .. literalinclude:: samples/admin/role-assignments-effective-list-include-names-response.json :language: javascript Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/role_assignments`` Request ------- Parameters ~~~~~~~~~~ Optional query parameters: .. rest_parameters:: parameters.yaml - effective: effective_query - include_names: include_names_query - include_subtree: include_subtree_query - group.id: group_id_query - role.id: role_id_query - scope.domain.id: scope_domain_id_query - scope.OS-INHERIT:inherited_to: scope_os_inherit_inherited_to - scope.project.id: scope_project_id_query - user.id: user_id_query Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/os-pki.inc0000664000175000017500000000060700000000000020536 0ustar00zuulzuul00000000000000.. -*- rst -*- =================== OS-PKI (DEPRECATED) =================== List revoked tokens =================== .. rest_method:: GET /v3/auth/tokens/OS-PKI/revoked Lists revoked PKI tokens. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/tokens/OS-PKI/revoked`` Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: error status.yaml - 410 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/parameters.yaml0000664000175000017500000016635700000000000021707 0ustar00zuulzuul00000000000000# variables in header Openstack-Auth-Receipt: description: | The auth receipt. A partially successful authentication response returns the auth receipt ID in this header rather than in the response body. in: header required: true type: string X-Auth-Token: description: | A valid authentication token for an administrative user. in: header required: true type: string X-Subject-Token: description: | The authentication token. An authentication response returns the token ID in this header rather than in the response body. in: header required: true type: string # variables in path credential_id_path: description: | The UUID for the credential. in: path required: true type: string domain_id_path: description: | The domain ID. in: path required: true type: string endpoint_id_path: description: | The endpoint ID. in: path required: true type: string group_id: description: | The group ID. in: path required: true type: string group_id_path: description: | The group ID. in: path required: true type: string implies_role_id: description: | Role ID for an implied role. in: path required: true type: string limit_id_path: description: | The limit ID. in: path required: true type: string option: description: | The option name. For the ``ldap`` group, a valid value is ``url`` or ``user_tree_dn``. For the ``identity`` group, a valid value is ``driver``. in: path required: true type: string policy_id_path: description: | The policy ID. in: path required: true type: string prior_role_id: description: | Role ID for a prior role. 
in: path required: true type: string project_id_path: description: | The project ID. in: path required: true type: string project_tag_path: description: | A simple string associated with a project. Can be used for assigning values to projects and filtering based on those values. in: path required: true type: string region_id_path: description: | The region ID. in: path required: true type: string registered_limit_id_path: description: | The registered limit ID. in: path required: true type: string request_access_rule_id_path_required: description: | The ID of the access rule. in: path required: true type: string request_access_rule_user_id_path_required: description: | The ID of the user who owns the access rule. in: path required: true type: string request_application_credential_id_path_required: description: | The ID of the application credential. in: path required: true type: string request_application_credential_user_id_path_required: description: | The ID of the user who owns the application credential. in: path required: true type: string role_id: description: | The role ID. in: path required: true type: string role_id_path: description: | The role ID. in: path required: true type: string service_id_path: description: | The service ID. in: path required: true type: string user_id_path: description: | The user ID. in: path required: true type: string # variables in query allow_expired: description: | (Since v3.8) Allow fetching a token that has expired. By default expired tokens return a 404 exception. in: query required: false type: bool domain_enabled_query: description: | If set to true, then only domains that are enabled will be returned, if set to false only that are disabled will be returned. Any value other than ``0``, including no value, will be interpreted as true. in: query required: false type: string domain_id_query: description: | Filters the response by a domain ID. 
in: query required: false type: string domain_name_query: description: | Filters the response by a domain name. in: query required: false type: string effective_query: description: | Returns the effective assignments, including any assignments gained by virtue of group membership. in: query required: false type: key-only (no value required) enabled_user_query: description: | Filters the response by either enabled (``true``) or disabled (``false``) users. in: query required: false type: string group_id_query: description: | Filters the response by a group ID. in: query required: false type: string group_name_query: description: | Filters the response by a group name. in: query required: false type: string idp_id_query: description: | Filters the response by an identity provider ID. in: query required: false type: string include_limits: description: | It should be used together with `parents_as_list` or `subtree_as_list` filter to add the related project's limits into the response body. in: query required: false type: key-only, no value expected include_names_query: description: | If set to true, then the names of any entities returned will be include as well as their IDs. Any value other than ``0`` (including no value) will be interpreted as true. in: query required: false type: boolean min_version: 3.6 include_subtree_query: description: | If set to true, then relevant assignments in the project hierarchy below the project specified in the ``scope.project_id`` query parameter are also included in the response. Any value other than ``0`` (including no value) for ``include_subtree`` will be interpreted as true. in: query required: false type: boolean min_version: 3.6 interface_query: description: | Filters the response by an interface. in: query required: false type: string is_domain_query: description: | If this is specified as true, then only projects acting as a domain are included. Otherwise, only projects that are not acting as a domain are included. 
in: query required: false type: boolean min_version: 3.6 name_user_query: description: | Filters the response by a user name. in: query required: false type: string nocatalog: description: | (Since v3.1) The authentication response excludes the service catalog. By default, the response includes the service catalog. in: query required: false type: string parent_id_query: description: | Filters the response by a parent ID. in: query required: false type: string min_version: 3.4 parent_region_id_query_not_required: description: | Filters the response by a parent region, by ID. in: query required: false type: string parents_as_ids: description: | The entire parent hierarchy will be included as nested dictionaries in the response. It will contain all projects ids found by traversing up the hierarchy to the top-level project. in: query required: false type: key-only, no value expected min_version: 3.4 parents_as_list: description: | The parent hierarchy will be included as a list in the response. This list will contain the projects found by traversing up the hierarchy to the top-level project. The returned list will be filtered against the projects the user has an effective role assignment on. in: query required: false type: key-only, no value expected min_version: 3.4 password_expires_at_query: description: | Filter results based on which user passwords have expired. The query should include an ``operator`` and a ``timestamp`` with a colon (``:``) separating the two, for example:: password_expires_at={operator}:{timestamp} - Valid operators are: ``lt``, ``lte``, ``gt``, ``gte``, ``eq``, and ``neq`` - lt: expiration time lower than the timestamp - lte: expiration time lower than or equal to the timestamp - gt: expiration time higher than the timestamp - gte: expiration time higher than or equal to the timestamp - eq: expiration time equal to the timestamp - neq: expiration time not equal to the timestamp - Valid timestamps are of the form: ``YYYY-MM-DDTHH:mm:ssZ``. 
For example:: /v3/users?password_expires_at=lt:2016-12-08T22:02:00Z The example would return a list of users whose password expired before the timestamp (``2016-12-08T22:02:00Z``). in: query required: false type: string policy_type_query: description: | Filters the response by a MIME media type for the serialized policy blob. For example, ``application/json``. in: query required: false type: string project_enabled_query: description: | If set to true, then only enabled projects will be returned. Any value other than ``0`` (including no value) will be interpreted as true. in: query required: false type: boolean project_name_query: description: | Filters the response by a project name. in: query required: false type: string protocol_id_query: description: | Filters the response by a protocol ID. in: query required: false type: string region_id_query: description: | Filters the response by a region ID. in: query required: false type: string request_application_credential_name_query_not_required: description: | The name of the application credential. Must be unique to a user. in: query required: false type: string request_nocatalog_unscoped_path_not_required: description: | (Since v3.1) nocatalog only works for scoped token. For unscoped token, the authentication response always excludes the service catalog. in: query required: false type: string request_service_name_query_not_required: description: | Filters the response by a service name. in: query required: false type: string resource_name_query: description: | Filters the response by a specified resource name. in: query required: false type: string role_id_query: description: | Filters the response by a role ID. in: query required: false type: string role_name_query: description: | Filters the response by a role name. in: query required: false type: string scope_domain_id_query: description: | Filters the response by a domain ID. 
in: query required: false type: string scope_os_inherit_inherited_to: description: | Filters based on role assignments that are inherited. The only value of ``inherited_to`` that is currently supported is ``projects``. in: query required: false type: string scope_project_id_query: description: | Filters the response by a project ID. in: query required: false type: string scope_system_query: description: | Filters the response by system assignments. in: query required: false type: string service_id_query: description: | Filters the response by a service ID. in: query required: false type: string service_type_query: description: | Filters the response by a service type. A valid value is ``compute``, ``ec2``, ``identity``, ``image``, ``network``, or ``volume``. in: query required: false type: string subtree_as_ids: description: | The entire child hierarchy will be included as nested dictionaries in the response. It will contain all the projects ids found by traversing down the hierarchy. in: query required: false type: key-only, no value expected min_version: 3.4 subtree_as_list: description: | The child hierarchy will be included as a list in the response. This list will contain the projects found by traversing down the hierarchy. The returned list will be filtered against the projects the user has an effective role assignment on. in: query required: false type: key-only, no value expected min_version: 3.4 unique_id_query: description: | Filters the response by a unique ID. in: query required: false type: string user_id_query: description: | Filters the response by a user ID. in: query required: false type: string # variables in body audit_ids: description: | A list of one or two audit IDs. An audit ID is a unique, randomly generated, URL-safe string that you can use to track a token. The first audit ID is the current audit ID for the token. The second audit ID is present for only re-scoped tokens and is the audit ID from the token before it was re-scoped. 
A re-scoped token is one that was exchanged for another token of the same or different scope.
Indicates the accumulated set of authentication methods that were used to obtain the receipt. For example, if the receipt was obtained by password authentication, it contains ``password``. Later, if the receipt is exchanged by using another authentication method one or more times, the subsequently created receipts could contain both ``password`` and ``totp`` in their ``methods`` attribute. in: body required: true type: array auth_methods_token: description: | The authentication method. For token authentication, specify ``token``. in: body required: true type: array auth_methods_totp: description: | The authentication method. For totp authentication, specify ``totp``. in: body required: true type: array auth_token: description: | A ``token`` object. The token authentication method is used. This method is typically used in combination with a request to change authorization scope. in: body required: true type: object auth_token_id: description: | A token ID. in: body required: true type: string catalog: description: | A ``catalog`` object. in: body required: true type: array catalog_response_body_optional: description: | A ``catalog`` object. in: body required: false type: array credential: description: | A ``credential`` object. in: body required: true type: object credential_blob: description: | The credential itself, as a serialized blob. in: body required: true type: string credential_blob_not_required: description: | The credential itself, as a serialized blob. in: body required: false type: string credential_id: description: | The UUID for the credential. in: body required: true type: string credential_links: description: | The links for the ``credential`` resource. in: body required: true type: object credential_type: description: | The credential type, such as ``ec2`` or ``cert``. The implementation determines the list of supported types. 
in: body required: true type: string credential_type_not_required: description: | The credential type, such as ``ec2`` or ``cert``. The implementation determines the list of supported types. in: body required: false type: string credential_user_id: description: | The ID of the user who owns the credential. in: body required: true type: string credential_user_id_not_required: description: | The ID of the user who owns the credential. in: body required: false type: string credentials: description: | A list of ``credential`` objects. in: body required: true type: array credentials_links: description: | The links for the ``credentials`` resource. in: body required: true type: object default_limit: description: | The default limit for the registered limit. in: body required: true type: integer default_project_id_request_body: description: | The ID of the default project for the user. A user's default project must not be a domain. Setting this attribute does not grant any actual authorization on the project, and is merely provided for convenience. Therefore, the referenced project does not need to exist within the user domain. (Since v3.1) If the user does not have authorization to their default project, the default project is ignored at token creation. (Since v3.1) Additionally, if your default project is not valid, a token is issued without an explicit scope of authorization. in: body required: false type: string default_project_id_response_body: description: | The ID of the default project for the user. in: body required: false type: string default_project_id_update_body: description: | The new ID of the default project for the user. in: body required: false type: string description_limit_request_body: description: | The limit description. in: body required: false type: string description_limit_response_body: description: | The limit description. in: body required: true type: string description_region_request_body: description: | The region description. 
in: body required: false type: string description_region_response_body: description: | The region description. in: body required: true type: string description_registered_limit_request_body: description: | The registered limit description. in: body required: false type: string description_registered_limit_response_body: description: | The registered limit description. in: body required: true type: string domain: description: | A ``domain`` object in: body required: true type: object domain_config: description: | A ``config`` object. in: body required: true type: object domain_description_request_body: description: | The description of the domain. in: body required: false type: string domain_description_response_body: description: | The description of the domain. in: body required: true type: string domain_description_update_request_body: description: | The new description of the domain. in: body required: false type: string domain_driver: description: | The Identity backend driver. in: body required: true type: string domain_enabled_request_body: description: | If set to ``true``, domain is created enabled. If set to ``false``, domain is created disabled. The default is ``true``. Users can only authorize against an enabled domain (and any of its projects). In addition, users can only authenticate if the domain that owns them is also enabled. Disabling a domain prevents both of these things. in: body required: false type: string domain_enabled_response_body: description: | If set to ``true``, domain is enabled. If set to ``false``, domain is disabled. in: body required: true type: string domain_enabled_update_request_body: description: | If set to ``true``, domain is enabled. If set to ``false``, domain is disabled. The default is ``true``. Users can only authorize against an enabled domain (and any of its projects). In addition, users can only authenticate if the domain that owns them is also enabled. Disabling a domain prevents both of these things. 
When you disable a domain, all tokens that are authorized for that domain become invalid. However, if you reenable the domain, these tokens become valid again, providing that they haven't expired. in: body required: false type: string domain_id_response_body: description: | The ID of the domain. in: body required: true type: string domain_ldap: description: | An ``ldap`` object. Required to set the LDAP group configuration options. in: body required: true type: object domain_link_response_body: description: | The links to the ``domain`` resource. in: body required: true type: object domain_name_request_body: description: | The name of the domain. in: body required: true type: string domain_name_response_body: description: | The name of the domain. in: body required: true type: string domain_name_update_request_body: description: | The new name of the domain. in: body required: false type: string domain_scope_response_body_optional: description: | A ``domain`` object including the ``id`` and ``name`` representing the domain the token is scoped to. This is only included in tokens that are scoped to a domain. in: body required: false type: object domain_url: description: | The LDAP URL. in: body required: true type: string domain_user_tree_dn: description: | The base distinguished name (DN) of LDAP, from where all users can be reached. For example, ``ou=Users,dc=root,dc=org``. in: body required: true type: string domains: description: | A list of ``domain`` objects in: body required: true type: array email: description: | The email address for the user. in: body required: true type: string enabled_user_request_body: description: | If the user is enabled, this value is ``true``. If the user is disabled, this value is ``false``. in: body required: false type: boolean enabled_user_response_body: description: | If the user is enabled, this value is ``true``. If the user is disabled, this value is ``false``. 
in: body required: true type: boolean enabled_user_update_body: description: | Enables or disables the user. An enabled user can authenticate and receive authorization. A disabled user cannot authenticate or receive authorization. Additionally, all tokens that the user holds become no longer valid. If you reenable this user, pre-existing tokens do not become valid. To enable the user, set to ``true``. To disable the user, set to ``false``. Default is ``true``. in: body required: false type: boolean endpoint: description: | An ``endpoint`` object. in: body required: true type: object endpoint_enabled: description: | Indicates whether the endpoint appears in the service catalog: - ``false``. The endpoint does not appear in the service catalog. - ``true``. The endpoint appears in the service catalog. in: body required: true type: boolean endpoint_enabled_not_required: description: | Defines whether the endpoint appears in the service catalog: - ``false``. The endpoint does not appear in the service catalog. - ``true``. The endpoint appears in the service catalog. Default is ``true``. in: body required: false type: boolean endpoint_id: description: | The endpoint ID. in: body required: true type: string endpoint_interface: description: | The interface type, which describes the visibility of the endpoint. Value is: - ``public``. Visible by end users on a publicly available network interface. - ``internal``. Visible by end users on an unmetered internal network interface. - ``admin``. Visible by administrative users on a secure network interface. in: body required: true type: string endpoint_links: description: | The links for the ``endpoint`` resource. in: body required: true type: object endpoint_name: description: | The endpoint name. in: body required: true type: string endpoint_region: description: | (Deprecated in v3.2) The geographic location of the service endpoint. in: body required: true type: string endpoint_type: description: | The endpoint type. 
in: body required: true type: string endpoint_url: description: | The endpoint URL. in: body required: true type: string endpoints: description: | A list of ``endpoint`` objects. in: body required: true type: array endpoints_links: description: | The links for the ``endpoints`` resource. in: body required: true type: object expires_at: description: | The date and time when the token expires. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss.sssZ For example, ``2015-08-27T09:49:58.000000Z``. A ``null`` value indicates that the token never expires. in: body required: true type: string explicit_unscoped_string: description: | The authorization scope (Since v3.4). Specify ``unscoped`` to make an explicit unscoped token request, which returns an unscoped response without any authorization. This request behaves the same as a token request with no scope where the user has no default project defined. If an explicit, ``unscoped`` token request is not made and the user has authorization to their default project, then the response will return a project-scoped token. If a default project is not defined, a token is issued without an explicit scope of authorization, which is the same as asking for an explicit unscoped token. in: body required: false type: string extra_request_body: description: | The extra attributes of a resource. The actual name ``extra`` is not the key name in the request body, but rather a collection of any attributes that a resource may contain that are not part of the resource's default attributes. Generally these are custom fields that are added to a resource in keystone by operators for their own specific uses, such as ``email`` and ``description`` for users. in: body required: false type: string federated_in_request_body: description: | List of federated objects associated with a user. Each object in the list contains the ``idp_id`` and ``protocols``. 
``protocols`` is a list of objects, each of which contains ``protocol_id`` and ``unique_id`` of the protocol and user respectively. For example:: "federated": [ { "idp_id": "efbab5a6acad4d108fec6c63d9609d83", "protocols": [ {"protocol_id": "mapped", "unique_id": "test@example.com"} ] } ]
The ability to change the domain of a group is now deprecated, and will be removed in a subsequent release.
As a domain, the project provides a name space in which you can create users, groups, and other projects. If set to ``false``, this project behaves as a regular project that contains only resources. in: body required: true type: boolean min_version: 3.6 issued_at: description: | The date and time when the token was issued. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss.sssZ For example, ``2015-08-27T09:49:58.000000Z``. in: body required: true type: string limit: description: | A ``limit`` object in: body required: true type: array limit_id: description: | The limit ID. in: body required: true type: string limit_model_description_required_response_body: description: A short description of the enforcement model used in: body required: true type: string limit_model_name_required_response_body: description: The name of the enforcement model in: body required: true type: string limit_model_required_response_body: description: A model object describing the configured enforcement model used by the deployment. in: body required: true type: object limits: description: | A list of ``limits`` objects in: body required: true type: array link_collection: description: | The link to the collection of resources. in: body required: true type: object link_response_body: description: | The link to the resources in question. in: body required: true type: object links_project: description: | The links for the ``project`` resource. in: body required: true type: object links_region: description: | The links for the ``region`` resource. in: body required: true type: object links_user: description: | The links for the ``user`` resource. in: body required: true type: object membership_expires_at_response_body: description: | The date and time when the group membership expires. A ``null`` value indicates that the membership never expires. in: body required: true type: string min_version: 3.14 original_password: description: | The original password for the user. 
in: body required: true type: string parent_region_id_request_body: description: | To make this region a child of another region, set this parameter to the ID of the parent region. in: body required: false type: string parent_region_id_response_body: description: | To make this region a child of another region, set this parameter to the ID of the parent region. in: body required: true type: string password: description: | The ``password`` object, contains the authentication information. in: body required: true type: object password_expires_at: description: | The date and time when the password expires. The time zone is UTC. This is a response object attribute; not valid for requests. A ``null`` value indicates that the password never expires. in: body required: true type: string min_version: 3.7 password_request_body: description: | The password for the user. in: body required: false type: string policies: description: | A ``policies`` object. in: body required: true type: array policy: description: | A ``policy`` object. in: body required: true type: object policy_blob_obj: description: | The policy rule itself, as a serialized blob. in: body required: true type: object policy_blob_str: description: | The policy rule set itself, as a serialized blob. in: body required: true type: string policy_id: description: | The policy ID. in: body required: true type: string policy_links: description: | The links for the ``policy`` resource. in: body required: true type: object policy_type: description: | The MIME media type of the serialized policy blob. in: body required: true type: string prior_role_body: description: | A prior role object. in: body required: true type: object project: description: | A ``project`` object in: body required: true type: object project_description_request_body: description: | The description of the project. in: body required: false type: string project_description_response_body: description: | The description of the project. 
in: body required: true type: string project_domain_id: description: | The ID of the domain for the project. If you omit the domain ID, default is the domain to which your token is scoped. in: body required: false type: string project_domain_id_request_body: description: | The ID of the domain for the project. For projects acting as a domain, the ``domain_id`` must not be specified, it will be generated by the Identity service implementation. For regular projects (i.e. those not acing as a domain), if ``domain_id`` is not specified, but ``parent_id`` is specified, then the domain ID of the parent will be used. If neither ``domain_id`` or ``parent_id`` is specified, the Identity service implementation will default to the domain to which the client's token is scoped. If both ``domain_id`` and ``parent_id`` are specified, and they do not indicate the same domain, an ``Bad Request (400)`` will be returned. in: body required: false type: string project_domain_id_response_body: description: | The ID of the domain for the project. in: body required: true type: string project_domain_id_update_request_body: description: | The ID of the new domain for the project. The ability to change the domain of a project is now deprecated, and will be removed in subequent release. It is already disabled by default in most Identity service implementations. in: body required: false type: string project_enabled_request_body: description: | If set to ``true``, project is enabled. If set to ``false``, project is disabled. The default is ``true``. in: body required: false type: boolean project_enabled_response_body: description: | If set to ``true``, project is enabled. If set to ``false``, project is disabled. in: body required: true type: boolean project_enabled_update_request_body: description: | If set to ``true``, project is enabled. If set to ``false``, project is disabled. in: body required: false type: boolean project_id: description: | The ID for the project. 
in: body required: true type: string project_name_request_body: description: | The name of the project, which must be unique within the owning domain. A project can have the same name as its domain. in: body required: true type: string project_name_response_body: description: | The name of the project. in: body required: true type: string project_name_update_request_body: description: | The name of the project, which must be unique within the owning domain. A project can have the same name as its domain. in: body required: false type: string project_parent_id_request_body: description: | The ID of the parent of the project. If specified on project creation, this places the project within a hierarchy and implicitly defines the owning domain, which will be the same domain as the parent specified. If ``parent_id`` is not specified and ``is_domain`` is ``false``, then the project will use its owning domain as its parent. If ``is_domain`` is ``true`` (i.e. the project is acting as a domain), then ``parent_id`` must not specified (or if it is, it must be ``null``) since domains have no parents. ``parent_id`` is immutable, and can't be updated after the project is created - hence a project cannot be moved within the hierarchy. in: body required: false type: string min_version: 3.4 project_parent_id_response_body: description: | The ID of the parent for the project. in: body required: true type: string min_version: 3.4 project_scope_response_body_optional: description: | A ``project`` object including the ``id``, ``name`` and ``domain`` object representing the project the token is scoped to. This is only included in tokens that are scoped to a project. in: body required: false type: object project_tags_request_body: description: | A list of simple strings assigned to a project. Tags can be used to classify projects into groups. 
in: body required: false type: array projects: description: | A list of ``project`` objects in: body required: true type: array receipt_expires_at: description: | The date and time when the receipt expires. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss.sssZ For example, ``2015-08-27T09:49:58.000000Z``. A ``null`` value indicates that the receipt never expires. in: body required: true type: string receipt_issued_at: description: | The date and time when the receipt was issued. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss.sssZ For example, ``2015-08-27T09:49:58.000000Z``. in: body required: true type: string region_id_not_required: description: | (Since v3.2) The ID of the region that contains the service endpoint. in: body required: false type: string region_id_request_body: description: | The ID of the region that contains the service endpoint. in: body required: false type: string region_id_required: description: | (Since v3.2) The ID of the region that contains the service endpoint. in: body required: true type: string region_id_response_body: description: | The ID of the region that contains the service endpoint. The value can be None. in: body required: true type: string region_object: description: | A ``region`` object in: body required: true type: object regions_object: description: | A list of ``region`` object in: body required: true type: array registered_limit: description: | A ``registered_limit`` objects in: body required: true type: array registered_limit_id: description: | The registered limit ID. 
in: body required: true type: string registered_limits: description: | A list of ``registered_limits`` objects in: body required: true type: array request_application_credential_access_rules_body_not_required: description: | A list of ``access_rules`` objects in: body required: false type: list request_application_credential_auth_id_body_not_required: description: | The ID of the application credential used for authentication. If not provided, the application credential must be identified by its name and its owning user. in: body required: false type: string request_application_credential_auth_name_body_not_required: description: | The name of the application credential used for authentication. If provided, must be accompanied by a user object. in: body required: false type: string request_application_credential_auth_secret_body_required: description: | The secret for authenticating the application credential. in: body required: true type: string request_application_credential_body_required: description: | An application credential object. in: body required: true type: object request_application_credential_description_body_not_required: description: | A description of the application credential's purpose. in: body required: false type: string request_application_credential_expires_at_body_not_required: description: | An optional expiry time for the application credential. If unset, the application credential does not expire. in: body required: false type: string request_application_credential_name_body_required: description: | The name of the application credential. Must be unique to a user. in: body required: true type: string request_application_credential_roles_body_not_required: description: | An optional list of role objects, identified by ID or name. The list may only contain roles that the user has assigned on the project. If not provided, the roles assigned to the application credential will be the same as the roles in the current token. 
in: body required: false type: array request_application_credential_secret_body_not_required: description: | The secret that the application credential will be created with. If not provided, one will be generated. in: body required: false type: string request_application_credential_unrestricted_body_not_required: description: | An optional flag to restrict whether the application credential may be used for the creation or destruction of other application credentials or trusts. Defaults to false. in: body required: false type: boolean request_application_credential_user_body_not_required: description: | A ``user`` object, required if an application credential is identified by name and not ID. in: body required: false type: object request_default_limit_body_not_required: description: | The default limit for the registered limit. in: body required: false type: integer request_domain_options_body_not_required: description: | The resource options for the domain. Available resource options are ``immutable``. in: body required: false type: object request_explicit_domain_id_body_not_required: description: | The ID of the domain. A domain created this way will not use an auto-generated ID, but will use the ID passed in instead. Identifiers passed in this way must conform to the existing ID generation scheme: UUID4 without dashes. in: body required: false type: string request_limit_domain_id_not_required: description: | The name of the domain. in: body required: false type: string request_limit_project_id_not_required: description: | The ID for the project. in: body required: false type: string request_project_options_body_not_required: description: | The resource options for the project. Available resource options are ``immutable``. in: body required: false type: object request_region_id_registered_limit_body_not_required: description: | The ID of the region that contains the service endpoint. 
Either service_id, resource_name, or region_id must be different than existing value otherwise it will raise 409. in: body required: false type: string request_resource_limit_body_not_required: description: | The override limit. in: body required: false type: integer request_resource_name_body_not_required: description: | The resource name. Either service_id, resource_name or region_id must be different than existing value otherwise it will raise 409. in: body required: false type: string request_role_options_body_not_required: description: | The resource options for the role. Available resource options are ``immutable``. in: body required: false type: object request_service_id_registered_limit_body_not_required: description: | The UUID of the service to update to which the registered limit belongs. Either service_id, resource_name, or region_id must be different than existing value otherwise it will raise 409. in: body required: false type: string required_auth_methods: description: | A list of authentication rules that may be used with the auth receipt to complete the authentication process. in: body required: true type: list of lists resource_limit: description: | The override limit. in: body required: true type: integer resource_name: description: | The resource name. in: body required: true type: string response_access_rules_body: description: | A list of ``access_rules`` objects in: body type: list required: true response_access_rules_id_body: description: | The ID of the access rule in: body type: string required: true response_access_rules_method_body: description: | The request method that the application credential is permitted to use for a given API endpoint. in: body type: string required: true response_access_rules_path_body: description: | The API path that the application credential is permitted to access. 
May use named wildcards such as ``{tag}`` or the unnamed wildcard ``*`` to match against any string in the path up to a ``/``, or the recursive wildcard ``**`` to include ``/`` in the matched path. in: body type: string required: true response_access_rules_service_body: description: | The service type identifier for the service that the application credential is permitted to access. Must be a service type that is listed in the service catalog and not a code name for a service. in: body type: string required: true response_application_credential_access_rules_body: description: | A list of ``access_rules`` objects in: body type: list required: true response_application_credential_body: description: | The application credential object. in: body type: object required: true response_application_credential_description_body: description: | A description of the application credential's purpose. in: body type: string required: true response_application_credential_expires_at_body: description: | The expiration time of the application credential, if one was specified. in: body type: string required: true response_application_credential_id_body: description: | The ID of the application credential. in: body type: string required: true response_application_credential_name_body: description: | The name of the application credential. in: body type: string required: true response_application_credential_project_id_body: description: | The ID of the project the application credential was created for and that authentication requests using this application credential will be scoped to. in: body type: string required: true response_application_credential_roles_body: description: | A list of one or more roles that this application credential has associated with its project. A token using this application credential will have these same roles. 
Available resource options are ``ignore_change_password_upon_first_use``, ``ignore_password_expiry``, ``ignore_lockout_failure_attempts``, ``lock_password``, ``multi_factor_auth_enabled``, ``multi_factor_auth_rules``, and ``ignore_user_inactivity``.
in: body required: true type: object role: description: | A ``role`` object in: body required: true type: object role_assignments: description: | A list of ``role_assignment`` objects. in: body required: true type: array role_description_create_body: description: | Add description about the role. in: body required: false type: string role_description_response_body_required: description: | The role description. in: body required: true type: string role_description_update_body: description: | The new role description. in: body required: false type: string role_domain_id_request_body: description: | The ID of the domain of the role. in: body required: false type: string role_id_response_body: description: | The role ID. in: body required: true type: string role_inference_array_body: description: | An array of ``role_inference`` object. in: body required: true type: array role_inference_body: description: | Role inference object that contains ``prior_role`` object and ``implies`` object. in: body required: true type: object role_name_create_body: description: | The role name. in: body required: true type: string role_name_response_body: description: | The role name. in: body required: true type: string role_name_update_body: description: | The new role name. in: body required: false type: string roles: description: | A list of ``role`` objects in: body required: true type: array scope_string: description: | The authorization scope, including the system (Since v3.10), a project, or a domain (Since v3.4). If multiple scopes are specified in the same request (e.g. ``project`` and ``domain`` or ``domain`` and ``system``) an HTTP ``400 Bad Request`` will be returned, as a token cannot be simultaneously scoped to multiple authorization targets. An ID is sufficient to uniquely identify a project but if a project is specified by name, then the domain of the project must also be specified in order to uniquely identify the project by name. 
An array of ``role_inference`` objects.
The ability to change the domain of a user is now deprecated, and will be removed in a subsequent release.
Available resource options are ``ignore_change_password_upon_first_use``, ``ignore_password_expiry``, ``ignore_lockout_failure_attempts``, ``lock_password``, ``multi_factor_auth_enabled``, ``multi_factor_auth_rules``, and ``ignore_user_inactivity``.
To do so, set ``type`` to ``application/json`` and specify policy rules as JSON strings in a ``blob``. For example: :: { "blob":{ "foobar_user":[ "role:compute-user" ] } } Create policy ============= .. rest_method:: POST /v3/policies Creates a policy. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/policies`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy: policy - type: policy_type - blob: policy_blob_str Example ~~~~~~~ .. literalinclude:: ./samples/admin/policy-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: policy_links - blob: policy_blob_str - policy: policy - type: policy_type - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 List policies ============= .. rest_method:: GET /v3/policies Lists policies. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/policies`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - type: policy_type_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: policy_links - blob: policy_blob_obj - policies: policies - type: policy_type - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/policies-list-response.json :language: javascript Show policy details =================== .. rest_method:: GET /v3/policies/{policy_id} Shows details for a policy. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/policy`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - links: policy_links - blob: policy_blob_obj - policy: policy - type: policy_type - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/policy-show-response.json :language: javascript Update policy ============= .. rest_method:: PATCH /v3/policies/{policy_id} Updates a policy. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/policy`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - policy: policy - type: policy_type - blob: policy_blob_obj Example ~~~~~~~ .. literalinclude:: ./samples/admin/policy-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: policy_links - blob: policy_blob_obj - policy: policy - type: policy_type - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/policy-update-response.json :language: javascript Delete policy ============= .. rest_method:: DELETE /v3/policies/{policy_id} Deletes a policy. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/policy`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/project-tags.inc0000664000175000017500000001762100000000000021742 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ============ Project tags ============ Projects within keystone can be tagged with one to many simple strings. Tags for projects follow the guidelines for resource tags set by the `API Working Group `_. Tags for projects have the following restrictions: .. Note:: - Tags are case sensitive - Forward Slash '/' is not allowed to be in a tag name - Commas ',' are not allowed to be in a tag name in order to simplify requests that specify lists of tags - Each project can have a maximum of 80 tags - Each tag can be a maximum of 255 characters in length .. warning:: We discourage the use of project tags for sensitive information like billing or account codes. By default, access to project tags isn't exclusive to system administrators or users. Domain and project administrators are allowed to tag projects they have authorization to access. Domain and project users (e.g., users with the ``member`` or ``reader`` roles) can view all project tags on all projects within their domain or on projects they are authorized to access. List tags for a project ======================= .. rest_method:: GET /v3/projects/{project_id}/tags Lists all tags within a project. .. note:: HEAD can be used here as well Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - tags: response_body_project_tags_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-tags-list-response.json :language: javascript Modify tag list for a project ============================= .. rest_method:: PUT /v3/projects/{project_id}/tags Modifies the tags for a project. Any existing tags not specified will be deleted. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - tags: response_body_project_tags_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-tags-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - tags: response_body_project_tags_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-tags-update-response.json :language: javascript Remove all tags from a project ============================== .. rest_method:: DELETE /v3/projects/{project_id}/tags Remove all tags from a given project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Check if project contains tag ============================= .. rest_method:: GET /v3/projects/{project_id}/tags/{tag} Checks if a project contains the specified tag. .. note:: HEAD can be used here as well Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - tag: project_tag_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Add single tag to a project =========================== .. rest_method:: PUT /v3/projects/{project_id}/tags/{tag} Creates the specified tag and adds it to the list of tags in the project. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - tag: project_tag_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Delete single tag from project ============================== .. rest_method:: DELETE /v3/projects/{project_id}/tags/{tag} Remove a single tag from a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - tag: project_tag_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 =============================== Filtering and searching by tags =============================== Projects can be searched or filtered by tags. The following table and examples define how to filter projects by tags. Filters can also be combined for more complex searching. .. list-table:: :widths: 100 250 :header-rows: 1 * - Tag Query - Description * - tags - Projects that contain all of the specified tags * - tags-any - Projects that contain at least one of the specified tags * - not-tags - Projects that do not contain exactly all of the specified tags * - not-tags-any - Projects that do not contain any one of the specified tags To request the list of projects that have a single tag, the ``tags`` query parameter should be set to the desired tag name. The following example returns projects with the "foo" tag: .. code-block:: bash GET /v3/projects?tags=foo To request the list of projects that have two or more tags, the ``tags`` argument should be set to the list of tags, separated by commas. 
In this situation, the tags given must all be present for a project to be included in the query result. The following example returns projects that have the "foo" and "bar" tags: .. code-block:: bash GET /v3/projects?tags=foo,bar To request the list of projects that have at least one tag from a given list, the ``tags-any`` argument should be set to the list of tags, separated by commas. In this situation as long as one of the given tags is present, the project will be included in the query result. The following example returns projects that have the “foo” OR “bar” tag: .. code-block:: bash GET /v3/projects?tags-any=foo,bar To request the list of projects that do not have a list of tags, the ``not-tags`` argument should be set to the list of tags, separated by commas. In this situation, the tags given must all be absent for a project to be included in the query result. The following example returns projects that do not have the “foo” nor the “bar” tag: .. code-block:: bash GET /v3/projects?not-tags=foo,bar To request the list of projects that do not have at least one of a list of tags, the ``not-tags-any`` argument should be set to the list of tags, separated by commas. In this situation, as long as one of the given tags is absent, the project will be included in the query result. Example The following example returns projects that do not have the “foo” tag or do not have the “bar” tag: .. code-block:: bash GET /v3/projects?not-tags-any=foo,bar The ``tags``, ``tags-any``, ``not-tags`` and ``not-tags-any`` arguments can be combined to build more complex queries. The following example returns projects that have the “foo” and “bar” tags, plus at least one of “red” and “blue”: .. 
For instance, MySQL's restrictions state that UTF-8 support is constrained to the characters in the Basic Multilingual Plane (BMP).
Supplementary characters are not permitted. Note that this last restriction is generally true for all ``names`` within resources of the Identity API. Creating a project without using a domain scoped token, i.e. using a project scoped token or a system scoped token, and also without specifying a domain or domain_id, the project will automatically be created on the default domain. List projects ============= .. rest_method:: GET /v3/projects Lists projects. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_query - enabled: project_enabled_query - is_domain: is_domain_query - name: project_name_query - parent_id: parent_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - projects: projects - is_domain: is_domain_response_body - description: project_description_response_body - domain_id: project_domain_id_response_body - enabled: project_enabled_response_body - id: project_id - links: link_response_body - name: project_name_response_body - parent_id: project_parent_id_response_body - tags: response_body_project_tags_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/projects-list-response.json :language: javascript Create project ============== .. rest_method:: POST /v3/projects Creates a project, where the project may act as a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/projects`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - project: project - name: project_name_request_body - is_domain: is_domain_request_body - description: project_description_request_body - domain_id: project_domain_id_request_body - enabled: project_enabled_request_body - parent_id: project_parent_id_request_body - tags: project_tags_request_body - options: request_project_options_body_not_required Examples ~~~~~~~~ Sample for creating a regular project: .. literalinclude:: ./samples/admin/project-create-request.json :language: javascript Sample for creating a project that also acts as a domain: .. literalinclude:: ./samples/admin/project-create-domain-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project: project - is_domain: is_domain_response_body - description: project_description_response_body - domain_id: project_domain_id_response_body - enabled: project_enabled_response_body - id: project_id - links: link_response_body - name: project_name_response_body - parent_id: project_parent_id_response_body - options: response_project_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Show project details ==================== .. rest_method:: GET /v3/projects/{project_id} Shows details for a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - parents_as_list: parents_as_list - subtree_as_list: subtree_as_list - parents_as_ids: parents_as_ids - subtree_as_ids: subtree_as_ids - include_limits: include_limits Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - project: project - is_domain: is_domain_response_body - description: project_description_response_body - domain_id: project_domain_id_response_body - enabled: project_enabled_response_body - id: project_id - links: link_response_body - name: project_name_response_body - parent_id: project_parent_id_response_body - options: response_project_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-show-response.json :language: javascript Example with ``parents_as_list`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/admin/project-show-parents-response.json :language: javascript Example with ``subtree_as_list`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/admin/project-show-subtree-response.json :language: javascript Update project ============== .. rest_method:: PATCH /v3/projects/{project_id} Updates a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - project: project - name: project_name_update_request_body - is_domain: is_domain_request_body - description: project_description_request_body - domain_id: project_domain_id_update_request_body - enabled: project_enabled_update_request_body - tags: project_tags_request_body - options: request_project_options_body_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - project: project - is_domain: is_domain_response_body - description: project_description_response_body - domain_id: project_domain_id_response_body - enabled: project_enabled_response_body - id: project_id - name: project_name_response_body - links: link_response_body - parent_id: project_parent_id_response_body - options: response_project_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-update-response.json :language: javascript Delete project ============== .. rest_method:: DELETE /v3/projects/{project_id} Deletes a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/regions-v3.inc0000664000175000017500000001271400000000000021332 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Regions ========= A region is a general division of an OpenStack deployment. You can associate zero or more sub-regions with a region to create a tree- like structured hierarchy. Although a region does not have a geographical connotation, a deployment can use a geographical name for a region ID, such as ``us- east``. You can list, create, update, show details for, and delete regions. Show region details =================== .. rest_method:: GET /v3/regions/{region_id} Shows details for a region, by ID. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/regions`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - region_id: region_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region: region_object - description: description_region_response_body - id: id_region_response_body - links: links_region - parent_region_id: parent_region_id_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/region-show-response.json :language: javascript Update region ============= .. rest_method:: PATCH /v3/regions/{region_id} Updates a region. You can update the description or parent region ID for a region. You cannot update the region ID. The following error might occur: - ``Not Found (404)``. The parent region ID does not exist. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/region`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region_id: region_id_path - region: region_object - description: description_region_request_body - parent_region_id: parent_region_id_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/region-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region: region_object - description: description_region_response_body - id: id_region_response_body - links: links_region - parent_region_id: parent_region_id_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/region-update-response.json :language: javascript Delete region ============= .. rest_method:: DELETE /v3/regions/{region_id} Deletes a region. The following error might occur: - ``Conflict (409)``. The region cannot be deleted because it has child regions. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/region`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region_id: region_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 List regions ============ .. rest_method:: GET /v3/regions Lists regions. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/regions`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - parent_region_id: parent_region_id_query_not_required Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - regions: regions_object - description: description_region_response_body - id: id_region_response_body - links: links_region - parent_region_id: parent_region_id_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/admin/regions-list-response.json :language: javascript Create region ============= .. rest_method:: POST /v3/regions Creates a region. When you create the region, you can optionally specify a region ID. If you include characters in the region ID that are not allowed in a URI, you must URL-encode the ID. If you omit an ID, the API assigns an ID to the region. The following errors might occur: - ``Not Found (404)``. The parent region ID does not exist. - ``Conflict (409)``. The parent region ID would form a circular relationship. - ``Conflict (409)``. The user-defined region ID is not unique to the OpenStack deployment. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/regions`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - region: region_object - description: description_region_request_body - id: id_region_resquest_body - parent_region_id: parent_region_id_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/region-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region: region_object - description: description_region_response_body - id: id_region_response_body - links: links_region - parent_region_id: parent_region_id_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/roles.inc0000664000175000017500000006333200000000000020464 0ustar00zuulzuul00000000000000.. -*- rst -*- ===== Roles ===== OpenStack services typically determine whether a user's API request should be allowed using Role Based Access Control (RBAC). For OpenStack this means the service compares the roles that user has on the project (as indicated by the roles in the token), against the roles required for the API in question (as defined in the service's policy file). A user obtains roles on a project by having these assigned to them via the Identity service API. Roles must initially be created as entities via the Identity services API and, once created, can then be assigned. You can assign roles to a user or group on a project, including projects owned by other domains. You can also assign roles to a user or group on a domain, although this is only currently relevant for using a domain scoped token to execute domain-level Identity service API requests. The creation, checking and deletion of role assignments is done with each of the attributes being specified in the URL. 
For example to assign a role to a user on a project:: PUT /v3/projects/{project_id}/users/{user_id}/roles/{role_id} You can also list roles assigned to the system, or to a specified domain, project, or user using this form of API, however a more generalized API for list assignments is provided where query parameters are used to filter the set of assignments returned in the collection. For example: - List role assignments for the specified user:: GET /role_assignments?user.id={user_id} - List role assignments for the specified project:: GET /role_assignments?scope.project.id={project_id} - List system role assignments for a specific user:: GET /role_assignments?scope.system=all?user.id={user_id} - List system role assignments for all users and groups:: GET /role_assignments?scope.system=all Since Identity API v3.10, you can grant role assignments to users and groups on an entity called the ``system``. The role assignment API also supports listing and filtering role assignments on the system. Since Identity API v3.6, you can also list all role assignments within a tree of projects, for example the following would list all role assignments for a specified project and its sub-projects:: GET /role_assignments?scope.project.id={project_id}&include_subtree=true If you specify ``include_subtree=true``, you must also specify the ``scope.project.id``. Otherwise, this call returns the ``Bad Request (400)`` response code. Each role assignment entity in the collection contains a link to the assignment that created the entity. As mentioned earlier, role assignments can be made to a user or a group on a particular project, domain, or the entire system. A user who is a member of a group that has a role assignment, will also be treated as having that role assignment by virtue of their group membership. 
The *effective* role assignments of a user (on a given project or domain) therefore consists of any direct assignments they have, plus any they gain by virtue of membership of groups that also have assignments on the given project or domain. This set of effective role assignments is what is placed in the token for reference by services wishing to check policy. You can list the effective role assignments using the ``effective`` query parameter at the user, project, and domain level: - Determine what a user can actually do:: GET /role_assignments?user.id={user_id}&effective - Get the equivalent set of role assignments that are included in a project-scoped token response:: GET /role_assignments?user.id={user_id}&scope.project.id={project_id}&effective When listing in effective mode, since the group assignments have been effectively expanded out into assignments for each user, the group role assignment entities themselves are not returned in the collection. However, in the response, the ``links`` entity section for each assignment gained by virtue of group membership will contain a URL that enables access to the membership of the group. By default only the IDs of entities are returned in collections from the role_assignment API calls. The names of entities may also be returned, in addition to the IDs, by using the ``include_names`` query parameter on any of these calls, for example: - List role assignments including names:: GET /role_assignments?include_names List roles ========== .. rest_method:: GET /v3/roles Lists roles. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: role_name_query - domain_id: domain_id_query Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - links: link_collection - roles: roles - domain_id: domain_id_response_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body - description: role_description_response_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/roles-list-response.json :language: javascript Create role =========== .. rest_method:: POST /v3/roles Creates a role. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role: role - name: role_name_create_body - domain_id: role_domain_id_request_body - description: role_description_create_body - options: request_role_options_body_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-create-request.json :language: javascript Example for Domain Specific Role ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/admin/domain-specific-role-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role: role - domain_id: domain_id_response_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body - description: role_description_response_body_required - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Show role details ================= .. rest_method:: GET /v3/roles/{role_id} Shows details for a role. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_id: role_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - role: role - domain_id: domain_id_response_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body - description: role_description_response_body_required - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-show-response.json :language: javascript Update role =========== .. rest_method:: PATCH /v3/roles/{role_id} Updates a role. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_id: role_id_path - role: role - name: role_name_update_body - description: role_description_update_body - options: request_role_options_body_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role: role - domain_id: domain_id_response_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body - description: role_description_response_body_required - options: response_role_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-update-response.json :language: javascript Delete role =========== .. rest_method:: DELETE /v3/roles/{role_id} Deletes a role. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List role assignments for group on domain ========================================= .. rest_method:: GET /v3/domains/{domain_id}/groups/{group_id}/roles Lists role assignments for a group on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_group_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-group-roles-list-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?group.id={group_id}&scope.domain.id={domain_id} Assign role to group on domain ============================== .. rest_method:: PUT /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} Assigns a role to a group on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Check whether group has role assignment on domain ================================================= .. rest_method:: HEAD /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} Validates that a group has a role assignment on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_group_role`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Unassign role from group on domain ================================== .. rest_method:: DELETE /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} Unassigns a role from a group on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List role assignments for user on domain ======================================== .. rest_method:: GET /v3/domains/{domain_id}/users/{user_id}/roles Lists role assignments for a user on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_user_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - user_id: user_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - roles: roles - id: role_id_response_body - links: link_response_body - name: role_name_response_body Status Codes ~~~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/domain-user-roles-list-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?user.id={user_id}&scope.domain.id={domain_id} Assign role to user on domain ============================= .. 
rest_method:: PUT /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} Assigns a role to a user on a domain. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/index.html#assign-role-to-user-on-domain`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Check whether user has role assignment on domain ================================================ .. rest_method:: HEAD /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} Validates that a user has a role assignment on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Unassigns role from user on domain ================================== .. rest_method:: DELETE /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} Unassigns a role from a user on a domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/domain_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - domain_id: domain_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 List role assignments for group on project ========================================== .. rest_method:: GET /v3/projects/{project_id}/groups/{group_id}/roles Lists role assignments for a group on a project. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-group-roles-list-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?group.id={group_id}&scope.project.id={project_id} Assign role to group on project =============================== .. rest_method:: PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} Assigns a role to a group on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Check whether group has role assignment on project ================================================== .. rest_method:: HEAD /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} Validates that a group has a role assignment on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Unassign role from group on project =================================== .. 
rest_method:: DELETE /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} Unassigns a role from a group on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List role assignments for user on project ========================================= .. rest_method:: GET /v3/projects/{project_id}/users/{user_id}/roles Lists role assignments for a user on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/project-user-roles-list-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?user.id={user_id}&scope.project.id={project_id} Assign role to user on project ============================== .. rest_method:: PUT /v3/projects/{project_id}/users/{user_id}/roles/{role_id} Assigns a role to a user on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Check whether user has role assignment on project ================================================= .. rest_method:: HEAD /v3/projects/{project_id}/users/{user_id}/roles/{role_id} Validates that a user has a role on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Unassign role from user on project ================================== .. rest_method:: DELETE /v3/projects/{project_id}/users/{user_id}/roles/{role_id} Unassigns a role from a user on a project. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/project_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List implied (inference) roles for role ======================================= .. rest_method:: GET /v3/roles/{prior_role_id}/implies Lists implied (inference) roles for a role. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#list-implied-roles-for-role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - prior_role_id: prior_role_id Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_inference: role_inference_body - prior_role: prior_role_body - implies: implies_role_array_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/list-implied-roles-for-role-response.json :language: javascript Create role inference rule ========================== .. rest_method:: PUT /v3/roles/{prior_role_id}/implies/{implies_role_id} Creates a role inference rule. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#create-role-inference-rule`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - prior_role_id: prior_role_id - implies_role_id: implies_role_id Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_inference: role_inference_body - prior_role: prior_role_body - implies: implies_role_object_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/create-role-inferences-response.json :language: javascript Get role inference rule ======================= .. rest_method:: GET /v3/roles/{prior_role_id}/implies/{implies_role_id} Gets a role inference rule. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#get-role-inference-rule`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - prior_role_id: prior_role_id - implies_role_id: implies_role_id Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_inference: role_inference_body - prior_role: prior_role_body - implies: implies_role_object_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ .. 
literalinclude:: ./samples/admin/get-role-inferences-response.json :language: javascript Confirm role inference rule =========================== .. rest_method:: HEAD /v3/roles/{prior_role_id}/implies/{implies_role_id} Checks a role role inference rule. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#confirm-role-inference-rule`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - prior_role_id: prior_role_id - implies_role_id: implies_role_id Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ Status: 204 No Content Delete role inference rule ========================== .. rest_method:: DELETE /v3/roles/{prior_role_id}/implies/{implies_role_id} Deletes a role inference rule. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#delete-role-inference-rule`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - prior_role_id: prior_role_id - implies_role_id: implies_role_id Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ Status: 204 No Content List role assignments ===================== .. rest_method:: GET /v3/role_assignments Lists role assignments. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/role_assignments`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - effective: effective_query - include_names: include_names_query - include_subtree: include_subtree_query - group.id: group_id_query - role.id: role_id_query - scope.system: scope_system_query - scope.domain.id: scope_domain_id_query - scope.project.id: scope_project_id_query - user.id: user_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_assignments: role_assignments Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-assignments-list-response.json :language: javascript List all role inference rules ============================= .. rest_method:: GET /v3/role_inferences Lists all role inference rules. Relationship: ``https://developer.openstack.org/api-ref/identity/v3/#list-all-role-inference-rules`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - role_inferences: role_inference_array_body - prior_role: prior_role_body - implies: implies_role_object_body - id: role_id_response_body - links: link_response_body - name: role_name_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/role-inferences-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/source/v3/samples/0000775000175000017500000000000000000000000020302 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4661145 keystone-26.0.0/api-ref/source/v3/samples/admin/0000775000175000017500000000000000000000000021372 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/access-rule-get-response.json0000664000175000017500000000041300000000000027102 0ustar00zuulzuul00000000000000{ "access_rule": { "path": "/v2.0/metrics", "links": { "self": "https://example.com/identity/v3/access_rules/07d719df00f349ef8de77d542edf010c" }, "id": "07d719df00f349ef8de77d542edf010c", "service": "monitoring", "method": "GET" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/access-rules-list-response.json0000664000175000017500000000070200000000000027462 0ustar00zuulzuul00000000000000{ "links": { "self": "https://example.com/identity/v3/users/3e0716aefcad4b129a0f19f95ec9489e/access_rules", "previous": null, "next": null }, "access_rules": [ { "path": "/v2.0/metrics", "links": { "self": "https://example.com/identity/v3/access_rules/07d719df00f349ef8de77d542edf010c" }, "id": "07d719df00f349ef8de77d542edf010c", "service": "monitoring", "method": "GET" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/application-credential-create-request.json0000664000175000017500000000075000000000000031631 0ustar00zuulzuul00000000000000{ "application_credential": { "name": "monitoring", "secret": "rEaqvJka48mpv", "description": "Application credential for monitoring.", "expires_at": "2018-02-27T18:30:59Z", "roles": [ {"name": "Reader"} ], "access_rules": [ { "path": "/v2.0/metrics", "method": "GET", "service": "monitoring" } ], "unrestricted": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/application-credential-create-response.json0000664000175000017500000000146400000000000032002 0ustar00zuulzuul00000000000000{ "application_credential": { "description": "Application credential for monitoring.", "roles": [ { "id": "6aff702516544aeca22817fd3bc39683", "domain_id": null, "name": "Reader" } ], "access_rules": [ { "path": "/v2.0/metrics", "id": "07d719df00f349ef8de77d542edf010c", "service": "monitoring", "method": "GET" } ], "links": { "self": "http://example.com/identity/v3/users/fd786d56402c4d1691372e7dee0d00b5/application_credentials/58d61ff8e6e34accb35874016d1dba8b" }, "expires_at": "2018-02-27T18:30:59.000000", "unrestricted": false, 
"secret": "rEaqvJka48mpv", "project_id": "231c62fb0fbd485b995e8b060c3f0d98", "id": "58d61ff8e6e34accb35874016d1dba8b", "name": "monitoring" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/application-credential-get-response.json0000664000175000017500000000142500000000000031313 0ustar00zuulzuul00000000000000{ "application_credential": { "description": "Application credential for monitoring.", "roles": [ { "id": "6aff702516544aeca22817fd3bc39683", "domain_id": null, "name": "Reader" } ], "access_rules": [ { "path": "/v2.0/metrics", "id": "07d719df00f349ef8de77d542edf010c", "service": "monitoring", "method": "GET" } ], "links": { "self": "http://example.com/identity/v3/users/fd786d56402c4d1691372e7dee0d00b5/application_credentials/58d61ff8e6e34accb35874016d1dba8b" }, "expires_at": "2018-02-27T18:30:59.000000", "unrestricted": false, "project_id": "231c62fb0fbd485b995e8b060c3f0d98", "id": "58d61ff8e6e34accb35874016d1dba8b", "name": "monitoring" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/application-credential-list-response.json0000664000175000017500000000367200000000000031515 0ustar00zuulzuul00000000000000{ "links": { "self": "http://example.com/identity/v3/users/fd786d56402c4d1691372e7dee0d00b5/application_credentials", "previous": null, "next": null }, "application_credentials": [ { "description": "Application credential for backups.", "roles": [ { "domain_id": null, "name": "Writer", "id": "6aff702516544aeca22817fd3bc39683" } ], "access_rules": [ ], "links": { "self": "http://example.com/identity/v3/users/fd786d56402c4d1691372e7dee0d00b5/application_credentials/308a7e905eee4071aac5971744c061f6" }, "expires_at": "2018-02-27T18:30:59.000000", "unrestricted": false, "project_id": "231c62fb0fbd485b995e8b060c3f0d98", "id": 
"308a7e905eee4071aac5971744c061f6", "name": "backups" }, { "description": "Application credential for monitoring.", "roles": [ { "id": "6aff702516544aeca22817fd3bc39683", "domain_id": null, "name": "Reader" } ], "access_rules": [ { "path": "/v2.0/metrics", "id": "07d719df00f349ef8de77d542edf010c", "service": "monitoring", "method": "GET" } ], "links": { "self": "http://example.com/identity/v3/users/fd786d56402c4d1691372e7dee0d00b5/application_credentials/58d61ff8e6e34accb35874016d1dba8b" }, "expires_at": "2018-02-27T18:30:59.000000", "unrestricted": false, "project_id": "231c62fb0fbd485b995e8b060c3f0d98", "id": "58d61ff8e6e34accb35874016d1dba8b", "name": "monitoring" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-application-credential-id-request.json0000664000175000017500000000044300000000000031720 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "application_credential" ], "application_credential": { "id": "423f19a4ac1e4f48bbb4180756e6eb6c", "secret": "rEaqvJka48mpv" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-application-credential-name-request.json0000664000175000017500000000057100000000000032246 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "application_credential" ], "application_credential": { "name": "monitoring", "secret": "rEaqvJka48mpv", "user": { "id": "423f19a4ac1e4f48bbb4180756e6eb6c" } } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-application-credential-response.json0000664000175000017500000000363700000000000031504 0ustar00zuulzuul00000000000000{ "token": { "application_credential": { "id": "423f19a4ac1e4f48bbb4180756e6eb6c", "name": "monitoring", 
"restricted": true }, "audit_ids": [ "9JsolhssRzKfyayTIiCRUg" ], "catalog": [ { "endpoints": [ { "region_id": "RegionOne", "url": "http://example.com/identity", "region": "RegionOne", "interface": "admin", "id": "81737f23cd8f45169fcd700cb658c8ad" }, { "region_id": "RegionOne", "url": "http://example.com/identity", "region": "RegionOne", "interface": "public", "id": "a7b9155184ed4607853304408e7e8d32" } ], "type": "identity", "id": "408af8b8554248fc8d686bef54ae3bf6", "name": "keystone" } ], "expires_at": "2018-01-15T22:14:05.000000Z", "is_domain": false, "issued_at": "2018-01-15T21:14:05.000000Z", "methods": [ "application_credential" ], "project": { "domain": { "id": "default", "name": "Default" }, "id": "231c62fb0fbd485b995e8b060c3f0d98", "name": "demo" }, "roles": [ { "id": "df8b7e3bf6fb49e9ba19122da2bae916", "name": "Member" } ], "user": { "password_expires_at": null, "domain": { "id": "default", "name": "Default" }, "id": "fd786d56402c4d1691372e7dee0d00b5", "name": "demo" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-explicit-unscoped-request.json0000664000175000017500000000052700000000000032215 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": "unscoped" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-explicit-unscoped-response.json0000664000175000017500000000100100000000000032347 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "password" ], "expires_at": "2015-11-09T01:42:57.527363Z", "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": null }, "audit_ids": [ 
"lC2Wj1jbQe-dLjLyOx4qPQ" ], "issued_at": "2015-11-09T00:42:57.527404Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-project-scoped-response.json0000664000175000017500000004036600000000000031652 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "password" ], "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "expires_at": "2015-11-07T02:58:43.578887Z", "project": { "domain": { "id": "default", "name": "Default" }, "id": "a6944d763bf64ee6a275f1263fae0352", "name": "admin" }, "is_domain": false, "catalog": [ { "endpoints": [ { "region_id": "RegionOne", "url": "http://example.com/identity/v2.0", "region": "RegionOne", "interface": "public", "id": "068d1b359ee84b438266cb736d81de97" }, { "region_id": "RegionOne", "url": "http://example.com/identity_v2_admin/v2.0", "region": "RegionOne", "interface": "admin", "id": "8bfc846841ab441ca38471be6d164ced" }, { "region_id": "RegionOne", "url": "http://example.com/identity/v2.0", "region": "RegionOne", "interface": "internal", "id": "beb6d358c3654b4bada04d4663b640b9" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "admin", "id": "ae36c0dbb0634e1dbf711f9fc2359975" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "d286b51530144d90a4de52d214d3ad1e" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "d6e681dd4aab4ae5a0937ed60bb4ae33" } ], "type": "compute_legacy", "id": "1c4bfbabe3b346b1bbe27a4b3258964f", "name": "nova_legacy" }, { "endpoints": [ { "region_id": "RegionOne", 
"url": "http://23.253.248.171:8776/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "2dce7429526e44808235fe918063a914" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "a9a9929e6dc645c882ac1abd8bf73d38" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v2/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "admin", "id": "c7d5f958df7f4c8da84db91094bdc198" } ], "type": "volumev2", "id": "202382a1b8a94210bb3120af958092c4", "name": "cinderv2" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8080", "region": "RegionOne", "interface": "admin", "id": "29b58f1406804c8180ccc01793ff8038" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8080/v1/AUTH_a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "4c2c2968008c4e77973a5922e192d982" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8080/v1/AUTH_a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "f6e7b28008bf41eaa114176a15ac1410" } ], "type": "object-store", "id": "52fecdef9ad543779c1312392cc2b115", "name": "swift" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "public", "id": "6a2840dc63bf433592cd8bca2183eb3c" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "internal", "id": "7967cf45f8ab439a80cf24420e5ffd0e" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "admin", "id": "84943ce595264303bd44e5d6d79bea7b" } ], "type": "network", "id": "67b993549db94296a853d635b48db3c9", "name": "neutron" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "public", "id": 
"2896609ef89741148bbd8c93babf5a12" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "internal", "id": "30de385478fe4325849f98d1e45bc5e6" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "admin", "id": "41256dc4b3c942daa383f940a9a56536" } ], "type": "messaging", "id": "6fc9cc3e6b3843b899478554f9e297d3", "name": "zaqar" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "admin", "id": "07ea5fe3ae784001a73f131fb1764bf4" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "internal", "id": "31e709ecb15d4881806dbced4eb3e60e" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "public", "id": "a0c2a150a6ae4bbc85f1d428b9d78a1b" } ], "type": "messaging-websocket", "id": "816031f798cc4ac7879eda0cf9cf033a", "name": "zaqar-websocket" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", "interface": "internal", "id": "24df0277c2b6499ea6051bea8c59ff74" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", "interface": "public", "id": "438f4b3f3c314bbf988f1442cc3ddfa5" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", "interface": "admin", "id": "90a1c9fab54c452fa02a59ff87165029" } ], "type": "ec2", "id": "915e2a8b1f314d55bba28432c9d5c1de", "name": "ec2" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "admin", "id": "2511589f262a407bb0071a814a480af4" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "9cf9209ae4fc4673a7295611001cf0ae" }, { 
"region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "d200b2509e1343e3887dcc465b4fa534" } ], "type": "compute", "id": "a226b3eeb5594f50bf8b6df94636ed28", "name": "nova" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "bf2fe80c2a614e438d3e55b00e85b9ff" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "admin", "id": "bfc9615fc24e4045aaf719f060984bf1" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "c76cf9930b0f4ccea6b1157f80119cfc" } ], "type": "orchestration", "id": "a5f7070bda40443fa3819fbdf1689af1", "name": "heat" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "internal", "id": "3e321c2c6fa04152b3e86c18b91b93ae" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "admin", "id": "55aef0f2557449d4946dc9461b73a63b" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/a6944d763bf64ee6a275f1263fae0352", "region": "RegionOne", "interface": "public", "id": "7c91a66a200e458ca6e4e00fddf4d98b" } ], "type": "volume", "id": "b6b5edc3fc384b6787149e91b3b31988", "name": "cinder" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:9292", "region": "RegionOne", "interface": "public", "id": "512c10d230874ad295662157eeab0135" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9292", "region": "RegionOne", "interface": "internal", "id": "614b1ab241da47a8b3a4e8f67b771446" }, { "region_id": "RegionOne", "url": 
"http://23.253.248.171:9292", "region": "RegionOne", "interface": "admin", "id": "9cef78a4286c42f3b977fbe4d5f927a6" } ], "type": "image", "id": "d512f8860c0f45cf99b1c3cef86cfd97", "name": "glance" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "internal", "id": "7f776d5a83d346b48e519555362b1da6" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "public", "id": "8303a7225a2d439fa39905c6a20202c3" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "admin", "id": "942fa998d1c644e0b0c085d5a0995a13" } ], "type": "cloudformation", "id": "ed0805af6ee54a19ad7e5add8465ac41", "name": "heat-cfn" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" }, "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw" ], "issued_at": "2015-11-07T01:58:43.578929Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-system-scoped-request-with-domain.json0000664000175000017500000000074200000000000033572 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "name": "admin", "domain": { "name": "Default" }, "password": "devstacker" } } }, "scope": { "system": { "all": true } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-unscoped-request-with-domain.json0000664000175000017500000000060200000000000032606 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "name": "admin", "domain": { "name": "Default" }, "password": "devstacker" } } } } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-unscoped-request.json0000664000175000017500000000047200000000000030375 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "423f19a4ac1e4f48bbb4180756e6eb6c", "password": "devstacker" } } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-unscoped-response.json0000664000175000017500000000100100000000000030530 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "password" ], "expires_at": "2015-11-06T15:32:17.893769Z", "user": { "domain": { "id": "default", "name": "Default" }, "id": "423f19a4ac1e4f48bbb4180756e6eb6c", "name": "admin", "password_expires_at": null }, "audit_ids": [ "ZzZwkUflQfygX7pdYDBCQQ" ], "issued_at": "2015-11-06T14:32:17.893797Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-password-user-name-unscoped-response-HTTP.txt0000664000175000017500000000042000000000000033051 0ustar00zuulzuul00000000000000HTTP/1.1 201 Created Date: Fri, 06 Nov 2015 14:29:56 GMT Server: Apache/2.4.7 (Ubuntu) X-Subject-Token: ffe47524401e4d61adc7310f7e5b6191 Vary: X-Auth-Token x-openstack-request-id: req-f2f3f6ca-e342-4cd8-bc12-71a5436ef5fc Content-Length: 297 Content-Type: application/json ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-token-explicit-unscoped-request.json0000664000175000017500000000033600000000000031471 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": "unscoped" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-token-scoped-request.json0000664000175000017500000000047700000000000027315 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "project": { "id": "5b50efd009b540559104ee3c03bbb2b7" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-token-scoped-response.json0000664000175000017500000004036300000000000027461 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "token" ], "roles": [ { "id": "5090055d6bd547dc83e0e8f070803708", "name": "admin" } ], "expires_at": "2015-11-05T22:00:11.000000Z", "project": { "domain": { "id": "default", "name": "Default" }, "id": "5b50efd009b540559104ee3c03bbb2b7", "name": "admin" }, "is_domain": false, "catalog": [ { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:9292", "region": "RegionOne", "interface": "admin", "id": "b2605da9b25943beb49b2bd86aca2202" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9292", "region": "RegionOne", "interface": "public", "id": "c4d1184caf8c4351bff4bf502a09684e" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9292", "region": "RegionOne", "interface": "internal", "id": "cd73bda89e3948738c2721a8c3acac54" } ], "type": "image", "id": "495df2483dc145dbb6b34bfbdd787aae", "name": "glance" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", "interface": "internal", "id": "7d03218a7f4246e8b9e3992318bf5397" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", "interface": "public", "id": "9ad7f8ce438c4212b8aac930bca04c86" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8773/", "region": "RegionOne", 
"interface": "admin", "id": "d84aad1a45c44e4da09b719167383049" } ], "type": "ec2", "id": "54204024bb7d4665a8efc34fc758f1f7", "name": "ec2" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "admin", "id": "1077687c18514490a3ec980eadd1bd13" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "public", "id": "1e86d8bef1514c3fba8d157a22ccce88" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9000", "region": "RegionOne", "interface": "internal", "id": "f6a6b7bbba66443ead3a0e31a008c271" } ], "type": "messaging-websocket", "id": "6b8655af7d044a15bec3cdca4f2919f8", "name": "zaqar-websocket" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "admin", "id": "083663fd231e40ad97384ad3efb9f1b7" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": "0f4b7054ea27450eac43f685a4fc1d2c" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8004/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "5f3ea39df2e44378b1802a1a87ef9ac4" } ], "type": "orchestration", "id": "6d6346ff2ca842e5968373fbb93e231f", "name": "heat" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "bc2230a70d6a444e9fba75b85fbda41b" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": "d8102dc2b9984d04b30b91b0a6037470" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2.1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "admin", "id": 
"f8253a53edd749bf8b107a53a5d47a82" } ], "type": "compute", "id": "75df965385cc4120a17110c1fde00182", "name": "nova" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://example.com/identity_v2_admin/v2.0", "region": "RegionOne", "interface": "admin", "id": "0ceeb58592274caea5bc942a07d5473f" }, { "region_id": "RegionOne", "url": "http://example.com/identity/v2.0", "region": "RegionOne", "interface": "internal", "id": "8126f2c7021d413e9c98ec3a0ba0fd58" }, { "region_id": "RegionOne", "url": "http://example.com/identity/v2.0", "region": "RegionOne", "interface": "public", "id": "c693879254544e3fb502e795a3f6acc8" } ], "type": "identity", "id": "78aad571d38049e69c866c2abac76af6", "name": "keystone" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "admin", "id": "3654138dc64a45aeb5a8153f2a089c74" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": "7a0d12d0b7314afd9b53d1618ab546ea" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v1/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "82b68ff3aedb43e2acc8307234d3fd0b" } ], "type": "volume", "id": "80491007c0ab462daaa9087250325f59", "name": "cinder" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "internal", "id": "24dfa252fba64469b8b1a832f04bded9" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "public", "id": "e0a01d6cd3be4f6abcc72367b2d87993" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8000/v1", "region": "RegionOne", "interface": "admin", "id": "f33f79d42df247e1bf6daf43a548b014" } ], "type": "cloudformation", "id": "ac5cc6e3c62840818ab338c981d5603f", "name": "heat-cfn" }, { "endpoints": [ { 
"region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "admin", "id": "3e78c357b3c8469fbea12eb681f88a0c" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "public", "id": "89d2aad3dc8e478fbabb21dd7db0962a" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:9696/", "region": "RegionOne", "interface": "internal", "id": "b6d4a8cf5e4042848a749a3116497e55" } ], "type": "network", "id": "b33660edd1eb45e485f7e5f14401a739", "name": "neutron" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "public", "id": "1f8287cf963948778ab0eb109d9f857d" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "internal", "id": "3adf5f9cc5184d92af5ff0fdef043e4a" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8888", "region": "RegionOne", "interface": "admin", "id": "f747223060b3414f947fdcdca2ce8714" } ], "type": "messaging", "id": "cf3e38e9aed54e2d84ea64485317d7a0", "name": "zaqar" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "08f507ccb552476b98f3af7718f25557" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "admin", "id": "d20091ba591347b2b419e5fbde9b7976" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8774/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": "e6b667776e7245dea6e39f2820e080b0" } ], "type": "compute_legacy", "id": "d442e96b273a48018567aeec5800c3e0", "name": "nova_legacy" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": 
"012c78a6694a494995c58d5955fb7822" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "admin", "id": "802d5de210874f068ba31c7e27c29d70" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8776/v2/5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "b37ada66e02e44c9a9a7976d77365503" } ], "type": "volumev2", "id": "d93e78c7967f49acbdd732b9dd97e0d0", "name": "cinderv2" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://23.253.248.171:8080/v1/AUTH_5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "public", "id": "265ce88a0e1642fc90b2ec20ccb279ff" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8080", "region": "RegionOne", "interface": "admin", "id": "500b7f066d39492faff8a3f710fb5a2f" }, { "region_id": "RegionOne", "url": "http://23.253.248.171:8080/v1/AUTH_5b50efd009b540559104ee3c03bbb2b7", "region": "RegionOne", "interface": "internal", "id": "a33b0684f817405280df1f5600777a75" } ], "type": "object-store", "id": "da1b1b5c529946fcb3ee3abdcf376fcb", "name": "swift" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "10a2e6e717a245d9acad3e5f97aeca3d", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" }, "audit_ids": [ "wLc7nDMsQiKqf8VFU4ySpg" ], "issued_at": "2015-11-05T21:32:30.505384Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/auth-token-unscoped-request.json0000664000175000017500000000030100000000000027642 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/samples/admin/auth-token-unscoped-response.json0000664000175000017500000000077600000000000030030 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "token" ], "expires_at": "2015-11-05T22:00:11.000000Z", "user": { "domain": { "id": "default", "name": "Default" }, "id": "10a2e6e717a245d9acad3e5f97aeca3d", "name": "admin", "password_expires_at": null }, "audit_ids": [ "mAjXQhiYRyKwkB4qygdLVg" ], "issued_at": "2015-11-05T21:00:33.819948Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/create-role-inferences-response.json0000664000175000017500000000130100000000000030435 0ustar00zuulzuul00000000000000{ "role_inference": { "prior_role": { "id": "7ceab6192ea34a548cc71b24f72e762c", "links": { "self": "http://example.com/identity/v3/roles/7ceab6192ea34a548cc71b24f72e762c" }, "name": "prior role name" }, "implies": { "id": "97e2f5d38bc94842bc3da818c16762ed", "links": { "self": "http://example.com/identity/v3/roles/97e2f5d38bc94842bc3da818c16762ed" }, "name": "implied role name" } }, "links": { "self": "http://example.com/identity/v3/roles/7ceab6192ea34a548cc71b24f72e762c/implies/97e2f5d38bc94842bc3da818c16762ed" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credential-create-request.json0000664000175000017500000000035100000000000027325 0ustar00zuulzuul00000000000000{ "credential": { "blob": "{\"access\":\"181920\",\"secret\":\"secretKey\"}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "user_id": "bb5476fd12884539b41d5a88f838d773" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credential-create-response.json0000664000175000017500000000073300000000000027477 
0ustar00zuulzuul00000000000000{ "credential": { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/3d3367228f9c7665266604462ec60029bcd83ad89614021a80b2eb879c572510" }, "blob": "{\"access\":\"181920\",\"secret\":\"secretKey\"}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "id": "3d3367228f9c7665266604462ec60029bcd83ad89614021a80b2eb879c572510" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credential-show-response.json0000664000175000017500000000104300000000000027207 0ustar00zuulzuul00000000000000{ "credential": { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" }, "blob": "{\"access\": \"a42a27755ce6442596b049bd7dd8a563\", \"secret\": \"71faf1d40bb24c82b479b1c6fbbd9f0c\", \"trust_id\": null}", "project_id": "6e01855f345f4c59812999b5e459137d", "type": "ec2", "id": "207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credential-update-request.json0000664000175000017500000000035100000000000027344 0ustar00zuulzuul00000000000000{ "credential": { "blob": "{\"access\":\"181920\",\"secret\":\"secretKey\"}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "user_id": "bb5476fd12884539b41d5a88f838d773" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credential-update-response.json0000664000175000017500000000073300000000000027516 0ustar00zuulzuul00000000000000{ "credential": { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": 
"http://example.com/identity/v3/credentials/207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" }, "blob": "{\"access\":\"181920\",\"secret\":\"secretKey\"}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "id": "207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/credentials-list-response.json0000664000175000017500000001323500000000000027373 0ustar00zuulzuul00000000000000{ "credentials": [ { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" }, "blob": "{\"access\": \"a42a27755ce6442596b049bd7dd8a563\", \"secret\": \"71faf1d40bb24c82b479b1c6fbbd9f0c\", \"trust_id\": null}", "project_id": "6e01855f345f4c59812999b5e459137d", "type": "ec2", "id": "207e9b76935efc03804d3dd6ab52d22e9b22a0711e4ada4ff8b76165a07311d7" }, { "user_id": "6f556708d04b4ea6bc72d7df2296b71a", "links": { "self": "http://example.com/identity/v3/credentials/2441494e52ab6d594a34d74586075cb299489bdd1e9389e3ab06467a4f460609" }, "blob": "{\"access\": \"7da79ff0aa364e1396f067e352b9b79a\", \"secret\": \"7a18d68ba8834b799d396f3ff6f1e98c\", \"trust_id\": null}", "project_id": "1a1d14690f3c4ec5bf5f321c5fde3c16", "type": "ec2", "id": "2441494e52ab6d594a34d74586075cb299489bdd1e9389e3ab06467a4f460609" }, { "user_id": "c14107e65d5c4a7f8894fc4b3fc209ff", "links": { "self": "http://example.com/identity/v3/credentials/3397b204b5f04c495bcdc8f34c8a39996f280f9172658241873e15f070ec79d7" }, "blob": "{\"access\": \"db9c58a558534a10a070110de4f9f20c\", \"secret\": \"973e790b88db447ba6f93bca02bc745b\", \"trust_id\": null}", "project_id": "7396e43183db40dcbf40dd727637b548", "type": "ec2", "id": "3397b204b5f04c495bcdc8f34c8a39996f280f9172658241873e15f070ec79d7" }, { "user_id": 
"915cc5f8cca6466aba6c6be06cbabfdf", "links": { "self": "http://example.com/identity/v3/credentials/352d5dd7a4aa19c4f2f23ee288bf65dc23a0bc293f40ffd2128ffe6a8cf3e871" }, "blob": "{\"access\": \"817c6c3487a440c1a0b1d3f92b30ca37\", \"secret\": \"47d681117d1c46e69a0c9ec811dae2e9\", \"trust_id\": null}", "project_id": "2bf9767f9db949ee8364262a28a23062", "type": "ec2", "id": "352d5dd7a4aa19c4f2f23ee288bf65dc23a0bc293f40ffd2128ffe6a8cf3e871" }, { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/3d3367228f9c7665266604462ec60029bcd83ad89614021a80b2eb879c572510" }, "blob": "{\"access\":\"181920\",\"secret\":\"secretKey\"}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "id": "3d3367228f9c7665266604462ec60029bcd83ad89614021a80b2eb879c572510" }, { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/6b7d803fc03b85866904b6b79e0a8fa1f4013b584163b4477eed96717eb402c0" }, "blob": "{\"access\": \"f2ba45670b504a518b46e920d760fde2\", \"secret\": \"bf7fff2b3a844730b2db793411756e55\", \"trust_id\": null}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "id": "6b7d803fc03b85866904b6b79e0a8fa1f4013b584163b4477eed96717eb402c0" }, { "user_id": "2b657f6742ac416697e6821b3b2ee785", "links": { "self": "http://example.com/identity/v3/credentials/7d391b869631e5c4836708ea3bb3e0a5cbe0481201b5f0ddd5685ad3b3faa564" }, "blob": "{\"access\": \"a1525da4e7c0438ebf3058372d637b59\", \"secret\": \"c9165d2542b141e8b2a1ff61a5f5487c\", \"trust_id\": null}", "project_id": "2bf9767f9db949ee8364262a28a23062", "type": "ec2", "id": "7d391b869631e5c4836708ea3bb3e0a5cbe0481201b5f0ddd5685ad3b3faa564" }, { "user_id": "bb5476fd12884539b41d5a88f838d773", "links": { "self": "http://example.com/identity/v3/credentials/7ef4faa904ae7b8b4ddc7bad15b05ee359dad7d7a9b82861d4ad92fdbbb2eb4e" }, "blob": "{\"access\": \"7d7559359b57419eb5f5f5dcd65ab57d\", \"secret\": 
\"570652bcf8c2483c86eb29e9734eed3c\", \"trust_id\": null}", "project_id": "731fc6f265cd486d900f16e84c5cb594", "type": "ec2", "id": "7ef4faa904ae7b8b4ddc7bad15b05ee359dad7d7a9b82861d4ad92fdbbb2eb4e" }, { "user_id": "aedb193e9bb8400485f8d8426f7a031f", "links": { "self": "http://example.com/identity/v3/credentials/9c1c428d8e0e8338a5e16489ecfff9962f2b00f984ce4c7e9015e4003f478df8" }, "blob": "{\"access\": \"b3a6e5f4427c47e9b202264d91a19e49\", \"secret\": \"d9eb470f503f4b46932de38db7a79402\", \"trust_id\": null}", "project_id": "a2672ecf9dd34c6980448b25a47e0947", "type": "ec2", "id": "9c1c428d8e0e8338a5e16489ecfff9962f2b00f984ce4c7e9015e4003f478df8" }, { "user_id": "c14107e65d5c4a7f8894fc4b3fc209ff", "links": { "self": "http://example.com/identity/v3/credentials/e2c35ac2becb0fca3c3c2f035692a4f46a9cbf3b6e86c8a47f5aafe837d78a05" }, "blob": "{\"access\": \"1ed843b1bd4a409f9562400085adbaa4\", \"secret\": \"236ab24db1f04ec995fcf618ed4fc0f5\", \"trust_id\": null}", "project_id": "6e01855f345f4c59812999b5e459137d", "type": "ec2", "id": "e2c35ac2becb0fca3c3c2f035692a4f46a9cbf3b6e86c8a47f5aafe837d78a05" } ], "links": { "self": "http://example.com/identity/v3/credentials", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-create-request.json0000664000175000017500000000033600000000000027730 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "ldap://myldap.com:389/", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-create-response.json0000664000175000017500000000033600000000000030076 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "ldap://myldap.com:389/", 
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-default-response.json0000664000175000017500000000035600000000000030261 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "ldap://localhost", "user": "", "suffix": "cn=example,cn=com", .... } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-default-response.json0000664000175000017500000000020200000000000031401 0ustar00zuulzuul00000000000000{ "ldap": { "url": "ldap://localhost", "user": "", "suffix": "cn=example,cn=com". .... } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-option-default-response.json0000664000175000017500000000003100000000000032707 0ustar00zuulzuul00000000000000{ "driver": "ldap" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-option-show-response.json0000664000175000017500000000004400000000000032247 0ustar00zuulzuul00000000000000{ "url": "http://myldap/root" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-option-update-request.json0000664000175000017500000000005500000000000032405 0ustar00zuulzuul00000000000000{ "url": "http://myldap/my_other_root" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-option-update-response.json0000664000175000017500000000034300000000000032553 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "http://myldap/my_other_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-show-response.json0000664000175000017500000000015700000000000030746 0ustar00zuulzuul00000000000000{ "ldap": { "url": "http://myldap/root", "user_tree_dn": "ou=Users,dc=root,dc=org" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-update-request.json0000664000175000017500000000024300000000000031076 0ustar00zuulzuul00000000000000{ "config": { "ldap": { "url": "http://myldap/my_new_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-group-update-response.json0000664000175000017500000000034100000000000031243 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "http://myldap/my_new_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-show-response.json0000664000175000017500000000032300000000000027607 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "http://myldap/root", "user_tree_dn": "ou=Users,dc=root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-update-request.json0000664000175000017500000000024300000000000027744 0ustar00zuulzuul00000000000000{ "config": { "ldap": { "url": "http://myldap/my_new_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-config-update-response.json0000664000175000017500000000034100000000000030111 0ustar00zuulzuul00000000000000{ "config": { "identity": { "driver": "ldap" }, "ldap": { "url": "http://myldap/my_new_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-create-request.json0000664000175000017500000000017300000000000026464 0ustar00zuulzuul00000000000000{ "domain": { "description": "Domain description", "enabled": true, "name": "myDomain" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-create-response.json0000664000175000017500000000041400000000000026630 0ustar00zuulzuul00000000000000{ "domain": { "description": "Domain description", "enabled": true, "id": "161718", "links": { "self": "http://example.com/identity/v3/domains/161718" }, "name": "myDomain", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-group-roles-list-response.json0000664000175000017500000000104600000000000030616 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "123456", "links": { "self": "http://example.com/identity/v3/roles/123456" }, "name": "admin" }, { "id": "123457", "links": { "self": "http://example.com/identity/v3/roles/123457" }, 
"name": "manager" } ], "links": { "self": "http://example.com/identity/v3/domains/161718/groups/101112/roles", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-group-update-request.json0000664000175000017500000000024300000000000027633 0ustar00zuulzuul00000000000000{ "config": { "ldap": { "url": "http://myldap/my_new_root", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-show-response.json0000664000175000017500000000047700000000000026356 0ustar00zuulzuul00000000000000{ "domain": { "description": "Owns users and tenants (i.e. projects) available on Identity API v2.", "enabled": true, "id": "default", "links": { "self": "http://example.com/identity/v3/domains/default" }, "name": "Default", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-specific-role-create-request.json0000664000175000017500000000022200000000000031201 0ustar00zuulzuul00000000000000{ "role": { "description": "My new role" "domain_id": "92e782c4988642d783a95f4a87c3fdd7", "name": "developer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-update-request.json0000664000175000017500000000013700000000000026503 0ustar00zuulzuul00000000000000{ "domain": { "description": "Owns users and projects on Identity API v2." 
} } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-update-response.json0000664000175000017500000000044600000000000026654 0ustar00zuulzuul00000000000000{ "domain": { "links": { "self": "http://example.com/identity/v3/domains/default" }, "enabled": true, "description": "Owns users and projects on Identity API v2.", "name": "Default", "id": "default", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domain-user-roles-list-response.json0000664000175000017500000000104500000000000030437 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "123456", "links": { "self": "http://example.com/identity/v3/roles/123456" }, "name": "admin" }, { "id": "123457", "links": { "self": "http://example.com/identity/v3/roles/123457" }, "name": "manager" } ], "links": { "self": "http://example.com/identity/v3/domains/161718/users/313233/roles", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/domains-list-response.json0000664000175000017500000000145100000000000026525 0ustar00zuulzuul00000000000000{ "domains": [ { "description": "Used for swift functional testing", "enabled": true, "id": "5a75994a383c449184053ff7270c4e91", "links": { "self": "http://example.com/identity/v3/domains/5a75994a383c449184053ff7270c4e91" }, "name": "swift_test" }, { "description": "Owns users and tenants (i.e. 
projects) available on Identity API v2.", "enabled": true, "id": "default", "links": { "self": "http://example.com/identity/v3/domains/default" }, "name": "Default" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/domains" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoint-create-request.json0000664000175000017500000000033000000000000027030 0ustar00zuulzuul00000000000000{ "endpoint": { "interface": "public", "region_id": "RegionOne", "url": "http://example.com/identity/v3/endpoints/828384", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoint-create-response.json0000664000175000017500000000070000000000000027177 0ustar00zuulzuul00000000000000{ "endpoint": { "region_id": "RegionOne", "links": { "self": "http://example.com/identity/v3/endpoints/e9b475a8742d4ff1a81b353c5a37e138" }, "url": "http://example.com/identity/v3/endpoints/828384", "region": "RegionOne", "enabled": true, "interface": "public", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "id": "e9b475a8742d4ff1a81b353c5a37e138" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoint-show-response.json0000664000175000017500000000065300000000000026723 0ustar00zuulzuul00000000000000{ "endpoint": { "enabled": true, "id": "01c3d5b92f7841ac83fb4b26173c12c7", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/01c3d5b92f7841ac83fb4b26173c12c7" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "3b2d6ad7e02c4cde8498a547601f1b8f", "url": "http://23.253.211.234:9696/" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoint-update-request.json0000664000175000017500000000027200000000000027054 0ustar00zuulzuul00000000000000{ "endpoint": { "interface": "public", "region_id": "north", "url": "http://example.com/identity/v3/endpoints/828384", "service_id": "345678" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoint-update-response.json0000664000175000017500000000047000000000000027222 0ustar00zuulzuul00000000000000{ "endpoint": { "id": "828384", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/828384" }, "region_id": "north", "service_id": "686766", "url": "http://example.com/identity/v3/endpoints/828384" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/endpoints-list-response.json0000664000175000017500000003111100000000000027072 0ustar00zuulzuul00000000000000{ "endpoints": [ { "enabled": true, "id": "0649c5be323f4792afbc1efdd480847d", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/0649c5be323f4792afbc1efdd480847d" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "ef6b15e425814dc69d830361baae0e33", "url": "http://23.253.211.234:8080/v1/AUTH_$(tenant_id)s" }, { "enabled": true, "id": "06b85ed2aa57413ca0b1813daed329a9", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/06b85ed2aa57413ca0b1813daed329a9" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "98cfd5347fb84601b2f88f3afd8dddd4", "url": "http://23.253.211.234:8776/v1/$(tenant_id)s" }, { "enabled": true, "id": "070102f162e04f91a52c7887d0604163", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/070102f162e04f91a52c7887d0604163" }, 
"region": "RegionOne", "region_id": "RegionOne", "service_id": "312f401c14d143d8b3e3f4daf0418add", "url": "http://23.253.211.234:8774/v2.1/$(tenant_id)s" }, { "enabled": true, "id": "0fd73b621e424cc0a172853264519cbc", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/0fd73b621e424cc0a172853264519cbc" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "17a877162c8e405b81d563d95ec4e3f8", "url": "http://23.253.211.234:8776/v2/$(tenant_id)s" }, { "enabled": true, "id": "1899667a3b1544ccb355fdfc4184d7d7", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/1899667a3b1544ccb355fdfc4184d7d7" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9b67aed49e0d4c2fb46ca9476a3b9243", "url": "http://23.253.211.234:9292" }, { "enabled": true, "id": "3b3611ea2e554ee7b85e7f2213b02c33", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/3b3611ea2e554ee7b85e7f2213b02c33" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "2a662f90700b4478929d4b24cc6a320b", "url": "http://23.253.211.234:9696/" }, { "enabled": true, "id": "3ea2b420306f48c6bf0cf51c2fefea03", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/3ea2b420306f48c6bf0cf51c2fefea03" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "736fb9bb21ef498287db9abcc55b20d9", "url": "http://23.253.211.234:8774/v2/$(tenant_id)s" }, { "enabled": true, "id": "41b122182f574a44b0e246aff6ca29c5", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/41b122182f574a44b0e246aff6ca29c5" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9b67aed49e0d4c2fb46ca9476a3b9243", "url": "http://23.253.211.234:9292" }, { "enabled": true, "id": "44a736dd5eeb4347acec66b5f11c8f80", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/44a736dd5eeb4347acec66b5f11c8f80" }, "region": "RegionOne", 
"region_id": "RegionOne", "service_id": "2a662f90700b4478929d4b24cc6a320b", "url": "http://23.253.211.234:9696/" }, { "enabled": true, "id": "499e8f6718ef466ba3fb315fa8f9e0b8", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/499e8f6718ef466ba3fb315fa8f9e0b8" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "312f401c14d143d8b3e3f4daf0418add", "url": "http://23.253.211.234:8774/v2.1/$(tenant_id)s" }, { "enabled": true, "id": "545b1e9f126248428c5cdbec7420c353", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/545b1e9f126248428c5cdbec7420c353" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "736fb9bb21ef498287db9abcc55b20d9", "url": "http://23.253.211.234:8774/v2/$(tenant_id)s" }, { "enabled": true, "id": "629dc5a64e954ad09a45e87bc48299ba", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/629dc5a64e954ad09a45e87bc48299ba" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "2a662f90700b4478929d4b24cc6a320b", "url": "http://23.253.211.234:9696/" }, { "enabled": true, "id": "642a329a660544fdaab2420c0da7d49b", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/642a329a660544fdaab2420c0da7d49b" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "ef6b15e425814dc69d830361baae0e33", "url": "http://23.253.211.234:8080/v1/AUTH_$(tenant_id)s" }, { "enabled": true, "id": "72f8fc8536e44a19bc3388218efcc741", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/72f8fc8536e44a19bc3388218efcc741" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9b67aed49e0d4c2fb46ca9476a3b9243", "url": "http://23.253.211.234:9292" }, { "enabled": true, "id": "74121e71962e4947ac622c41706f0ee7", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/74121e71962e4947ac622c41706f0ee7" }, "region": "RegionOne", "region_id": 
"RegionOne", "service_id": "17a877162c8e405b81d563d95ec4e3f8", "url": "http://23.253.211.234:8776/v2/$(tenant_id)s" }, { "enabled": true, "id": "7431a4f971dc4abb8d0e387434a06817", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/7431a4f971dc4abb8d0e387434a06817" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "url": "http://23.253.211.234:8773/" }, { "enabled": true, "id": "7cffc75a14ca4334b458e475750bd84f", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/7cffc75a14ca4334b458e475750bd84f" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "efeb249cbcd3412496bc4b194ea058da", "url": "http://example.com/identity/v2.0" }, { "enabled": true, "id": "a422a6fa163b4a6ba8309e067ce3750b", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/a422a6fa163b4a6ba8309e067ce3750b" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "312f401c14d143d8b3e3f4daf0418add", "url": "http://23.253.211.234:8774/v2.1/$(tenant_id)s" }, { "enabled": true, "id": "ac6a74efe9944afdb129d4df70cde0ec", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/ac6a74efe9944afdb129d4df70cde0ec" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "url": "http://23.253.211.234:8773/" }, { "enabled": true, "id": "adf43d7ff0d14d0fa1e8a5187f40e1af", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/adf43d7ff0d14d0fa1e8a5187f40e1af" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "efeb249cbcd3412496bc4b194ea058da", "url": "http://example.com/identity/v2.0" }, { "enabled": true, "id": "b18be64a118244d39217db72534f8b33", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/b18be64a118244d39217db72534f8b33" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": 
"736fb9bb21ef498287db9abcc55b20d9", "url": "http://23.253.211.234:8774/v2/$(tenant_id)s" }, { "enabled": true, "id": "c828983c9c214d819674649aa693cdff", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/c828983c9c214d819674649aa693cdff" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "98cfd5347fb84601b2f88f3afd8dddd4", "url": "http://23.253.211.234:8776/v1/$(tenant_id)s" }, { "enabled": true, "id": "d062ebdb244f447498768fc0ced32e2d", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/d062ebdb244f447498768fc0ced32e2d" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "98cfd5347fb84601b2f88f3afd8dddd4", "url": "http://23.253.211.234:8776/v1/$(tenant_id)s" }, { "enabled": true, "id": "d281219ec0df4cf2b7c681463d5dcf51", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/d281219ec0df4cf2b7c681463d5dcf51" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "17a877162c8e405b81d563d95ec4e3f8", "url": "http://23.253.211.234:8776/v2/$(tenant_id)s" }, { "enabled": true, "id": "d8e0824a17404431b5d978a87ac1bede", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/d8e0824a17404431b5d978a87ac1bede" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "efeb249cbcd3412496bc4b194ea058da", "url": "http://example.com/identity_v2_admin/v2.0" }, { "enabled": true, "id": "d9b54bdc063046828ac3c6487bea8047", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/d9b54bdc063046828ac3c6487bea8047" }, "region": "RegionOne", "region_id": "RegionOne", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "url": "http://23.253.211.234:8773/" }, { "enabled": true, "id": "ea74f9771dec475eabfc2cdff5364413", "interface": "admin", "links": { "self": "http://example.com/identity/v3/endpoints/ea74f9771dec475eabfc2cdff5364413" }, "region": "RegionOne", "region_id": "RegionOne", 
"service_id": "ef6b15e425814dc69d830361baae0e33", "url": "http://23.253.211.234:8080" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/endpoints" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/get-available-domain-scopes-response.json0000664000175000017500000000131700000000000031357 0ustar00zuulzuul00000000000000{ "domains": [ { "description": "my domain description", "enabled": true, "id": "1789d1", "links": { "self": "https://example.com/identity/v3/domains/1789d1" }, "name": "my domain" }, { "description": "description of my other domain", "enabled": true, "id": "43e8da", "links": { "self": "https://example.com/identity/v3/domains/43e8da" }, "name": "another domain" } ], "links": { "self": "https://example.com/identity/v3/auth/domains", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/get-available-project-scopes-response.json0000664000175000017500000000124600000000000031557 0ustar00zuulzuul00000000000000{ "projects": [ { "domain_id": "1789d1", "enabled": true, "id": "263fd9", "links": { "self": "https://example.com/identity/v3/projects/263fd9" }, "name": "Test Group" }, { "domain_id": "1789d1", "enabled": true, "id": "50ef01", "links": { "self": "https://example.com/identity/v3/projects/50ef01" }, "name": "Build Group" } ], "links": { "self": "https://example.com/identity/v3/auth/projects", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/get-available-system-scopes-response.json0000664000175000017500000000023200000000000031427 0ustar00zuulzuul00000000000000{ "system": [ { "all": true } ], "links": { "self": 
"https://example.com/identity/v3/auth/system" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/get-role-inferences-response.json0000664000175000017500000000130100000000000027751 0ustar00zuulzuul00000000000000{ "role_inference": { "prior_role": { "id": "7ceab6192ea34a548cc71b24f72e762c", "links": { "self": "http://example.com/identity/v3/roles/7ceab6192ea34a548cc71b24f72e762c" }, "name": "prior role name" }, "implies": { "id": "97e2f5d38bc94842bc3da818c16762ed", "links": { "self": "http://example.com/identity/v3/roles/97e2f5d38bc94842bc3da818c16762ed" }, "name": "implied role name" } }, "links": { "self": "http://example.com/identity/v3/roles/7ceab6192ea34a548cc71b24f72e762c/implies/97e2f5d38bc94842bc3da818c16762ed" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/get-service-catalog-response.json0000664000175000017500000000202200000000000027742 0ustar00zuulzuul00000000000000{ "catalog": [ { "endpoints": [ { "id": "39dc322ce86c4111b4f06c2eeae0841b", "interface": "public", "region": "RegionOne", "url": "http://localhost:5000" }, { "id": "ec642f27474842e78bf059f6c48f4e99", "interface": "internal", "region": "RegionOne", "url": "http://localhost:5000" }, { "id": "c609fc430175452290b62a4242e8a7e8", "interface": "admin", "region": "RegionOne", "url": "http://localhost:5000" } ], "id": "4363ae44bdf34a3981fde3b823cb9aa2", "type": "identity", "name": "keystone" } ], "links": { "self": "https://example.com/identity/v3/catalog", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-create-request.json0000664000175000017500000000021500000000000026346 0ustar00zuulzuul00000000000000{ "group": { "description": "Contract 
developers", "domain_id": "default", "name": "Contract developers" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-create-response.json0000664000175000017500000000047200000000000026521 0ustar00zuulzuul00000000000000{ "group": { "domain_id": "default", "description": "Contract developers", "id": "c0d675eac29945ad9dfd08aa1bb75751", "links": { "self": "http://example.com/identity/v3/groups/c0d675eac29945ad9dfd08aa1bb75751" }, "name": "Contract developers" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-roles-domain-list-response.json0000664000175000017500000000107500000000000030620 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "91011", "links": { "self": "http://example.com/identity/v3/roles/91011" }, "name": "admin" }, { "id": "91011", "links": { "self": "http://example.com/identity/v3/roles/91011" }, "name": "admin" } ], "links": { "self": "http://example.com/identity/v3/OS-INHERIT/domains/1234/groups/5678/roles/inherited_to_projects", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-show-response.json0000664000175000017500000000047200000000000026236 0ustar00zuulzuul00000000000000{ "group": { "description": "Contract developers", "domain_id": "default", "id": "c0d675eac29945ad9dfd08aa1bb75751", "links": { "self": "http://example.com/identity/v3/groups/c0d675eac29945ad9dfd08aa1bb75751" }, "name": "Contract developers" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-update-request.json0000664000175000017500000000016700000000000026373 0ustar00zuulzuul00000000000000{ 
"group": { "description": "Contract developers 2016", "name": "Contract developers 2016" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-update-response.json0000664000175000017500000000050400000000000026534 0ustar00zuulzuul00000000000000{ "group": { "description": "Contract developers 2016", "domain_id": "default", "id": "c0d675eac29945ad9dfd08aa1bb75751", "links": { "self": "http://example.com/identity/v3/groups/c0d675eac29945ad9dfd08aa1bb75751" }, "name": "Contract developers 2016" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/group-users-list-response.json0000664000175000017500000000163200000000000027367 0ustar00zuulzuul00000000000000{ "links": { "self": "http://example.com/identity/v3/groups/9ce0ad4e58a84d7a97b92f7955d10c92/users", "previous": null, "next": null }, "users": [ { "domain_id": "default", "description": null, "enabled": true, "id": "acd565a08293c1e48bc0dd0d72ad5d5d" "name": "Henry", "links": { "self": "http://example.com/identity/v3/users/acd565a08293c1e48bc0dd0d72ad5d5d" } }, { "domain_id": "default", "description": null, "enabled": true, "id": "fff603a0829d41e48bc0dd0d72ad61ce", "name": "Paul", "links": { "self": "http://example.com/identity/v3/users/fff603a0829d41e48bc0dd0d72ad61ce" }, "password_expires_at": "2016-11-06T15:32:17.000000" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/groups-list-response.json0000664000175000017500000000144200000000000026412 0ustar00zuulzuul00000000000000{ "links": { "self": "http://example.com/identity/v3/groups", "previous": null, "next": null }, "groups": [ { "description": "non-admin group", "domain_id": "default", "id": "96372bbb152f475aa37e9a76a25a029c", "links": { 
"self": "http://example.com/identity/v3/groups/96372bbb152f475aa37e9a76a25a029c" }, "name": "nonadmins" }, { "description": "openstack admin group", "domain_id": "default", "id": "9ce0ad4e58a84d7a97b92f7955d10c92", "links": { "self": "http://example.com/identity/v3/groups/9ce0ad4e58a84d7a97b92f7955d10c92" }, "name": "admins" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/identity-version-response.json0000664000175000017500000000067300000000000027443 0ustar00zuulzuul00000000000000{ "version": { "id": "v3.4", "links": [ { "href": "http://example.com/identity/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json" } ], "status": "stable", "updated": "2015-03-30T00:00:00Z" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/identity-versions-response.json0000664000175000017500000000255500000000000027627 0ustar00zuulzuul00000000000000{ "versions": { "values": [ { "id": "v3.4", "links": [ { "href": "http://example.com/identity/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json" } ], "status": "stable", "updated": "2015-03-30T00:00:00Z" }, { "id": "v2.0", "links": [ { "href": "http://example.com/identity/v2.0/", "rel": "self" }, { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json" } ], "status": "stable", "updated": "2014-04-17T00:00:00Z" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limit-flat-model-response.json0000664000175000017500000000023700000000000027263 
0ustar00zuulzuul00000000000000{ "model": { "description": "Limit enforcement and validation does not take project hierarchy into consideration.", "name": "flat" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limit-show-response.json0000664000175000017500000000071500000000000026220 0ustar00zuulzuul00000000000000{ "limit": { "resource_name": "volume", "region_id": null, "links": { "self": "http://10.3.150.25/identity/v3/limits/25a04c7a065c430590881c646cdcdd58" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "domain_id": null, "id": "25a04c7a065c430590881c646cdcdd58", "resource_limit": 11, "description": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limits-create-request.json0000664000175000017500000000111500000000000026513 0ustar00zuulzuul00000000000000{ "limits":[ { "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "region_id": "RegionOne", "resource_name": "snapshot", "resource_limit": 5 }, { "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "domain_id": "edbafc92be354ffa977c58aa79c7bdb2", "resource_name": "volume", "resource_limit": 10, "description": "Number of volumes for project 3a705b9f56bb439381b43c4fe59dccce" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limits-create-response.json0000664000175000017500000000210700000000000026663 0ustar00zuulzuul00000000000000{ "limits": [ { "resource_name": "volume", "region_id": null, "links": { "self": "http://10.3.150.25/identity/v3/limits/25a04c7a065c430590881c646cdcdd58" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": null, "domain_id": 
"edbafc92be354ffa977c58aa79c7bdb2", "id": "25a04c7a065c430590881c646cdcdd58", "resource_limit": 10, "description": "Number of volumes for project 3a705b9f56bb439381b43c4fe59dccce" }, { "resource_name": "snapshot", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/limits/3229b3849f584faea483d6851f7aab05" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "domain_id": null, "id": "3229b3849f584faea483d6851f7aab05", "resource_limit": 5, "description": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limits-list-response.json0000664000175000017500000000230500000000000026373 0ustar00zuulzuul00000000000000{ "links": { "self": "http://10.3.150.25/identity/v3/limits", "previous": null, "next": null }, "limits": [ { "resource_name": "volume", "region_id": null, "links": { "self": "http://10.3.150.25/identity/v3/limits/25a04c7a065c430590881c646cdcdd58" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "domain_id": null, "id": "25a04c7a065c430590881c646cdcdd58", "resource_limit": 11, "description": "Number of volumes for project 3a705b9f56bb439381b43c4fe59dccce" }, { "resource_name": "snapshot", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/limits/3229b3849f584faea483d6851f7aab05" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "domain_id": null, "id": "3229b3849f584faea483d6851f7aab05", "resource_limit": 5, "description": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limits-update-request.json0000664000175000017500000000022000000000000026526 0ustar00zuulzuul00000000000000{ "limit": { "resource_limit": 5, "description": "Number 
of snapshots for project 3a705b9f56bb439381b43c4fe59dccce" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/limits-update-response.json0000664000175000017500000000102300000000000026676 0ustar00zuulzuul00000000000000{ "limit": { "resource_name": "snapshot", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/limits/3229b3849f584faea483d6851f7aab05" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "project_id": "3a705b9f56bb439381b43c4fe59dccce", "domain_id": null, "id": "3229b3849f584faea483d6851f7aab05", "resource_limit": 5, "description": "Number of snapshots for project 3a705b9f56bb439381b43c4fe59dccce" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/list-implied-roles-for-role-response.json0000664000175000017500000000175000000000000031365 0ustar00zuulzuul00000000000000{ "role_inference": { "prior_role": { "id": "42c764f0c19146728dbfe73a49cc35c3", "links": { "self": "http://example.com/identity/v3/roles/42c764f0c19146728dbfe73a49cc35c3" }, "name": "prior role name" }, "implies": [ { "id": "066fbfc8b3e54fb68784c9e7e92ab8d7", "links": { "self": "http://example.com/identity/v3/roles/066fbfc8b3e54fb68784c9e7e92ab8d7" }, "name": "implied role1 name" }, { "id": "32a0df1cc22848aca3986adae9e0b9a0", "links": { "self": "http://example.com/identity/v3/roles/32a0df1cc22848aca3986adae9e0b9a0" }, "name": "implied role2 name" } ] }, "links" : { "self": "http://example.com/identity/v3/roles/42c764f0c19146728dbfe73a49cc35c3/implies" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/list-system-roles-for-group-response.json0000664000175000017500000000072100000000000031456 0ustar00zuulzuul00000000000000{ "roles": [ { 
"domain_id": null, "id": "6d550353899f4b0fbf3e410e1b6ddc05", "links": { "self": "http://example.com/identity/v3/roles/6d550353899f4b0fbf3e410e1b6ddc05" }, "name": "admin" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/system/groups/934cc15c4d03479ebba167d67d47737f/roles" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/list-system-roles-for-user-response.json0000664000175000017500000000071700000000000031305 0ustar00zuulzuul00000000000000{ "roles": [ { "domain_id": null, "id": "6d550353899f4b0fbf3e410e1b6ddc05", "links": { "self": "http://example.com/identity/v3/roles/6d550353899f4b0fbf3e410e1b6ddc05" }, "name": "admin" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/system/users/0b916f1b1e51455cb24b3a051520c576/roles" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policies-list-response.json0000664000175000017500000000145100000000000026702 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/policies" }, "policies": [ { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717273", "links": { "self": "http://example.com/identity/v3/policies/717273" }, "type": "application/json" }, { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717274", "links": { "self": "http://example.com/identity/v3/policies/717274" }, "type": "application/json" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policy-create-request.json0000664000175000017500000000016500000000000026515 0ustar00zuulzuul00000000000000{ "policy": { "blob": "{'foobar_user': 'role:compute-user'}", "type": "application/json" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policy-create-response.json0000664000175000017500000000044400000000000026663 0ustar00zuulzuul00000000000000{ "policy": { "links": { "self": "http://example.com/identity/v3/policies/88f5b83f8f8e41daba4c25eed1a7bbc6" }, "blob": "{'foobar_user': 'role:compute-user'}", "type": "application/json", "id": "88f5b83f8f8e41daba4c25eed1a7bbc6" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policy-show-response.json0000664000175000017500000000044400000000000026400 0ustar00zuulzuul00000000000000{ "policy": { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717273", "links": { "self": "http://example.com/identity/v3/policies/717273" }, "type": "application/json" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policy-update-request.json0000664000175000017500000000025100000000000026530 0ustar00zuulzuul00000000000000{ "policy": { "blob": { "foobar_user": [ "role:compute-user" ] }, "type": "application/json" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/policy-update-response.json0000664000175000017500000000044400000000000026702 0ustar00zuulzuul00000000000000{ "policy": { "blob": { "foobar_user": [ "role:compute-user" ] }, "id": "717273", "links": { "self": "http://example.com/identity/v3/policies/717273" }, "type": "application/json" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-create-domain-request.json0000664000175000017500000000022500000000000030126 
0ustar00zuulzuul00000000000000{ "project": { "description": "My new domain", "enabled": true, "is_domain": true, "name": "myNewDomain" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-create-request.json0000664000175000017500000000031700000000000026663 0ustar00zuulzuul00000000000000{ "project": { "description": "My new project", "domain_id": "default", "enabled": true, "is_domain": false, "name": "myNewProject", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-create-response.json0000664000175000017500000000066200000000000027034 0ustar00zuulzuul00000000000000{ "project": { "description": "My new project", "domain_id": "default", "enabled": true, "id": "93ebbcc35335488b96ff9cd7d18cbb2e", "is_domain": false, "links": { "self": "http://example.com/identity/v3/projects/93ebbcc35335488b96ff9cd7d18cbb2e" }, "name": "myNewProject", "parent_id": "default", "tags": [], "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-enable-request.json0000664000175000017500000000006300000000000026644 0ustar00zuulzuul00000000000000{ "project": { "enabled": true } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-group-roles-list-response.json0000664000175000017500000000104700000000000031016 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "123456", "links": { "self": "http://example.com/identity/v3/roles/123456" }, "name": "admin" }, { "id": "123457", "links": { "self": "http://example.com/identity/v3/roles/123457" }, "name": "manager" } ], "links": { "self": 
"http://example.com/identity/v3/projects/456789/groups/101112/roles", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-show-parents-response.json0000664000175000017500000000134000000000000030215 0ustar00zuulzuul00000000000000{ "project": { "domain_id": "1789d1", "enabled": true, "id": "263fd9", "links": { "self": "http://example.com/identity/v3/projects/263fd9" }, "name": "Dev Group A", "options": {}, "parent_id": "183ab2", "parents": [ { "project": { "domain_id": "1789d1", "enabled": true, "id": "183ab2", "links": { "self": "http://example.com/identity/v3/projects/183ab2" }, "name": "Dev Group A Parent", "parent_id": null } } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-show-response.json0000664000175000017500000000061300000000000026545 0ustar00zuulzuul00000000000000{ "project": { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "0c4e939acacf4376bdcd1129f1a054ad", "links": { "self": "http://example.com/identity/v3/projects/0c4e939acacf4376bdcd1129f1a054ad" }, "name": "admin", "parent_id": "default", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-show-subtree-response.json0000664000175000017500000000306000000000000030213 0ustar00zuulzuul00000000000000{ "project": { "domain_id": "1789d1", "enabled": true, "id": "263fd9", "links": { "self": "http://example.com/identity/v3/projects/263fd9" }, "name": "Dev Group A", "options": {}, "parent_id": "183ab2", "subtree": [ { "project": { "domain_id": "1789d1", "enabled": true, "id": "9n1jhb", "links": { "self": "http://example.com/identity/v3/projects/9n1jhb" }, "name": "Dev Group A 
Child 1", "parent_id": "263fd9" } }, { "project": { "domain_id": "1789d1", "enabled": true, "id": "4b6aa1", "links": { "self": "http://example.com/identity/v3/projects/4b6aa1" }, "name": "Dev Group A Child 2", "parent_id": "263fd9" } }, { "project": { "domain_id": "1789d1", "enabled": true, "id": "b76eq8", "links": { "self": "http://example.com/identity/v3/projects/b76xq8" }, "name": "Dev Group A Grandchild", "parent_id": "4b6aa1" } } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-tags-list-response.json0000664000175000017500000000003700000000000027474 0ustar00zuulzuul00000000000000{ "tags": ["foo", "bar"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-tags-update-request.json0000664000175000017500000000003700000000000027635 0ustar00zuulzuul00000000000000{ "tags": ["foo", "bar"] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-tags-update-response.json0000664000175000017500000000101400000000000027777 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://identity:5000/v3/projects" }, "projects": [ { "description": "Test Project", "domain_id": "default", "enabled": true, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "links": { "self": "http://identity:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c" }, "name": "demo", "tags": ["foo", "bar"] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-update-request.json0000664000175000017500000000015300000000000026700 0ustar00zuulzuul00000000000000{ "project": { "description": "My updated project", "name": 
"myUpdatedProject" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-update-response.json0000664000175000017500000000065700000000000027057 0ustar00zuulzuul00000000000000{ "project": { "description": "My updated project", "domain_id": null, "links": { "self": "http://example.com/identity/v3/projects/93ebbcc35335488b96ff9cd7d18cbb2e" }, "enabled": true, "id": "93ebbcc35335488b96ff9cd7d18cbb2e", "is_domain": true, "name": "myUpdatedProject", "parent_id": null, "tags": [], "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/project-user-roles-list-response.json0000664000175000017500000000072300000000000030640 0ustar00zuulzuul00000000000000{ "links": { "self": "http://example.com/identity/v3/projects/9e5a15e2c0dd42aab0990a463e839ac1/users/b964a9e51c0046a4a84d3f83a135a97c/roles", "previous": null, "next": null }, "roles": [ { "id": "3b5347fa7a144008ba57c0acea469cc3", "links": { "self": "http://example.com/identity/v3/roles/3b5347fa7a144008ba57c0acea469cc3" }, "name": "admin" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/projects-list-response.json0000664000175000017500000000705400000000000026731 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/projects" }, "projects": [ { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "0c4e939acacf4376bdcd1129f1a054ad", "links": { "self": "http://example.com/identity/v3/projects/0c4e939acacf4376bdcd1129f1a054ad" }, "name": "admin", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": 
"0cbd49cbf76d405d9c86562e1d579bd3", "links": { "self": "http://example.com/identity/v3/projects/0cbd49cbf76d405d9c86562e1d579bd3" }, "name": "demo", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "2db68fed84324f29bb73130c6c2094fb", "links": { "self": "http://example.com/identity/v3/projects/2db68fed84324f29bb73130c6c2094fb" }, "name": "swifttenanttest2", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "3d594eb0f04741069dbbb521635b21c7", "links": { "self": "http://example.com/identity/v3/projects/3d594eb0f04741069dbbb521635b21c7" }, "name": "service", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "43ebde53fc314b1c9ea2b8c5dc744927", "links": { "self": "http://example.com/identity/v3/projects/43ebde53fc314b1c9ea2b8c5dc744927" }, "name": "swifttenanttest1", "parent_id": null, "tags": [] }, { "is_domain": false, "description": "", "domain_id": "1bc2169ca88e4cdaaba46d4c15390b65", "enabled": true, "id": "4b1eb781a47440acb8af9850103e537f", "links": { "self": "http://example.com/identity/v3/projects/4b1eb781a47440acb8af9850103e537f" }, "name": "swifttenanttest4", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "5961c443439d4fcebe42643723755e9d", "links": { "self": "http://example.com/identity/v3/projects/5961c443439d4fcebe42643723755e9d" }, "name": "invisible_to_admin", "parent_id": null, "tags": [] }, { "is_domain": false, "description": null, "domain_id": "default", "enabled": true, "id": "fdb8424c4e4f4c0ba32c52e2de3bd80e", "links": { "self": "http://example.com/identity/v3/projects/fdb8424c4e4f4c0ba32c52e2de3bd80e" }, "name": "alt_demo", "parent_id": null, "tags": [] } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/region-create-request.json0000664000175000017500000000021500000000000026475 0ustar00zuulzuul00000000000000{ "region": { "description": "My subregion", "id": "RegionOneSubRegion", "parent_region_id": "RegionOne" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/region-create-response.json0000664000175000017500000000037300000000000026650 0ustar00zuulzuul00000000000000{ "region": { "parent_region_id": "RegionOne", "id": "RegionOneSubRegion", "links": { "self": "http://example.com/identity/v3/regions/RegionOneSubRegion" }, "description": "My subregion" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/region-show-response.json0000664000175000017500000000035700000000000026367 0ustar00zuulzuul00000000000000{ "region": { "description": "My subregion 3", "id": "RegionThree", "links": { "self": "http://example.com/identity/v3/regions/RegionThree" }, "parent_region_id": "RegionOne" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/region-update-request.json0000664000175000017500000000010200000000000026507 0ustar00zuulzuul00000000000000{ "region": { "description": "My subregion 3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/region-update-response.json0000664000175000017500000000035700000000000026671 0ustar00zuulzuul00000000000000{ "region": { "parent_region_id": "RegionOne", "id": "RegionThree", "links": { "self": "http://example.com/identity/v3/regions/RegionThree" }, "description": "My subregion 3" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/regions-list-response.json0000664000175000017500000000060200000000000026536 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/regions" }, "regions": [ { "description": "", "id": "RegionOne", "links": { "self": "http://example.com/identity/v3/regions/RegionOne" }, "parent_region_id": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limit-show-response.json0000664000175000017500000000063400000000000030353 0ustar00zuulzuul00000000000000{ "registered_limit": { "resource_name": "volume", "region_id": null, "links": { "self": "http://10.3.150.25/identity/v3/registered_limits/773147dd53cd4a17b921d555cf17c633" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 10, "id": "773147dd53cd4a17b921d555cf17c633", "description": "Number of volumes" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limits-create-request.json0000664000175000017500000000065600000000000030657 0ustar00zuulzuul00000000000000{ "registered_limits":[ { "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "region_id": "RegionOne", "resource_name": "snapshot", "default_limit": 5 }, { "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "resource_name": "volume", "default_limit": 10, "description": "Number of volumes" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limits-create-response.json0000664000175000017500000000157700000000000031030 0ustar00zuulzuul00000000000000{ "registered_limits": [ { "resource_name": "volume", "region_id": null, "links": { "self": 
"http://10.3.150.25/identity/v3/registered_limits/773147dd53cd4a17b921d555cf17c633" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 10, "id": "773147dd53cd4a17b921d555cf17c633", "description": "Number of volumes" }, { "resource_name": "snapshot", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/registered_limits/e35a965b2b244209bb0c2b193c55955f" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 5, "id": "e35a965b2b244209bb0c2b193c55955f", "description": null } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limits-list-response.json0000664000175000017500000000201000000000000030517 0ustar00zuulzuul00000000000000{ "links": { "self": "http://10.3.150.25/identity/v3/registered_limits", "previous": null, "next": null }, "registered_limits": [ { "resource_name": "snapshot", "region_id": null, "links": { "self": "http://10.3.150.25/identity/v3/registered_limits/195acb8a093e43e9afb23d6628361e7c" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 10, "id": "195acb8a093e43e9afb23d6628361e7c", "description": null }, { "resource_name": "volume", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/registered_limits/ea7f74f15cba4c6db1406fe52532f98d" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 5, "id": "ea7f74f15cba4c6db1406fe52532f98d", "description": "Number of volumes" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limits-update-request.json0000664000175000017500000000035600000000000030673 0ustar00zuulzuul00000000000000{ "registered_limit": { "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "region_id": "RegionOne", "resource_name": "snapshot", "default_limit": 5, "description": "Number of 
snapshots" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/registered-limits-update-response.json0000664000175000017500000000064600000000000031043 0ustar00zuulzuul00000000000000{ "registered_limit": { "resource_name": "snapshot", "region_id": "RegionOne", "links": { "self": "http://10.3.150.25/identity/v3/registered_limits/e35a965b2b244209bb0c2b193c55955f" }, "service_id": "9408080f1970482aa0e38bc2d4ea34b7", "default_limit": 5, "id": "e35a965b2b244209bb0c2b193c55955f", "description": "Number of snapshots" } } ././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000117 path=keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-effective-list-include-names-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-effective-list-include-names-respon0000664000175000017500000000426000000000000033546 0ustar00zuulzuul00000000000000{ "role_assignments": [ { "links": { "assignment": "http://example.com/identity/v3/domains/161718/users/313233/roles/123456" }, "role": { "domain": { "id": "161718", "name": "Default" }, "id": "123456", "name": "admin" }, "scope": { "domain": { "id": "161718", "name": "Default" } }, "user": { "domain": { "id": "161718", "name": "Default" }, "id": "313233", "name": "admin" } }, { "links": { "assignment": "http://example.com/identity/v3/projects/456789/groups/101112/roles/123456", "membership": "http://example.com/identity/v3/groups/101112/users/313233" }, "role": { "domain": { "id": "161718", "name": "Default" }, "id": "123456", "name": "admin" }, "scope": { "project": { "domain": { "id": "161718", "name": "Default" } "id": "456789", "name": "admin" } }, "user": { "domain": { "id": "161718", "name": "Default" }, "id": "313233", "name": "admin" } } ], "links": { "self": "http://example.com/identity/v3/role_assignments?effective&include_names=true", 
"previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-effective-list-response.json0000664000175000017500000000212500000000000032322 0ustar00zuulzuul00000000000000{ "role_assignments": [ { "links": { "assignment": "http://example.com/identity/v3/domains/161718/users/313233/roles/123456" }, "role": { "id": "123456" }, "scope": { "domain": { "id": "161718" } }, "user": { "id": "313233" } }, { "links": { "assignment": "http://example.com/identity/v3/projects/456789/groups/101112/roles/123456", "membership": "http://example.com/identity/v3/groups/101112/users/313233" }, "role": { "id": "123456" }, "scope": { "project": { "id": "456789" } }, "user": { "id": "313234" } } ], "links": { "self": "http://example.com/identity/v3/role_assignments?effective", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-effective-list-response.txt0000664000175000017500000000012000000000000032161 0ustar00zuulzuul00000000000000GET /role_assignments?user.id={user_id}&scope.project.id={project_id}&effective ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-list-include-subtree-response.json0000664000175000017500000000235500000000000033461 0ustar00zuulzuul00000000000000{ "role_assignments": [ { "links": { "assignment": "http://example.com/identity/v3/OS-INHERIT/domains/161718/users/313233/roles/123456/inherited_to_projects" }, "role": { "id": "123456" }, "scope": { "domain": { "id": "161718" }, "OS-INHERIT:inherited_to": "projects" }, "user": { "id": "313233" } }, { "group": { "id": "101112-" }, "links": { "assignment": 
"http://example.com/identity/v3/projects/456789/groups/101112/roles/123456" }, "role": { "id": "123456" }, "scope": { "project": { "id": "456789" } } } ], "links": { "self": "http://example.com/identity/v3/role_assignments", "previous": null, "next": null } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-list-response.json0000664000175000017500000000176100000000000030371 0ustar00zuulzuul00000000000000{ "role_assignments": [ { "links": { "assignment": "http://example.com/identity/v3/domains/161718/users/313233/roles/123456" }, "role": { "id": "123456" }, "scope": { "domain": { "id": "161718" } }, "user": { "id": "313233" } }, { "group": { "id": "101112" }, "links": { "assignment": "http://example.com/identity/v3/projects/456789/groups/101112/roles/123456" }, "role": { "id": "123456" }, "scope": { "project": { "id": "456789" } } } ], "links": { "self": "http://example.com/identity/v3/role_assignments", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-assignments-list-response.txt0000664000175000017500000000012000000000000030223 0ustar00zuulzuul00000000000000GET /role_assignments?user.id={user_id}&scope.project.id={project_id}&effective ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-create-request.json0000664000175000017500000000013200000000000026151 0ustar00zuulzuul00000000000000{ "role": { "description": "My new role", "name": "developer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-create-response.json0000664000175000017500000000043500000000000026325 
0ustar00zuulzuul00000000000000{ "role": { "id": "1e443fa8cee3482a8a2b6954dd5c8f12", "links": { "self": "http://example.com/identity/v3/roles/1e443fa8cee3482a8a2b6954dd5c8f12" }, "description": "My new role", "name": "developer", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-inferences-response.json0000664000175000017500000000441400000000000027204 0ustar00zuulzuul00000000000000{ "role_inferences": [ { "prior_role": { "id": "1acd3c5aa0e246b9a7427d252160dcd1", "links": { "self": "http://example.com/identity/v3/roles/1acd3c5aa0e246b9a7427d252160dcd1" }, "description": "My new role", "name": "prior role name" }, "implies": [ { "id": "3602510e2e1f499589f78a0724dcf614", "links": { "self": "http://example.com/identity/v3/roles/3602510e2e1f499589f78a0724dcf614" }, "description": "My new role", "name": "implied role1 name" }, { "id": "738289aeef684e73a987f7cf2ec6d925", "links": { "self": "http://example.com/identity/v3/roles/738289aeef684e73a987f7cf2ec6d925" }, "description": "My new role", "name": "implied role2 name" } ] }, { "prior_role": { "id": "bbf7a5098bb34407b7164eb6ff9f144e", "links": { "self" : "http://example.com/identity/v3/roles/bbf7a5098bb34407b7164eb6ff9f144e" }, "description": "My new role", "name": "prior role name" }, "implies": [ { "id": "872b20ad124c4c1bafaef2b1aae316ab", "links": { "self": "http://example.com/identity/v3/roles/872b20ad124c4c1bafaef2b1aae316ab" }, "description": null, "name": "implied role1 name" }, { "id": "1d865b1b2da14cb7b05254677e5f36a2", "links": { "self": "http://example.com/identity/v3/roles/1d865b1b2da14cb7b05254677e5f36a2" }, "description": null, "name": "implied role2 name" } ] } ], "links": { "self": "http://example.com/identity/v3/role_inferences" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/samples/admin/role-show-response.json0000664000175000017500000000052600000000000026043 0ustar00zuulzuul00000000000000{ "role": { "domain_id": "d07792fd66ac4ed881723ab9f1c9925f", "id": "1e443fa8cee3482a8a2b6954dd5c8f12", "links": { "self": "http://example.com/identity/v3/roles/1e443fa8cee3482a8a2b6954dd5c8f12" }, "description": "My new role", "name": "Developer", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-update-request.json0000664000175000017500000000013200000000000026170 0ustar00zuulzuul00000000000000{ "role": { "description": "My new role", "name": "Developer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/role-update-response.json0000664000175000017500000000052600000000000026345 0ustar00zuulzuul00000000000000{ "role": { "domain_id": "73748865fb964ded9e836d491d32dcfb", "id": "1e443fa8cee3482a8a2b6954dd5c8f12", "links": { "self": "http://example.com/identity/v3/roles/1e443fa8cee3482a8a2b6954dd5c8f12" }, "description": "My new role", "name": "Developer", "options": {} } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/roles-list-response.json0000664000175000017500000000340000000000000026213 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/roles" }, "roles": [ { "id": "5318e65d75574c17bf5339d3df33a5a3", "links": { "self": "http://example.com/identity/v3/roles/5318e65d75574c17bf5339d3df33a5a3" }, "description": "My new role", "name": "admin" }, { "id": "642bcfc75c384fd181adf34d9b2df897", "links": { "self": "http://example.com/identity/v3/roles/642bcfc75c384fd181adf34d9b2df897" }, "description": "My new role", "name": 
"anotherrole" }, { "id": "779a76d74f544224a7ef8762ca0de627", "links": { "self": "http://example.com/identity/v3/roles/779a76d74f544224a7ef8762ca0de627" }, "description": "My new role", "name": "Member" }, { "id": "9fe2ff9ee4384b1894a90878d3e92bab", "links": { "self": "http://example.com/identity/v3/roles/9fe2ff9ee4384b1894a90878d3e92bab" }, "name": "_member_" }, { "id": "ba2dfba61c934ee89e3110de36273229", "links": { "self": "http://example.com/identity/v3/roles/ba2dfba61c934ee89e3110de36273229" }, "description": "My new role", "name": "ResellerAdmin" }, { "id": "f127b97616f24d3ebceb7be840210adc", "links": { "self": "http://example.com/identity/v3/roles/f127b97616f24d3ebceb7be840210adc" }, "description": null, "name": "service" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/service-create-request.json0000664000175000017500000000017500000000000026657 0ustar00zuulzuul00000000000000{ "service": { "type": "compute", "name": "compute2", "description": "Compute service 2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/service-create-response.json0000664000175000017500000000050500000000000027022 0ustar00zuulzuul00000000000000{ "service": { "name": "compute2", "links": { "self": "http://example.com/identity/v3/services/3f552eb79c48436db2868e948d8cf330" }, "enabled": true, "type": "compute", "id": "3f552eb79c48436db2868e948d8cf330", "description": "Compute service 2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/service-show-response.json0000664000175000017500000000043200000000000026536 0ustar00zuulzuul00000000000000{ "service": { "description": "Keystone Identity Service", "enabled": true, "id": "686766", "links": { "self": 
"http://example.com/identity/v3/services/686766" }, "name": "keystone", "type": "identity" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/service-update-request.json0000664000175000017500000000011500000000000026670 0ustar00zuulzuul00000000000000{ "service": { "description": "Block Storage Service V2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/service-update-response.json0000664000175000017500000000051500000000000027042 0ustar00zuulzuul00000000000000{ "service": { "name": "cinderv2", "links": { "self": "http://example.com/identity/v3/services/5789da9864004dd088fce14c1c626a4b" }, "enabled": true, "type": "volumev2", "id": "5789da9864004dd088fce14c1c626a4b", "description": "Block Storage Service V2" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/services-list-response.json0000664000175000017500000000640400000000000026721 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/services" }, "services": [ { "description": "Nova Compute Service", "enabled": true, "id": "1999c3a858c7408fb586817620695098", "links": { "self": "http://example.com/identity/v3/services/1999c3a858c7408fb586817620695098" }, "name": "nova", "type": "compute" }, { "description": "Cinder Volume Service V2", "enabled": true, "id": "39216610e75547f1883037e11976fc0f", "links": { "self": "http://example.com/identity/v3/services/39216610e75547f1883037e11976fc0f" }, "name": "cinderv2", "type": "volumev2" }, { "description": "Neutron Service", "enabled": true, "id": "4fe41a27de3341af9100123f765eac0d", "links": { "self": "http://example.com/identity/v3/services/4fe41a27de3341af9100123f765eac0d" }, "name": 
"neutron", "type": "network" }, { "description": "EC2 Compatibility Layer", "enabled": true, "id": "61d3d05bdd1449f18923c83f52a4d762", "links": { "self": "http://example.com/identity/v3/services/61d3d05bdd1449f18923c83f52a4d762" }, "name": "ec2", "type": "ec2" }, { "description": "Glance Image Service", "enabled": true, "id": "69afa3d57d1948ea988beeb252bbaa5d", "links": { "self": "http://example.com/identity/v3/services/69afa3d57d1948ea988beeb252bbaa5d" }, "name": "glance", "type": "image" }, { "description": "Nova Compute Service V2.1", "enabled": true, "id": "79b691ee7be649d9bf8613efc0960206", "links": { "self": "http://example.com/identity/v3/services/79b691ee7be649d9bf8613efc0960206" }, "name": "novav21", "type": "computev21" }, { "description": "Swift Service", "enabled": true, "id": "92419b70ebe64c6c873bd20b14360e6b", "links": { "self": "http://example.com/identity/v3/services/92419b70ebe64c6c873bd20b14360e6b" }, "name": "swift", "type": "object-store" }, { "description": "Keystone Identity Service", "enabled": true, "id": "b8f8454fc07b46b781204d2a436f9d1c", "links": { "self": "http://example.com/identity/v3/services/b8f8454fc07b46b781204d2a436f9d1c" }, "name": "keystone", "type": "identity" }, { "description": "Cinder Volume Service", "enabled": true, "id": "cdda3bea0742407f95e70f4758f46558", "links": { "self": "http://example.com/identity/v3/services/cdda3bea0742407f95e70f4758f46558" }, "name": "cinder", "type": "volume" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/token-validate-request.txt0000664000175000017500000000006600000000000026532 0ustar00zuulzuul00000000000000Headers: X-Auth-Token: 1dd7e3 X-Subject-Token: c67580 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/samples/admin/user-create-request.json0000664000175000017500000000122300000000000026170 0ustar00zuulzuul00000000000000{ "user": { "default_project_id": "263fd9", "domain_id": "1789d1", "enabled": true, "federated": [ { "idp_id": "efbab5a6acad4d108fec6c63d9609d83", "protocols": [ { "protocol_id": "mapped", "unique_id": "test@example.com" } ] } ], "name": "James Doe", "password": "secretsecret", "description": "James Doe user", "email": "jdoe@example.com", "options": { "ignore_password_expiry": true } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-create-response.json0000664000175000017500000000144500000000000026344 0ustar00zuulzuul00000000000000{ "user": { "default_project_id": "263fd9", "description": "James Doe user", "domain_id": "1789d1", "email": "jdoe@example.com", "enabled": true, "federated": [ { "idp_id": "efbab5a6acad4d108fec6c63d9609d83", "protocols": [ { "protocol_id": "mapped", "unique_id": "test@example.com" } ] } ], "id": "ff4e51", "links": { "self": "https://example.com/identity/v3/users/ff4e51" }, "name": "James Doe", "options": { "ignore_password_expiry": true }, "password_expires_at": "2016-11-06T15:32:17.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-groups-list-response.json0000664000175000017500000000157600000000000027376 0ustar00zuulzuul00000000000000{ "groups": [ { "description": "Developers cleared for work on all general projects", "domain_id": "1789d1", "id": "ea167b", "links": { "self": "https://example.com/identity/v3/groups/ea167b" }, "membership_expires_at": null, "name": "Developers" }, { "description": "Developers cleared for work on secret projects", "domain_id": "1789d1", "id": "a62db1", "links": { "self": "https://example.com/identity/v3/groups/a62db1" }, 
"membership_expires_at": "2016-11-06T15:32:17.000000", "name": "Secure Developers" } ], "links": { "self": "http://example.com/identity/v3/users/9fe1d3/groups", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-password-update-request.json0000664000175000017500000000015400000000000030051 0ustar00zuulzuul00000000000000{ "user": { "password": "new_secretsecret", "original_password": "secretsecret" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-projects-list-response.json0000664000175000017500000000155400000000000027704 0ustar00zuulzuul00000000000000{ "projects": [ { "description": "description of this project", "domain_id": "161718", "enabled": true, "id": "456788", "links": { "self": "http://example.com/identity/v3/projects/456788" }, "name": "a project name", "parent_id": "212223" }, { "description": "description of this project", "domain_id": "161718", "enabled": true, "id": "456789", "links": { "self": "http://example.com/identity/v3/projects/456789" }, "name": "another domain", "parent_id": "212223" } ], "links": { "self": "http://example.com/identity/v3/users/313233/projects", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-roles-domain-list-response.json0000664000175000017500000000107400000000000030441 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "91011", "links": { "self": "http://example.com/identity/v3/roles/91011" }, "name": "admin" }, { "id": "91011", "links": { "self": "http://example.com/identity/v3/roles/91011" }, "name": "admin" } ], "links": { "self": 
"http://example.com/identity/v3/OS-INHERIT/domains/1234/users/5678/roles/inherited_to_projects", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-show-response.json0000664000175000017500000000054000000000000026054 0ustar00zuulzuul00000000000000{ "user": { "default_project_id": "263fd9", "domain_id": "1789d1", "enabled": true, "federated": [], "id": "9fe1d3", "links": { "self": "https://example.com/identity/v3/users/9fe1d3" }, "name": "jsmith", "password_expires_at": "2016-11-06T15:32:17.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-update-request.json0000664000175000017500000000025400000000000026212 0ustar00zuulzuul00000000000000{ "user": { "default_project_id": "263fd9", "enabled": true, "options": { "ignore_lockout_failure_attempts": true } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/user-update-response.json0000664000175000017500000000066600000000000026367 0ustar00zuulzuul00000000000000{ "user": { "default_project_id": "263fd9", "domain_id": "1789d1", "enabled": true, "federated": [], "id": "ff4e51", "links": { "self": "https://example.com/identity/v3/users/ff4e51" }, "name": "jamesdoe", "options": { "ignore_lockout_failure_attempts": true }, "password_expires_at": "2016-11-06T15:32:17.000000" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/admin/users-list-response.json0000664000175000017500000001136100000000000026235 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/users" }, "users": [ { "domain_id": 
"default", "enabled": true, "id": "2844b2a08be147a08ef58317d6471f1f", "links": { "self": "http://example.com/identity/v3/users/2844b2a08be147a08ef58317d6471f1f" }, "name": "glance", "password_expires_at": null }, { "domain_id": "default", "enabled": true, "id": "4ab84ab39de54f4d96eaff8f2145a7cd", "links": { "self": "http://example.com/identity/v3/users/4ab84ab39de54f4d96eaff8f2145a7cd" }, "name": "swiftusertest1", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": "default", "enabled": true, "id": "56696a9a04864d63877a3d06a6f0b24b", "links": { "self": "http://example.com/identity/v3/users/56696a9a04864d63877a3d06a6f0b24b" }, "name": "swift", "password_expires_at": null }, { "domain_id": "default", "enabled": true, "id": "5acb638d15da44fc8de41b9a4bd41875", "links": { "self": "http://example.com/identity/v3/users/5acb638d15da44fc8de41b9a4bd41875" }, "name": "alt_demo", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": "default", "enabled": true, "id": "7596e862b1af473c8ed6ae99d35b51e3", "links": { "self": "http://example.com/identity/v3/users/7596e862b1af473c8ed6ae99d35b51e3" }, "name": "demo", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": "default", "enabled": true, "id": "802edb2141b44e77bbde241417450749", "links": { "self": "http://example.com/identity/v3/users/802edb2141b44e77bbde241417450749" }, "name": "nova", "password_expires_at": null }, { "domain_id": "592ab0800d3745baaf45c610fa41950a", "enabled": true, "id": "9aca3883784647fe9aff3a50d922489a", "links": { "self": "http://example.com/identity/v3/users/9aca3883784647fe9aff3a50d922489a" }, "name": "swiftusertest4", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": "default", "enabled": true, "id": "a1251b011f9345e68c2458b841152034", "links": { "self": "http://example.com/identity/v3/users/a1251b011f9345e68c2458b841152034" }, "name": "swiftusertest3", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": 
"default", "enabled": true, "id": "a43f46eb318041f6b712143862e3ad70", "links": { "self": "http://example.com/identity/v3/users/a43f46eb318041f6b712143862e3ad70" }, "name": "neutron", "password_expires_at": null }, { "domain_id": "default", "enabled": true, "id": "b964a9e51c0046a4a84d3f83a135a97c", "links": { "self": "http://example.com/identity/v3/users/b964a9e51c0046a4a84d3f83a135a97c" }, "name": "admin", "password_expires_at": null }, { "domain_id": "default", "enabled": true, "id": "dc87e591c0d247d5ac04e873bd8a1646", "links": { "self": "http://example.com/identity/v3/users/dc87e591c0d247d5ac04e873bd8a1646" }, "name": "cinder", "password_expires_at": null }, { "domain_id": "default", "enabled": true, "id": "ed214dc1c2c6468b926c96eca6c8aee9", "links": { "self": "http://example.com/identity/v3/users/ed214dc1c2c6468b926c96eca6c8aee9" }, "name": "glance-swift", "password_expires_at": "2016-11-06T15:32:17.000000" }, { "domain_id": "default", "enabled": true, "id": "f4f6587b058a4f46a00242549b430d37", "links": { "self": "http://example.com/identity/v3/users/f4f6587b058a4f46a00242549b430d37" }, "name": "swiftusertest2", "password_expires_at": "2016-11-06T15:32:17.000000" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/source/v3/samples/auth/0000775000175000017500000000000000000000000021243 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/0000775000175000017500000000000000000000000023116 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/domain-id-password.json0000664000175000017500000000063600000000000027517 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" 
], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": { "domain": { "id": "default" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/domain-id-token.json0000664000175000017500000000044500000000000026773 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "domain": { "id": "default" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/domain-name-password.json0000664000175000017500000000064000000000000030036 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": { "domain": { "name": "Default" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/domain-name-token.json0000664000175000017500000000044700000000000027321 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "domain": { "name": "Default" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/project-id-password.json0000664000175000017500000000067000000000000027714 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": { "project": { "id": "a6944d763bf64ee6a275f1263fae0352" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/project-id-token.json0000664000175000017500000000047700000000000027177 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "project": { "id": "a6944d763bf64ee6a275f1263fae0352" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/project-id-totp.json0000664000175000017500000000065400000000000027042 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "totp" ], "totp": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "passcode": "123456" } } }, "scope": { "project": { "id": "a6944d763bf64ee6a275f1263fae0352" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/project-name-password.json0000664000175000017500000000076200000000000030242 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": { "project": { "domain": { "id": "default" }, "name": "admin" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/project-name-token.json0000664000175000017500000000057100000000000027516 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "project": { "domain": { "id": "default" }, "name": "admin" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/system-password.json0000664000175000017500000000063200000000000027176 
0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "ee4dfb6e5540447cb3741905149d9b6e", "password": "devstacker" } } }, "scope": { "system": { "all": true } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/requests/system-token.json0000664000175000017500000000044100000000000026452 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "'$OS_TOKEN'" } }, "scope": { "system": { "all": true } } } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/0000775000175000017500000000000000000000000023264 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/auth-receipt-password.json0000664000175000017500000000072300000000000030413 0ustar00zuulzuul00000000000000{ "receipt":{ "expires_at":"2018-07-05T08:39:23.000000Z", "issued_at":"2018-07-05T08:34:23.000000Z", "methods": [ "password" ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin" } }, "required_auth_methods": [ ["totp", "password"] ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/domain-scoped-password.json0000664000175000017500000000365200000000000030547 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", 
"interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "domain": { "id": "default", "name": "Default" }, "expires_at": "2015-11-07T02:58:43.578887Z", "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "password" ], "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/domain-scoped-token.json0000664000175000017500000000374500000000000030030 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw", "oppr9r6pQo6mWb5Ji4zgwg" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "domain": { "id": "default", "name": "Default" }, "expires_at": "2015-11-07T02:58:43.578887Z", "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "token", "password" ], "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "user": { "domain": { "id": 
"default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/project-scoped-password-totp.json0000664000175000017500000000413400000000000031726 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "expires_at": "2015-11-07T02:58:43.578887Z", "is_domain": false, "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "password", "totp" ], "project": { "domain": { "id": "default", "name": "Default" }, "id": "a6944d763bf64ee6a275f1263fae0352", "name": "admin" }, "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/project-scoped-password.json0000664000175000017500000000411000000000000030734 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", 
"interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "expires_at": "2015-11-07T02:58:43.578887Z", "is_domain": false, "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "password" ], "project": { "domain": { "id": "default", "name": "Default" }, "id": "a6944d763bf64ee6a275f1263fae0352", "name": "admin" }, "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/project-scoped-token.json0000664000175000017500000000420300000000000030215 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw", "oppr9r6pQo6mWb5Ji4zgwg" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "expires_at": 
"2015-11-07T02:58:43.578887Z", "is_domain": false, "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "token", "password" ], "project": { "domain": { "id": "default", "name": "Default" }, "id": "a6944d763bf64ee6a275f1263fae0352", "name": "admin" }, "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/system-scoped-password.json0000664000175000017500000000361000000000000030616 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "expires_at": "2015-11-07T02:58:43.578887Z", "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "password" ], "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "system": { "all": true }, "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/samples/auth/responses/system-scoped-token.json0000664000175000017500000000370300000000000030077 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "3T2dc1CGQxyJsHdDu1xkcw", "oppr9r6pQo6mWb5Ji4zgwg" ], "catalog": [ { "endpoints": [ { "id": "068d1b359ee84b438266cb736d81de97", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "8bfc846841ab441ca38471be6d164ced", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" }, { "id": "beb6d358c3654b4bada04d4663b640b9", "interface": "internal", "region": "RegionOne", "region_id": "RegionOne", "url": "http://example.com/identity" } ], "type": "identity", "id": "050726f278654128aba89757ae25950c", "name": "keystone" } ], "expires_at": "2015-11-07T02:58:43.578887Z", "issued_at": "2015-11-07T01:58:43.578929Z", "methods": [ "token", "password" ], "roles": [ { "id": "51cc68287d524c759f47c811e6463340", "name": "admin" } ], "system": { "all": true }, "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin", "password_expires_at": "2016-11-06T15:32:17.000000" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/samples/auth/responses/unscoped-password.json0000664000175000017500000000100100000000000027627 0ustar00zuulzuul00000000000000{ "token": { "audit_ids": [ "mAjXQhiYRyKwkB4qygdLVg" ], "expires_at": "2015-11-05T22:00:11.000000Z", "issued_at": "2015-11-05T21:00:33.819948Z", "methods": [ "password" ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "10a2e6e717a245d9acad3e5f97aeca3d", "name": "admin", "password_expires_at": null } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3/service-catalog.inc0000664000175000017500000002236000000000000022404 0ustar00zuulzuul00000000000000.. -*- rst -*- =============================== Service catalog and endpoints =============================== A service is an OpenStack web service that you can access through a URL, i.e. an endpoint. A service catalog lists the services that are available to the caller based upon the current authorization. You can create, list, show details for, update, and delete services. When you create or update a service, you can enable the service, which causes it and its endpoints to appear in the service catalog. You can create, list, show details for, update, and delete endpoints. List services ============= .. rest_method:: GET /v3/services Lists all services. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/services`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: request_service_name_query_not_required - type: service_type_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: service_name - links: service_links - enabled: service_enabled - services: services - type: service_type - id: service_id - description: service_description Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/services-list-response.json :language: javascript Create service ============== .. rest_method:: POST /v3/services Creates a service. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/services`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - description: service_description - service: service - enabled: service_enabled - type: service_type - name: service_name Example ~~~~~~~ .. 
literalinclude:: ./samples/admin/service-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: service_name - service: service - links: service_links - type: service_type - id: service_id - description: service_description Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Show service details ==================== .. rest_method:: GET /v3/services/{service_id} Shows details for a service. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/service`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - service_id: service_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: service_name - service: service - links: service_links - type: service_type - id: service_id - description: service_description Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/service-show-response.json :language: javascript Update service ============== .. rest_method:: PATCH /v3/services/{service_id} Updates a service. The request body is the same as the create service request body, except that you include only those attributes that you want to update. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/services`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - type: service_type - enabled: service_enabled_not_required - description: service_description_not_required - service: service - name: service_name - service_id: service_id_path Example ~~~~~~~ .. literalinclude:: ./samples/admin/service-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - name: service_name - service: service - links: service_links - type: service_type - id: service_id - description: service_description Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/service-update-response.json :language: javascript Delete service ============== .. rest_method:: DELETE /v3/services/{service_id} Deletes a service. If you try to delete a service that still has associated endpoints, this call either deletes all associated endpoints or fails until all endpoints are deleted. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/service`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - service_id: service_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List endpoints ============== .. rest_method:: GET /v3/endpoints Lists all available endpoints. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/endpoints`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - interface: interface_query - service_id: service_id_query - region_id: region_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - region_id: region_id_required - links: endpoints_links - url: endpoint_url - region: endpoint_region - enabled: endpoint_enabled - interface: endpoint_interface - service_id: service_id - endpoints: endpoints - id: endpoint_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/endpoints-list-response.json :language: javascript Create endpoint =============== .. rest_method:: POST /v3/endpoints Creates an endpoint. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/endpoints`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint: endpoint - url: endpoint_url - enabled: endpoint_enabled_not_required - interface: endpoint_interface - service_id: service_id - region_id: region_id_not_required Example ~~~~~~~ .. literalinclude:: ./samples/admin/endpoint-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint: endpoint - links: endpoint_links - url: endpoint_url - region: endpoint_region - enabled: endpoint_enabled - interface: endpoint_interface - service_id: service_id - id: endpoint_id - region_id: region_id_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Show endpoint details ===================== .. rest_method:: GET /v3/endpoints/{endpoint_id} Shows details for an endpoint. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/endpoints`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_id: endpoint_id_path Response -------- Parameters ~~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint: endpoint - name: endpoint_name - links: endpoint_links - url: endpoint_url - region: endpoint_region - interface: endpoint_interface - service_id: service_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/endpoint-show-response.json :language: javascript Update endpoint =============== .. rest_method:: PATCH /v3/endpoints/{endpoint_id} Updates an endpoint. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/endpoint`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - endpoint: endpoint - url: endpoint_url - region: endpoint_region - interface: endpoint_interface - service_id: service_id - endpoint_id: endpoint_id_path Example ~~~~~~~ .. literalinclude:: ./samples/admin/endpoint-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint: endpoint - id: endpoint_id - links: endpoint_links - url: endpoint_url - region: endpoint_region - interface: endpoint_interface - service_id: service_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/endpoint-update-response.json :language: javascript Delete endpoint =============== .. rest_method:: DELETE /v3/endpoints/{endpoint_id} Deletes an endpoint. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/endpoint`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/status.yaml0000664000175000017500000000417100000000000021050 0ustar00zuulzuul00000000000000# Success Codes 200: default: | Request was successful. 201: default: | Resource was created and is ready to use. 202: default: | Request was accepted for processing, but the processing has not been completed. A 'location' header is included in the response which contains a link to check the progress of the request. 204: default: | The server has fulfilled the request. 300: default: | There are multiple choices for resources. The request has to be more specific to successfully retrieve one of these resources. 
# Error Codes 400: default: | Some content in the request was invalid. 401: default: | User must authenticate before making a request. auth_failed: | Authentication attempt has failed. auth_receipt: | User has successfully supplied some auth methods, but not enough for full authentication. auth_receipt_failure: | Authentication attempt has failed. Either the auth receipt has expired, or the additional auth methods supplied were invalid. 403: default: | Policy does not allow current user to do this operation. 404: default: | The requested resource could not be found. 405: default: | Method is not valid for this endpoint. 406: default: | The requested API version is not supported by the API. 409: default: | This operation conflicted with another operation on this resource. 410: default: | The access request to the target resource is no longer available. 413: default: | The request is larger than the server is willing or able to process. 415: default: | The request entity has a media type which the server or resource does not support. 500: default: | Something went wrong inside the service. This should not happen usually. If it does happen, it means the server has experienced some serious problems. 501: default: | The server either does not recognize the request method, or it lacks the ability to fulfill the request. 503: default: | Service is not available. This is mostly caused by service configuration errors which prevents the service from successful start up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/system-roles.inc0000664000175000017500000001624700000000000022011 0ustar00zuulzuul00000000000000.. -*- rst -*- ======================= System Role Assignments ======================= A system role assignment ultimately controls access to system-level API calls. 
System role assignments are similar to project or domain role assignments, but are meant for a different target. Instead of giving a user or group a role on a project, they can be given a system role. Good examples of system-level APIs include management of the service catalog and compute hypervisors. List system role assignments for a user ======================================= .. rest_method:: GET /v3/system/users/{user_id}/roles Lists all system role assignment a user has. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_user_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_response_body - roles: system_roles_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/list-system-roles-for-user-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?user.id={user_id}&scope.system Assign a system role to a user ============================== .. rest_method:: PUT /v3/system/users/{user_id}/roles/{role_id} Grant a user a role on the system. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Check user for a system role assignment ======================================= .. rest_method:: HEAD /v3/system/users/{user_id}/roles/{role_id} Check if a specific user has a role assignment on the system. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Get system role assignment for a user ===================================== .. rest_method:: GET /v3/system/users/{user_id}/roles/{role_id} Get a specific system role assignment for a user. This is the same API as ``HEAD /v3/system/users/{user_id}/roles/{role_id}``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Delete a system role assignment from a user =========================================== .. rest_method:: DELETE /v3/system/users/{user_id}/roles/{role_id} Remove a system role assignment from a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_user_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List system role assignments for a group ======================================== .. rest_method:: GET /v3/system/groups/{group_id}/roles Lists all system role assignment a group has. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_group_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - links: link_response_body - roles: system_roles_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/list-system-roles-for-group-response.json :language: javascript The functionality of this request can also be achieved using the generalized list assignments API:: GET /role_assignments?group.id={group_id}&scope.system Assign a system role to a group =============================== .. rest_method:: PUT /v3/system/groups/{group_id}/roles/{role_id} Grant a group a role on the system. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Check group for a system role assignment ======================================== .. rest_method:: HEAD /v3/system/groups/{group_id}/roles/{role_id} Check if a specific group has a role assignment on the system. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Get system role assignment for a group ====================================== .. rest_method:: GET /v3/system/groups/{group_id}/roles/{role_id} Get a specific system role assignment for a group. This is the same API as ``HEAD /v3/system/groups/{group_id}/roles/{role_id}``. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Delete a system role assignment from a group ============================================ .. rest_method:: DELETE /v3/system/groups/{group_id}/roles/{role_id} Remove a system role assignment from a group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/system_group_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - group_id: group_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/unified_limits.inc0000664000175000017500000002751200000000000022344 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== Unified Limits ============== In OpenStack, a quota system mainly contains two parts: ``limit`` and ``usage``. The Unified limits in Keystone is a replacement of the ``limit`` part. It contains two kinds of resouces: ``Registered Limit`` and ``Limit``. A ``registered limit`` is a default limit. It is usually created by the services which are registered in Keystone. A ``limit`` is the limit that override the registered limit for each project. List Registered Limits ====================== .. rest_method:: GET /v3/registered_limits Lists Registered Limits. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/registered_limits`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - service_id: service_id_query - region_id: region_id_query - resource_name: resource_name_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - registered_limits: registered_limits - id: registered_limit_id - service_id: service_id_registered_limit - region_id: region_id_response_body - resource_name: resource_name - default_limit: default_limit - description: description_registered_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/registered-limits-list-response.json :language: javascript Create Registered Limits ======================== .. rest_method:: POST /v3/registered_limits Creates registered limits. It supports to create more than one registered limit in one request. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/registered_limits`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limits: registered_limits - service_id: service_id_registered_limit - region_id: region_id_request_body - resource_name: resource_name - default_limit: default_limit - description: description_registered_limit_request_body Examples ~~~~~~~~ .. literalinclude:: ./samples/admin/registered-limits-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limits: registered_limits - id: registered_limit_id - service_id: service_id_registered_limit - region_id: region_id_response_body - resource_name: resource_name - default_limit: default_limit - description: description_registered_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Examples ~~~~~~~~ .. 
literalinclude:: ./samples/admin/registered-limits-create-response.json :language: javascript Update Registered Limit ======================== .. rest_method:: PATCH /v3/registered_limits/{registered_limit_id} Updates the specified registered limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/registered_limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limit_id: registered_limit_id_path - registered_limit: registered_limit - service_id: request_service_id_registered_limit_body_not_required - region_id: request_region_id_registered_limit_body_not_required - resource_name: request_resource_name_body_not_required - default_limit: request_default_limit_body_not_required - description: description_registered_limit_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/registered-limits-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limit: registered_limit - id: registered_limit_id - service_id: service_id_registered_limit - region_id: region_id_response_body - resource_name: resource_name - default_limit: default_limit - description: description_registered_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/registered-limits-update-response.json :language: javascript Show Registered Limit Details ============================= .. rest_method:: GET /v3/registered_limits/{registered_limit_id} Shows details for a registered limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/registered_limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limit_id: registered_limit_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - registered_limit: registered_limit - id: registered_limit_id - service_id: service_id_registered_limit - region_id: region_id_response_body - resource_name: resource_name - default_limit: default_limit - description: description_registered_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/registered-limit-show-response.json :language: javascript Delete Registered Limit ======================= .. rest_method:: DELETE /v3/registered_limits/{registered_limit_id} Deletes a registered limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/registered_limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - registered_limit_id: registered_limit_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Get Enforcement Model ===================== .. rest_method:: GET /v3/limits/model Return the configured limit enforcement model. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limit_model`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - model: limit_model_required_response_body - name: limit_model_name_required_response_body - description: limit_model_description_required_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Flat Enforcement Example ~~~~~~~~~~~~~~~~~~~~~~~~ .. literalinclude:: ./samples/admin/limit-flat-model-response.json :language: javascript List Limits =========== .. rest_method:: GET /v3/limits Lists Limits. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limits`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - service_id: service_id_query - region_id: region_id_query - resource_name: resource_name_query - project_id: scope_project_id_query - domain_id: scope_domain_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - limits: limits - id: limit_id - project_id: project_id - domain_id: response_limit_domain_id_body - service_id: service_id_limit - region_id: region_id_response_body - resource_name: resource_name - resource_limit: resource_limit - description: description_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/limits-list-response.json :language: javascript Create Limits ============= .. rest_method:: POST /v3/limits Creates limits. It supports to create more than one limit in one request. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limits`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limits: limits - project_id: request_limit_project_id_not_required - domain_id: request_limit_domain_id_not_required - service_id: service_id_limit - region_id: region_id_request_body - resource_name: resource_name - resource_limit: resource_limit - description: description_limit_request_body Examples ~~~~~~~~ .. literalinclude:: ./samples/admin/limits-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limits: limits - id: limit_id - project_id: project_id - domain_id: response_limit_domain_id_body - service_id: service_id_limit - region_id: region_id_response_body - resource_name: resource_name - resource_limit: resource_limit - description: description_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 201 .. 
rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Examples ~~~~~~~~ .. literalinclude:: ./samples/admin/limits-create-response.json :language: javascript Update Limit ============= .. rest_method:: PATCH /v3/limits/{limit_id} Updates the specified limit. It only supports updating ``resource_limit`` or ``description`` for the limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limit_id: limit_id_path - limit: limit - resource_limit: request_resource_limit_body_not_required - description: description_limit_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/limits-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limit: limit - id: limit_id - project_id: project_id - domain_id: response_limit_domain_id_body - service_id: service_id_limit - region_id: region_id_response_body - resource_name: resource_name - resource_limit: resource_limit - description: description_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/limits-update-response.json :language: javascript Show Limit Details ================== .. rest_method:: GET /v3/limits/{limit_id} Shows details for a limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limit_id: limit_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - limit: limit - id: limit_id - project_id: project_id - domain_id: response_limit_domain_id_body - service_id: service_id_limit - region_id: region_id_response_body - resource_name: resource_name - resource_limit: resource_limit - description: description_limit_response_body - links: link_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/limit-show-response.json :language: javascript Delete Limit ============ .. rest_method:: DELETE /v3/limits/{limit_id} Deletes a limit. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/limit`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - limit_id: limit_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 401 - 403 - 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3/users.inc0000664000175000017500000002051300000000000020473 0ustar00zuulzuul00000000000000.. -*- rst -*- ======= Users ======= A user is an individual API consumer that is owned by a domain. A role explicitly associates a user with projects or domains. A user with no assigned roles has no access to OpenStack resources. You can list, create, show details for, update, delete, and change the password for users. You can also list groups, projects, and role assignments for a specified user. To list user roles, see `Roles `_. List users ========== .. rest_method:: GET /v3/users Lists users. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/users`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - domain_id: domain_id_query - enabled: enabled_user_query - idp_id: idp_id_query - name: name_user_query - password_expires_at: password_expires_at_query - protocol_id: protocol_id_query - unique_id: unique_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - users: users_object - default_project_id: default_project_id_response_body - domain_id: domain_id_response_body - enabled: enabled_user_response_body - id: id_user_body - links: links_user - name: user_name_response_body - password_expires_at: password_expires_at Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 Example ~~~~~~~ .. literalinclude:: ./samples/admin/users-list-response.json :language: javascript Create user =========== .. rest_method:: POST /v3/users Creates a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/users`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user: user_object - default_project_id: default_project_id_request_body - domain_id: user_domain_id_request_body - federated: federated_in_request_body - enabled: enabled_user_request_body - name: user_name_create_request_body - password: password_request_body - extra: extra_request_body - options: user_options_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user: user_object - default_project_id: default_project_id_response_body - domain_id: domain_id_response_body - enabled: enabled_user_response_body - federated: federated_in_response_body - id: id_user_body - links: links_user - name: user_name_response_body - password_expires_at: password_expires_at - options: response_user_options_body_required Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success status.yaml - 201 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-create-response.json :language: javascript Show user details ================= .. rest_method:: GET /v3/users/{user_id} Shows details for a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user: user_object - default_project_id: default_project_id_response_body - domain_id: domain_id_response_body - enabled: enabled_user_response_body - federated: federated_in_response_body - id: id_user_body - links: links_user - name: user_name_response_body - password_expires_at: password_expires_at Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-show-response.json :language: javascript Update user =========== .. rest_method:: PATCH /v3/users/{user_id} Updates a user. If the back-end driver does not support this functionality, this call might return the HTTP ``Not Implemented (501)`` response code. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - user: user_object - default_project_id: default_project_id_update_body - domain_id: user_domain_id_update_body - enabled: enabled_user_update_body - federated: federated_in_request_body - name: user_name_update_body - password: user_update_password_body - options: user_options_request_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - user: user_object - default_project_id: default_project_id_response_body - domain_id: domain_id_response_body - enabled: enabled_user_response_body - federated: federated_in_response_body - id: id_user_body - links: links_user - name: user_name_response_body - password_expires_at: password_expires_at - options: response_user_options_body_required Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 - 501 Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-update-response.json :language: javascript Delete user =========== .. rest_method:: DELETE /v3/users/{user_id} Deletes a user. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 List groups to which a user belongs =================================== .. rest_method:: GET /v3/users/{user_id}/groups Lists groups to which a user belongs. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user_groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: link_collection - groups: groups - description: group_description_response_body - domain_id: group_domain_id_response_body - id: group_id_response_body - links: link_response_body - name: group_name_response_body - membership_expires_at: membership_expires_at_response_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. 
literalinclude:: ./samples/admin/user-groups-list-response.json :language: javascript List projects for user ====================== .. rest_method:: GET /v3/users/{user_id}/projects List projects to which the user has authorization to access. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 200 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-projects-list-response.json :language: javascript Change password for user ======================== .. rest_method:: POST /v3/users/{user_id}/password Changes the password for a user. .. note:: This API call does not require a token for authentication. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/user_change_password`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - user: user_object - original_password: original_password - password: user_password_update_body Example ~~~~~~~ .. literalinclude:: ./samples/admin/user-password-update-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success status.yaml - 204 .. rest_status_code:: error status.yaml - 400 - 401 - 403 - 404 - 409 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/0000775000175000017500000000000000000000000017434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/endpoint-policy.inc0000664000175000017500000002270400000000000023251 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ======================== OS-ENDPOINT-POLICY API ======================== Creates, verifies, and deletes associations between service endpoints and policies. Such associations enable an endpoint to request its policy. To create, check, or delete an association, you reference a policy by its ID in the Identity server. The extension supports these associations: - A policy and endpoint association. - A policy and service-type endpoint in a region association. - A policy and service-type endpoint association. This order reflects policies in their most to least-specific order. When an endpoint requests the appropriate policy for itself, the extension finds the policy by traversing the ordered sequence of methods of association. The extension shows the policy for the first association that it finds. If the region of the endpoint has a parent, the extension examines the region associations up the region tree in ascending order. For region associations, the extension examines any parent regions in ascending order. The extension does not combine polices. Associate policy and endpoint ============================= .. rest_method:: PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} Associates a policy and an endpoint. If an association already exists between the endpoint and another policy, this call replaces that association. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Verify a policy and endpoint association ======================================== .. rest_method:: GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} Verifies an association between a policy and an endpoint. A HEAD version of this API is also supported. Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - policy_id: policy_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Delete a policy and endpoint association ======================================== .. rest_method:: DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} Deletes an association between a policy and an endpoint. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Associate policy and service-type endpoint ========================================== .. rest_method:: PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} Associates a policy and any endpoint of a service type. If an association already exists between the endpoint of a service type and another policy, this call replaces that association. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Verify a policy and service-type endpoint association ===================================================== .. rest_method:: GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} Verifies an association between a policy and an endpoint of a service type. A HEAD version of this API is also supported. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Delete a policy and service-type endpoint association ===================================================== .. 
rest_method:: DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} Deletes an association between a policy and an endpoint of a service type. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Show policy for endpoint ======================== .. rest_method:: GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/policy Shows a policy for an endpoint. The extension finds the policy by traversing the ordered sequence of methods of association. The extension shows the policy for the first association that it finds. If the region of the endpoint has a parent, the extension examines the region associations up the region tree in ascending order. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy: policy - type: policy_type - blob: policy_blob - links: policy_links - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: samples/OS-ENDPOINT-POLICY/policy-show-response.json :language: javascript Check policy and service endpoint association ============================================= .. rest_method:: HEAD /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/policy Checks whether a policy is associated with an endpoint. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path Response -------- Status Codes ~~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Associate policy and service-type endpoint in a region ====================================================== .. 
rest_method:: PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} Associates a policy and an endpoint of a service type in a region. If an association already exists between the service in a region and another policy, this call replaces that association. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path - region_id: region_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Verify a policy and service-type endpoint in a region association ================================================================= .. rest_method:: GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} Verifies an association between a policy and service-type endpoint in a region. A HEAD version of this API is also supported. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path - region_id: region_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Delete a policy and service-type endpoint in a region association ================================================================= .. rest_method:: DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} Deletes an association between a policy and service-type endpoint in a region. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path - service_id: service_id_path - region_id: region_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 List policy and service endpoint associations ============================================= .. 
rest_method:: GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints Lists all the endpoints that are currently associated with a policy through any of the association methods. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy_id: policy_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - links: policy_links - url: url - region: region - next: next - self: self - interface: interface - service_id: service_id - endpoints: endpoints - id: endpoint_id - previous: previous Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: samples/OS-ENDPOINT-POLICY/policy-endpoint-associations-list-response.json :language: javascript Show the effective policy associated with an endpoint ===================================================== .. rest_method:: GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy Returns the policy that is currently associated with the given endpoint, by working through the ordered sequence of methods of association. The first association that is found will be returned. If the region of the endpoint has a parent, then region associations will be examined up the region tree in ascending order. A HEAD version of this API is also supported. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_id: endpoint_id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - policy: policy - type: policy_type - blob: policy_blob - links: policy_links - id: policy_id Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. 
literalinclude:: samples/OS-ENDPOINT-POLICY/policy-show-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/ep-filter.inc0000664000175000017500000003703500000000000022026 0ustar00zuulzuul00000000000000.. -*- rst -*- ================ OS-EP-FILTER API ================ This API enables the creation of custom catalogs using project scope. The result is the ability to advertise specific endpoints based on the project in use. The association can be done two different ways. The first is by building a direct association between the project and the endpoint, which implies that all tokens scoped to that particular project will receive a specific endpoint, or set of endpoints, in the service catalog. The second is by creating an endpoint group. An endpoint group is a filter that consists of at least one endpoint attribute. By associating a project to an endpoint group, all service catalogs scoped to that project will contain endpoints that match the attributes defined in the endpoint group. Using endpoint groups is a way to dynamically associate an endpoint, or a group of endpoints, to a specific project. API Resources ============= Endpoint Group -------------- An endpoint group represents a dynamic collection of service endpoints having the same characteristics, such as ``service_id``, ``interface``, or ``region_id``. Any endpoint attribute could be used as part of a filter. An example use case would be to give a particular project access to specific service endpoints. When users authenticate for scoped tokens to that project, they are presented with endpoints for the service because the association matched attributes of the endpoints based on the filters. Continuing with this example, let's assume we want a specific set of endpoints to be advertised in the service catalog for a particular project. 
We can create an endpoint group to explicitly filter endpoints based on their interface type and service ID. .. literalinclude:: samples/OS-EP-FILTER/create-endpoint-group-request.json :language: javascript This implies an Endpoint Group that will only return endpoints that have an interface type of ``admin`` and correspond to a service with an ID of ``1b501a``. Create Endpoint Group ===================== .. rest_method:: POST /v3/OS-EP-FILTER/endpoint_groups Create a new endpoint group filter that represents a dynamic collection of service endpoints having the same characteristics Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: eg_name - filters: eg_filters - description: eg_description Example ~~~~~~~ .. literalinclude:: samples/OS-EP-FILTER/create-endpoint-group-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 201 Created .. literalinclude:: samples/OS-EP-FILTER/endpoint-group-response.json :language: javascript Get Endpoint Group ================== .. rest_method:: GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Show details of an endpoint group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/endpoint-group-response.json :language: javascript Check Endpoint Group ==================== .. 
rest_method:: HEAD /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Determine if an endpoint group exists. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK Update Endpoint Group ===================== .. rest_method:: PATCH /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Modify attributes of an endpoint group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path - name: eg_name - filters: eg_filters - description: eg_description Example ~~~~~~~ .. literalinclude:: samples/OS-EP-FILTER/update-endpoint-group-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/update-endpoint-group-response.json :language: javascript Delete Endpoint Group ===================== .. rest_method:: DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} Delete an endpoint group. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Status: 204 No Content List Endpoint Groups ==================== .. 
rest_method:: GET /v3/OS-EP-FILTER/endpoint_groups List all available endpoint groups. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - name: request_endpoint_group_name_query_not_required Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/endpoint-groups-response.json :language: javascript Project and Endpoint Associations --------------------------------- As previously noted, projects can be associated with endpoints either directly or by using endpoint groups. The following API calls describe how to associate a project to a single endpoint and an endpoint group. Create Association ================== .. rest_method:: PUT /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Creates a direct association between ``project_id`` and ``endpoint_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/project_endpoint`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 204 No Content Check Association ================= .. rest_method:: HEAD /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Verifies the existence of an association between ``project_id`` and ``endpoint_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/project_endpoint`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - project_id: project_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 204 No Content Delete Association ================== .. rest_method:: DELETE /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} Removes a direct association between ``project_id`` and ``endpoint_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/project_endpoint`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 204 No Content List Associations by Project ============================= .. rest_method:: GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints Returns all endpoints that are currently associated with ``project_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/project_endpoints`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/list-associations-by-project-response.json :language: javascript List Associations by Endpoint ============================= .. rest_method:: GET /v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects Returns all projects that are currently associated with ``endpoint_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_projects`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - endpoint_id: endpoint_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/list-associations-by-endpoint-response.json :language: javascript Project and Endpoint Group Associations --------------------------------------- Projects can be associated to multiple endpoints by being associated to a single endpoint group. All endpoints that match the filter in the endpoint group will be associated with the project. The following API calls describe how to associate a project to an endpoint group. Create Endpoint Group to Project Association ============================================ .. rest_method:: PUT /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} Creates an association between ``endpoint_group_id`` and ``project_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 204 No Content Get Endpoint Group to Project Association ========================================= .. rest_method:: GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} Verifies the existence of an association between ``project_id`` and ``endpoint_group_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_project`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/endpoint-project-response.json :language: javascript Check Endpoint Group to Project Association =========================================== .. rest_method:: HEAD /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} Verifies the existence of an association between ``project_id`` and ``endpoint_group_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK Delete Endpoint Group to Project Association ============================================ .. rest_method:: DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} Removes the association between ``project_id`` and ``endpoint_group_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_project`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 204 No Content List Projects Associated with Endpoint Group ============================================ .. 
rest_method:: GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects Returns all projects that are currently associated with ``endpoint_group_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_projects`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/list-associations-by-endpoint-response.json :language: javascript List Endpoints Associated with Endpoint Group ============================================= .. rest_method:: GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints Returns all the endpoints that are currently associated with ``endpoint_group_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/endpoint_group_endpoints`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - endpoint_group_id: endpoint_group_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/list-service-endpoints.json :language: javascript List Endpoint Groups Associated with Project ============================================ .. rest_method:: GET /v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups Returns all the endpoint groups that are currently associated with ``project_id``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/1.0/rel/project_endpoint_groups`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - project_id: project_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-EP-FILTER/endpoint-groups-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/source/v3-ext/federation/0000775000175000017500000000000000000000000021554 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/0000775000175000017500000000000000000000000023563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/assertion.inc0000664000175000017500000000713300000000000026271 0ustar00zuulzuul00000000000000.. -*- rst -*- *New in version 1.1* Generate a SAML assertion ========================= .. rest_method:: POST /v3/auth/OS-FEDERATION/saml2 A user may generate a SAML assertion document based on the scoped token that is used in the request. Request Parameters: To generate a SAML assertion, a user must provides a scoped token ID and Service Provider ID in the request body. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/saml2`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/assertion/parameters.yaml - auth: auth Example ~~~~~~~ .. literalinclude:: federation/assertion/samples/saml-assertion-request.json :language: javascript The response will be a full SAML assertion. Note that for readability the certificate has been truncated. Server will also set two HTTP headers: ``X-sp-url`` and ``X-auth-url``. 
The former is the URL where assertion should be sent, whereas the latter remote URL where token will be issued once the client is finally authenticated. Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/assertion/parameters.yaml - Headers: headers - xml: saml_xml Example ~~~~~~~ .. literalinclude:: federation/assertion/samples/saml-assertion-response.xml :language: xml For more information about how a SAML assertion is structured, refer to the `specification `__. Generate an ECP wrapped SAML assertion ====================================== .. rest_method:: POST /v3/auth/OS-FEDERATION/saml2/ecp A user may generate a SAML assertion document to work with the *Enhanced Client or Proxy* (ECP) profile based on the scoped token that is used in the request. Request Parameters: To generate an ECP wrapped SAML assertion, a user must provides a scoped token ID and Service Provider ID in the request body. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/saml2/ecp`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/assertion/parameters.yaml - auth: auth Example ~~~~~~~ .. literalinclude:: federation/assertion/samples/ecp-saml-assertion-request.json :language: javascript The response will be an ECP wrapped SAML assertion. Note that for readability the certificate has been truncated. Server will also set two HTTP headers: ``X-sp-url`` and ``X-auth-url``. The former is the URL where assertion should be sent, whereas the latter remote URL where token will be issued once the client is finally authenticated. Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/assertion/parameters.yaml - Headers: headers - xml: saml_xml Example ~~~~~~~ .. literalinclude:: federation/assertion/samples/ecp-saml-assertion-response.xml :language: xml Retrieve Metadata properties ============================ .. 
rest_method:: GET /v3/OS-FEDERATION/saml2/metadata A user may retrieve Metadata about an Identity Service acting as an Identity Provider. The response will be a full document with Metadata properties. Note that for readability, this example certificate has been truncated. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/metadata`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/assertion/parameters.yaml - Headers: headers - xml: metadata_xml Example ~~~~~~~ .. literalinclude:: federation/assertion/samples/metadata-response.xml :language: xml For more information about how a SAML assertion is structured, refer to the `specification `__.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/parameters.yaml0000664000175000017500000000104500000000000026612 0ustar00zuulzuul00000000000000# variables in header # variables in path # variables in query # variables in body auth: description: | Auth data with user's identity and Service Provider scope information in: body required: true type: object headers: description: | XML headers in: body required: true type: object metadata_xml: description: | Identity Provider metadata information in XML format in: body required: true type: object saml_xml: description: | SAML assertion in XML format in: body required: true type: object ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/0000775000175000017500000000000000000000000025227 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/ecp-saml-assertion-request.json0000664000175000017500000000046100000000000033317 
0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "--token_id--" } }, "scope": { "service_provider": { "id": "--sp_id--" } } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/ecp-saml-assertion-response.xml0000664000175000017500000001302000000000000033307 0ustar00zuulzuul00000000000000Headers: Content-Type: text/xml X-sp-url: http://beta.example.com/Shibboleth.sso/POST/ECP X-auth-url: http://beta.example.com/identity/v3/OS-FEDERATION/identity_providers/beta/protocols/auth ss:mem:1ddfe8b0f58341a5a840d2e8717b0737 http://keystone.idp/v3/OS-FEDERATION/saml2/idp http://keystone.idp/v3/OS-FEDERATION/saml2/idp 0KH2CxdkfzU+6eiRhTC+mbObUKI= m2jh5gDvX/1k+4uKtbb08CHp2b9UWsLwjtMijs9C9gZV2dIJKiF9SJBWE4C79qT4 uktgeB0RQiFrgxOGfpp1gyQunmNyZcipcetOk4PebH4/z+po/59w8oGp89fPfdRj WhWA0fWP32Pr5eslRQjbHnSRTFMp3ycBZHsCCsTWdhyiWC6aERsspHeeGjkzxRAZ HxJ8oLMj/TWBJ2iaUDUT6cxa1svmtumoC3GPPOreuGELXTL5MtKotTVqYN6lZP8B Ueaji11oRI1HE9XMuPu0iYlSo1i3JyejciSFgplgdHsebpM29PMo8oz2TCybY39p kmuD4y9XX3lRBcpJRxku7w== ... 
admin urn:oasis:names:tc:SAML:2.0:ac:classes:Password http://keystone.idp/v3/OS-FEDERATION/saml2/idp admin Default admin admin Default ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/metadata-response.xml0000664000175000017500000000234100000000000031365 0ustar00zuulzuul00000000000000Headers: Content-Type: text/xml MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ openstack openstack openstack openstack first lastname admin@example.com 555-555-5555 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/saml-assertion-request.json0000664000175000017500000000046100000000000032552 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "--token_id--" } }, "scope": { "service_provider": { "id": "--sp_id--" } } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/assertion/samples/saml-assertion-response.xml0000664000175000017500000001077200000000000032555 0ustar00zuulzuul00000000000000Headers: Content-Type: text/xml X-sp-url: http://beta.example.com/Shibboleth.sso/POST/ECP X-auth-url: http://beta.example.com/identity/v3/OS-FEDERATION/identity_providers/beta/protocols/auth http://keystone.idp/v3/OS-FEDERATION/saml2/idp http://keystone.idp/v3/OS-FEDERATION/saml2/idp IgfoWcCoBpmv64ianaK/qj63QQQ= H6GvkAcDW0BSoBaktpVTxUFtvUAcFMXRqYXLFvmse5DeOSnByvGOgW/yJMjIqzwG LjCqJXYMePIkEUYb4kqbbkN1wNFuxKtmACcC3T3/7rAavrIz3I4cT6mCipN9qFlE tzR0mD2IZhExuTzyMaON8krTWWoddx8LIYEfQ03O4eSYObi5fHmGJRGs9D5De0aK XkIeKo7HRAjZsU5fAMGlEKfazemTZMBbnpUD//oFsxf1yFcFTOyiAHddAaG7Rqv3 4SYjYo4dRKAI/yQuA+MVmHDcJUE+KVqVoJZJSVJe+Lz+X1ReRlEgvP0mhaM0yY+R w7FozqQyKSKJW9abmxJTFQ== ... 
admin urn:oasis:names:tc:SAML:2.0:ac:classes:Password http://keystone.idp/v3/OS-FEDERATION/saml2/idp admin Default admin admin Default ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/0000775000175000017500000000000000000000000022515 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/auth.inc0000664000175000017500000000760700000000000024163 0ustar00zuulzuul00000000000000.. -*- rst -*- Request an unscoped OS-FEDERATION token ======================================= .. rest_method:: GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id}/auth A federated ephemeral user may request an unscoped token, which can be used to get a scoped token. If the user is mapped directly (mapped to an existing user), a standard, unscoped token will be issued. Due to the fact that this part of authentication is strictly connected with the SAML2 authentication workflow, a client should not send any data, as the content may be lost when a client is being redirected between Service Provider and Identity Provider. Both HTTP methods - GET and POST should be allowed as Web Single Sign-On (WebSSO) and Enhanced Client Proxy (ECP) mechanisms have different authentication workflows and use different HTTP methods while accessing protected endpoints. The returned token will contain information about the groups to which the federated user belongs. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocol_auth`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/auth/parameters.yaml - idp_id: idp_id - protocol_id: protocol_id Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: federation/auth/parameters.yaml - X-Subject-Token: X-Subject-Token - token: unscoped_token Example ~~~~~~~ .. literalinclude:: federation/auth/samples/unscoped-token-response.json :language: javascript Request a scoped OS-FEDERATION token ==================================== .. rest_method:: POST /v3/auth/tokens A federated user may request a scoped token, by using the unscoped token. A project or domain may be specified by either id or name. An id is sufficient to uniquely identify a project or domain. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/rel/auth_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/auth/parameters.yaml - auth: auth Example ~~~~~~~ .. literalinclude:: federation/auth/samples/scoped-token-request.json :language: javascript Similarly to the returned unscoped token, the returned scoped token will have an ``OS-FEDERATION`` section added to the ``user`` portion of the token. Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/auth/parameters.yaml - X-Subject-Token: X-Subject-Token - token: scoped_token Example ~~~~~~~ .. literalinclude:: federation/auth/samples/scoped-token-response.json :language: javascript Web Single Sign On authentication (New in version 1.2) ====================================================== .. rest_method:: GET /v3/auth/OS-FEDERATION/websso/{protocol_id}?origin=https%3A//horizon.example.com For Web Single Sign On (WebSSO) authentication, users are expected to enter another URL endpoint. Upon successful authentication, instead of issuing a standard unscoped token, keystone will issue JavaScript code that redirects the web browser to the originating Horizon. An unscoped federated token will be included in the form being sent. Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: federation/auth/parameters.yaml - protocol_id: protocol_id Web Single Sign On authentication (New in version 1.3) ====================================================== .. rest_method:: GET /v3/auth/OS-FEDERATION/identity_providers/{idp_id}/protocol/{protocol_id}/websso?origin=https%3A//horizon.example.com In contrast to the above route, this route begins a Web Single Sign On request that is specific to the supplied Identity Provider and Protocol. Keystone will issue JavaScript that handles redirections in the same way as the other route. An unscoped federated token will be included in the form being sent. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/auth/parameters.yaml - idp_id: idp_id - protocol_id: protocol_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/parameters.yaml0000664000175000017500000000172300000000000025547 0ustar00zuulzuul00000000000000# variables in header X-Subject-Token: description: | The authentication token. An authentication response returns the token ID in this header rather than in the response body. 
in: header required: true type: string # variables in path idp_id: description: | Identity Provider's unique ID in: path required: true type: string protocol_id: description: | Federation Protocol's unique ID in: path required: true type: string # variables in query # variables in body auth: description: | Auth data containing user's identity and scope information in: body required: true type: object scoped_token: description: | Federation scoped token containing methods, roles, user, scope, catalog, issuance and expiry information in: body required: true type: object unscoped_token: description: | Federation unscoped token containing methods and user information in: body required: true type: object ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/samples/0000775000175000017500000000000000000000000024161 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/samples/scoped-token-request.json0000664000175000017500000000045700000000000031143 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "--federated-token-id--" } }, "scope": { "project": { "id": "263fd9" } } } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/samples/scoped-token-response.json0000664000175000017500000000427400000000000031312 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "token" ], "roles": [ { "id": "36a8989f52b24872a7f0c59828ab2a26", "name": "admin" } ], "expires_at": "2014-08-06T13:43:43.367202Z", "project": { "domain": { "id": "1789d1", "links": { "self": "http://example.com/identity/v3/domains/1789d1" }, "name": "example.com" }, "id": "263fd9", "links": { "self": 
"http://example.com/identity/v3/projects/263fd9" }, "name": "project-x" }, "catalog": [ { "endpoints": [ { "id": "39dc322ce86c4111b4f06c2eeae0841b", "interface": "public", "region": "RegionOne", "url": "http://example.com/identity" }, { "id": "ec642f27474842e78bf059f6c48f4e99", "interface": "internal", "region": "RegionOne", "url": "http://example.com/identity" }, { "id": "c609fc430175452290b62a4242e8a7e8", "interface": "admin", "region": "RegionOne", "url": "http://example.com/identity" } ], "id": "266c2aa381ea46df81bb05ddb02bd14a", "name": "keystone", "type": "identity" } ], "user": { "domain": { "id": "Federated" }, "id": "username%40example.com", "name": "username@example.com", "OS-FEDERATION": { "identity_provider": "ACME", "protocol": "SAML", "groups": [ {"id": "abc123"}, {"id": "bcd234"} ] } }, "issued_at": "2014-08-06T12:43:43.367288Z" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/auth/samples/unscoped-token-response.json0000664000175000017500000000100100000000000031636 0ustar00zuulzuul00000000000000{ "token": { "methods": [ "mapped" ], "user": { "domain": { "id": "Federated" }, "id": "username%40example.com", "name": "username@example.com", "OS-FEDERATION": { "identity_provider": "ACME", "protocol": "SAML", "groups": [ {"id": "abc123"}, {"id": "bcd234"} ] } } } }././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.438115 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/0000775000175000017500000000000000000000000025235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/idp.inc0000664000175000017500000002232100000000000026504 0ustar00zuulzuul00000000000000.. 
-*- rst -*- Register an identity provider ============================= .. rest_method:: PUT /v3/OS-FEDERATION/identity_providers/{id} Register an identity provider to be used to authenticate federated users. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - authorization_ttl: authorization_ttl - domain_id: domain_id - description: description - enabled: enabled - id: id_path - remote_ids: remote_ids As a domain may only be associated to a single identity provider, a 409 response code will be returned if the specified ``domain_id`` already maps an existing identity provider. Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/register-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - authorization_ttl: authorization_ttl - domain_id: domain_id - description: description - enabled: enabled - id: id_body - links: links - remote_ids: remote_ids Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/register-response.json :language: javascript List identity providers ======================= .. rest_method:: GET /v3/OS-FEDERATION/identity_providers List registered identity providers. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_providers`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - id: id_query - enabled: enabled_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - identity_providers: identity_providers Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~~ .. 
literalinclude:: federation/identity-provider/samples/list-response.json :language: javascript Get identity provider ===================== .. rest_method:: GET /v3/OS-FEDERATION/identity_providers/{id} Get registered identity providers. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - id: id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - authorization_ttl: authorization_ttl - domain_id: domain_id - description: description - enabled: enabled - id: id_body - links: links - remote_ids: remote_ids Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/get-response.json :language: javascript Delete identity provider ======================== .. rest_method:: DELETE /v3/OS-FEDERATION/identity_providers/{id} When an identity provider is deleted, any tokens generated by that identity provider will be revoked. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - id: id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Update identity provider ======================== .. rest_method:: PATCH /v3/OS-FEDERATION/identity_providers/{id} When an identity provider is disabled, any tokens generated by that identity provider will be revoked. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider`` Request ------- Except ``domain_id``, any attribute of an Identity Provider may be passed in the request body. 
To update the ``domain_id``, you will need to delete and recreate the Identity Provider. If ``domain_id`` is included in the request, a 400 response code will be returned. Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - id: id_path Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - authorization_ttl: authorization_ttl - domain_id: domain_id - description: description - enabled: enabled - id: id_body - links: links - remote_ids: remote_ids Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/update-response.json :language: javascript Add protocol to identity provider ================================= .. rest_method:: PUT /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} Add a protocol and attribute mapping to an identity provider. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocol`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - idp_id: id_path - protocol_id: protocol_id - protocol: protocol Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/add-protocol-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - protocol: protocol Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/add-protocol-response.json :language: javascript List protocols of identity provider =================================== .. 
rest_method:: GET /v3/OS-FEDERATION/identity_providers/{id}/protocols List all protocol and attribute mappings of an identity provider. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocols`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - id: id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - protocols: protocols - links: protocols_links Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/list-protocol-response.json :language: javascript Get protocol for identity provider ================================== .. rest_method:: GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} Get a protocol and attribute mapping for an identity provider. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocol`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - idp_id: id_path - protocol_id: protocol_id Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - protocol: protocol Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/get-protocol-response.json :language: javascript Update attribute mapping for identity provider ============================================== .. rest_method:: PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} Update the attribute mapping for an identity provider and protocol. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocol`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: federation/identity-provider/parameters.yaml - idp_id: id_path - protocol_id: protocol_id - protocol: protocol Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/update-protocol-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - protocol: protocol Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/identity-provider/samples/update-protocol-response.json :language: javascript Delete a protocol from identity provider ======================================== .. rest_method:: DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} Delete a protocol and attribute mapping from an identity provider. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/identity_provider_protocol`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/identity-provider/parameters.yaml - idp_id: id_path - protocol_id: protocol_id Response -------- Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success ../v3/status.yaml - 204 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/parameters.yaml0000664000175000017500000000414400000000000030267 0ustar00zuulzuul00000000000000# variables in header # variables in path id_path: description: | The Identity Provider ID in: path required: true type: string protocol_id: description: | The federation protocol ID in: path required: true type: string # variables in query enabled_query: description: | Filter for Identity Providers' enabled attribute in: query required: false type: bool id_query: description: | Filter for Identity Providers' ID attribute in: query type: string # variables in body authorization_ttl: description: | The length of validity in minutes for group memberships carried over through mapping and persisted in the database. If left unset, the default value configured in keystone will be used, if enabled. in: body required: false type: integer description: description: | The Identity Provider description in: body required: false type: string domain_id: description: | The ID of a domain that is associated with the Identity Provider. Federated users that authenticate with the Identity Provider will be created under the domain specified. 
in: body required: true type: string enabled: description: | Whether the Identity Provider is enabled or not in: body required: true type: bool id_body: description: | The Identity Provider unique ID in: body required: true type: string identity_providers: description: | List of Identity Providers in: body required: true type: array links: description: | Links containing URI to the Identity Provider resource and its Protocols in: body required: true type: object protocol: description: | The Federation Protocol object in: body required: true type: object protocols: description: | List of Federation Protocols in: body required: true type: array protocols_links: description: | Link containing the URI to the collection of Federation Protocols in: body required: true type: object remote_ids: description: | List of the unique Identity Provider's remote IDs in: body required: true type: array ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/0000775000175000017500000000000000000000000026701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/add-protocol-request.json0000664000175000017500000000007200000000000033650 0ustar00zuulzuul00000000000000{ "protocol": { "mapping_id": "xyz234" } }././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/add-protocol-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/add-protocol-response.jso0000664000175000017500000000050300000000000033637 0ustar00zuulzuul00000000000000{ "protocol": { "id": "saml2", "links": { "identity_provider": 
"http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols/saml2" }, "mapping_id": "xyz234" } }././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/get-protocol-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/get-protocol-response.jso0000664000175000017500000000050300000000000033666 0ustar00zuulzuul00000000000000{ "protocol": { "id": "saml2", "links": { "identity_provider": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols/saml2" }, "mapping_id": "xyz234" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/get-response.json0000664000175000017500000000073500000000000032214 0ustar00zuulzuul00000000000000{ "identity_provider": { "authorization_ttl": null, "domain_id": "1789d1", "description": "Stores ACME identities", "remote_ids": ["acme_id_1", "acme_id_2"], "enabled": false, "id": "ACME", "links": { "protocols": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME" } } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/list-protocol-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/list-protocol-response.js0000664000175000017500000000102700000000000033705 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": 
"http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols" }, "protocols": [ { "id": "saml2", "links": { "identity_provider": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols/saml2" }, "mapping_id": "xyz234" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/list-response.json0000664000175000017500000000220200000000000032377 0ustar00zuulzuul00000000000000{ "identity_providers": [ { "domain_id": "1789d1", "description": "Stores ACME identities", "remote_ids": ["acme_id_1", "acme_id_2"], "enabled": true, "id": "ACME", "links": { "protocols": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME" } }, { "domain_id": "2890e2", "description": "Stores contractor identities", "remote_ids": ["sore_id_1", "store_id_2"], "enabled": false, "id": "ACME-contractors", "links": { "protocols": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME-contractors/protocols", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME-contractors" } } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/register-request.json0000664000175000017500000000027700000000000033114 0ustar00zuulzuul00000000000000{ "identity_provider": { "domain_id": "1789d1", "description": "Stores ACME identities.", "remote_ids": ["acme_id_1", "acme_id_2"], "enabled": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/register-response.json0000664000175000017500000000067000000000000033257 0ustar00zuulzuul00000000000000{ "identity_provider": { "domain_id": "1789d1", "description": "Stores ACME identities", "remote_ids": ["acme_id_1", "acme_id_2"], "enabled": true, "id": "ACME", "links": { "protocols": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME" } } }././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-request.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-request.j0000664000175000017500000000007200000000000033662 0ustar00zuulzuul00000000000000{ "protocol": { "mapping_id": "xyz234" } }././@PaxHeader0000000000000000000000000000021000000000000011446 xustar0000000000000000114 path=keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-response.0000664000175000017500000000050300000000000033655 0ustar00zuulzuul00000000000000{ "protocol": { "id": "saml2", "links": { "identity_provider": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols/saml2" }, "mapping_id": "xyz234" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-request.json0000664000175000017500000000015600000000000032546 0ustar00zuulzuul00000000000000{ "identity_provider": { "remote_ids": 
["beta_id_1", "beta_id_2"], "enabled": true } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/identity-provider/samples/update-response.json0000664000175000017500000000072200000000000032713 0ustar00zuulzuul00000000000000{ "identity_provider": { "authorization_ttl": null, "domain_id": "1789d1", "description": "Beta dev idp", "remote_ids": ["beta_id_1", "beta_id_2"], "enabled": true, "id": "ACME", "links": { "protocols": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME/protocols", "self": "http://example.com/identity/v3/OS-FEDERATION/identity_providers/ACME" } } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/0000775000175000017500000000000000000000000023207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/mapping.inc0000664000175000017500000000641400000000000025342 0ustar00zuulzuul00000000000000.. -*- rst -*- Create a mapping ================ .. rest_method:: PUT /v3/OS-FEDERATION/mappings/{id} Create a federated mapping. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/mapping`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_path - rules: rules Example ~~~~~~~ .. literalinclude:: federation/mapping/samples/create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_body - links: links - rules: rules Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 Example ~~~~~~~ .. 
literalinclude:: federation/mapping/samples/create-response.json :language: javascript Get a mapping ============= .. rest_method:: GET /v3/OS-FEDERATION/mappings/{id} Get a specific federated mapping. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/mapping`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_body - links: links - rules: rules Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/mapping/samples/get-response.json :language: javascript Update a mapping ================ .. rest_method:: PATCH /v3/OS-FEDERATION/mappings/{id} Update a federated mapping. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/mapping`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_path - rules: rules Example ~~~~~~~ .. literalinclude:: federation/mapping/samples/update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_body - links: links - rules: rules Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/mapping/samples/update-response.json :language: javascript List mappings ============= .. rest_method:: GET /v3/OS-FEDERATION/mappings List all federated mappings. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/mappings`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - links: links_collection - mappings: mappings Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. 
literalinclude:: federation/mapping/samples/list-response.json :language: javascript Delete a mapping ================ .. rest_method:: DELETE /v3/OS-FEDERATION/mappings/{id} Remove a specific federated mapping. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/mapping`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/mapping/parameters.yaml - id: id_path Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/parameters.yaml0000664000175000017500000000144000000000000026235 0ustar00zuulzuul00000000000000# variables in header # variables in path id_path: description: | The Federation Mapping unique ID in: path required: true type: string # variables in query # variables in body id_body: description: | The Federation Mapping unique ID in: body required: true type: string links: description: | Link to the URI where the mapping is located in: body required: true type: object links_collection: description: | Link to the URI where the mapping collection is located in: body required: true type: object mappings: description: | The collection of Federation Mappings in: body required: true type: array rules: description: | The list of rules used to map remote users into local users in: body required: true type: object ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/0000775000175000017500000000000000000000000024653 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/create-request.json0000664000175000017500000000143300000000000030500 
0ustar00zuulzuul00000000000000{ "mapping": { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "Guest" ] } ] } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/create-response.json0000664000175000017500000000164000000000000030646 0ustar00zuulzuul00000000000000{ "mapping": { "id": "ACME", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/mappings/ACME" }, "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "Guest" ] } ] } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/get-response.json0000664000175000017500000000164000000000000030162 0ustar00zuulzuul00000000000000{ "mapping": { "id": "ACME", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/mappings/ACME" }, "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "Guest" ] } ] } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/list-response.json0000664000175000017500000000231300000000000030354 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-FEDERATION/mappings" }, "mappings": [ { "id": "ACME", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/mappings/ACME" }, "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": 
"0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/update-request.json0000664000175000017500000000144300000000000030520 0ustar00zuulzuul00000000000000{ "mapping": { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/mapping/samples/update-response.json0000664000175000017500000000165000000000000030666 0ustar00zuulzuul00000000000000{ "mapping": { "id": "ACME", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/mappings/ACME" }, "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/0000775000175000017500000000000000000000000025035 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/parameters.yaml0000664000175000017500000000115200000000000030063 0ustar00zuulzuul00000000000000# variables in header # variables in path # variables in query # variables in body domains: description: | The list of domains the authenticated user may scope to in: body required: true type: array links_domains: description: | Link 
to the URI where the domain collection is located in: body required: true type: object links_projects: description: | Link to the URI where the project collection is located in: body required: true type: object projects: description: | The list of projects the authenticated user may scope to in: body required: true type: array ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/projects-domains.inc0000664000175000017500000000400700000000000031012 0ustar00zuulzuul00000000000000.. -*- rst -*- .. **Deprecated in v1.1**. This section is deprecated as the functionality is available in the core Identity API. List projects a federated user can access ========================================= .. rest_method:: GET /v3/OS-FEDERATION/projects **Deprecated in v1.1**. Use core ``GET /auth/projects``. This call has the same response format. Returns a collection of projects to which the federated user has authorization to access. To access this resource, an unscoped token is used, the user can then select a project and request a scoped token. Note that only enabled projects will be returned. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/projects`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/projects-domains/parameters.yaml - links: links_projects - projects: projects Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/projects-domains/samples/project-list-response.json :language: javascript List domains a federated user can access ======================================== .. rest_method:: GET /v3/OS-FEDERATION/domains **Deprecated in v1.1**. Use core ``GET /auth/domains``. This call has the same response format. Returns a collection of domains to which the federated user has authorization to access. 
To access this resource, an unscoped token is used, the user can then select a domain and request a scoped token. Note that only enabled domains will be returned. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/domains`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/projects-domains/parameters.yaml - domains: domains - links: links_domains Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/projects-domains/samples/domain-list-response.json :language: javascript././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/samples/0000775000175000017500000000000000000000000026501 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/samples/domain-list-response.json0000664000175000017500000000065700000000000033460 0ustar00zuulzuul00000000000000{ "domains": [ { "description": "desc of domain", "enabled": true, "id": "37ef61", "links": { "self": "http://example.com/identity/v3/domains/37ef61" }, "name": "my domain" } ], "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/domains", "previous": null, "next": null } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/projects-domains/samples/project-list-response.json0000664000175000017500000000126300000000000033651 0ustar00zuulzuul00000000000000{ "projects": [ { "domain_id": "37ef61", "enabled": true, "id": "12d706", "links": { "self": "http://example.com/identity/v3/projects/12d706" }, "name": "a project name" }, { "domain_id": "37ef61", "enabled": true, "id": "9ca0eb", "links": { "self": 
"http://example.com/identity/v3/projects/9ca0eb" }, "name": "another project" } ], "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/projects", "previous": null, "next": null } }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/0000775000175000017500000000000000000000000025044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/parameters.yaml0000664000175000017500000000235100000000000030074 0ustar00zuulzuul00000000000000# variables in header # variables in path id_path: description: | The Service Provider unique ID in: path required: true type: string # variables in query # variables in body auth_url: description: | The URL to authenticate against in: body required: true type: string description: description: | The description of the Service Provider in: body required: false type: string enabled: description: | Whether the Service Provider is enabled or not in: body required: true type: bool id_body: description: | The Service Provider unique ID in: body required: true type: string links: description: | Link to the URI where the Service Provider is located in: body required: true type: string links_collection: description: | Link to the URI where the Service Provider collection is located in: body required: true type: string relay_state_prefix: description: | The prefix of the RelayState SAML attribute in: body required: true type: string service_providers: description: | The list of Service Providers in: body required: true type: array sp_url: description: | The Service Provider's URL in: body required: true type: string ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 
keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/0000775000175000017500000000000000000000000026510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/get-response.json0000664000175000017500000000074200000000000032021 0ustar00zuulzuul00000000000000{ "service_provider": { "auth_url": "https://example.com/identity/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", "description": "Remote Service Provider", "enabled": true, "id": "ACME", "links": { "self": "https://example.com/identity/v3/OS-FEDERATION/service_providers/ACME" }, "relay_state_prefix": "ss:mem:", "sp_url": "https://example.com/identity/Shibboleth.sso/SAML2/ECP" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/list-response.json0000664000175000017500000000232100000000000032210 0ustar00zuulzuul00000000000000{ "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-FEDERATION/service_providers" }, "service_providers": [ { "auth_url": "https://example.com/identity/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", "description": "Stores ACME identities", "enabled": true, "id": "ACME", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/service_providers/ACME" }, "relay_state_prefix": "ss:mem:", "sp_url": "https://example.com/identity/Shibboleth.sso/SAML2/ECP" }, { "auth_url": "https://other.example.com/identity/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", "description": "Stores contractor identities", "enabled": false, "id": "ACME-contractors", "links": { "self": "http://example.com/identity/v3/OS-FEDERATION/service_providers/ACME-contractors" }, "relay_state_prefix": "ss:mem:", "sp_url": 
"https://other.example.com/identity/Shibboleth.sso/SAML2/ECP" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/register-request.json0000664000175000017500000000045200000000000032716 0ustar00zuulzuul00000000000000{ "service_provider": { "auth_url": "https://example.com/identity/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", "description": "Remote Service Provider", "enabled": true, "sp_url": "https://example.com/identity/Shibboleth.sso/SAML2/ECP" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/register-response.json0000664000175000017500000000074200000000000033066 0ustar00zuulzuul00000000000000{ "service_provider": { "auth_url": "https://example.com/identity/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", "description": "Remote Service Provider", "enabled": true, "id": "ACME", "links": { "self": "https://example.com/identity/v3/OS-FEDERATION/service_providers/ACME" }, "relay_state_prefix": "ss:mem:", "sp_url": "https://example.com/identity/Shibboleth.sso/SAML2/ECP" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/update-request.json0000664000175000017500000000044500000000000032356 0ustar00zuulzuul00000000000000{ "service_provider": { "auth_url": "https://new.example.com/identity/v3/OS-FEDERATION/identity_providers/protocol/saml2/auth", "enabled": true, "relay_state_prefix": "ss:temp:", "sp_auth": "https://new.example.com/identity/Shibboleth.sso/SAML2/ECP" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/samples/update-response.json0000664000175000017500000000074500000000000032527 0ustar00zuulzuul00000000000000{ "service_provider": { "auth_url": "https://new.example.com/identity/v3/OS-FEDERATION/identity_providers/protocol/saml2/auth", "description": "Remote Service Provider", "enabled": true, "id": "ACME", "links": { "self": "https://example.com/identity/v3/OS-FEDERATION/service_providers/ACME" }, "relay_state_prefix": "ss:temp:", "sp_url": "https://new.example.com/identity/Shibboleth.sso/SAML2/ECP" } }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation/service-provider/sp.inc0000664000175000017500000001017100000000000026161 0ustar00zuulzuul00000000000000.. -*- rst -*- Register a service provider =========================== .. rest_method:: PUT /v3/OS-FEDERATION/service_providers/{id} Create a service provider entity. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/service_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - auth_url: auth_url - description: description - enabled: enabled - id: id_path - sp_url: sp_url Example ~~~~~~~ .. literalinclude:: federation/service-provider/samples/register-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - auth_url: auth_url - description: description - enabled: enabled - id: id_body - links: links - relay_state_prefix: relay_state_prefix - sp_url: sp_url Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 Example ~~~~~~~ .. literalinclude:: federation/service-provider/samples/register-response.json :language: javascript List service providers ====================== .. 
rest_method:: GET /v3/OS-FEDERATION/service_providers List all service providers. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/service_providers`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - links: links_collection - service_providers: service_providers Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/service-provider/samples/list-response.json :language: javascript Get service provider ==================== .. rest_method:: GET /v3/OS-FEDERATION/service_providers/{id} Get a specific service provider reference. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/service_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - id: id_path Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - auth_url: auth_url - description: description - enabled: enabled - id: id_body - links: links - relay_state_prefix: relay_state_prefix - sp_url: sp_url Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: federation/service-provider/samples/get-response.json :language: javascript Delete service provider ======================= .. rest_method:: DELETE /v3/OS-FEDERATION/service_providers/{id} Delete a service provider. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/service_provider`` Request ------- Parameters ~~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - id: id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 Update service provider ======================= .. 
rest_method:: PATCH /v3/OS-FEDERATION/service_providers/{id} Update a service provider's attributes. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION/1.0/rel/service_provider`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - id: id_path - auth_url: auth_url - description: description - enabled: enabled - sp_url: sp_url Example ~~~~~~~ .. literalinclude:: federation/service-provider/samples/update-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: federation/service-provider/parameters.yaml - auth_url: auth_url - description: description - enabled: enabled - id: id_body - links: links - relay_state_prefix: relay_state_prefix - sp_url: sp_url Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~~ .. literalinclude:: federation/service-provider/samples/update-response.json :language: javascript././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/federation.inc0000664000175000017500000002336700000000000022262 0ustar00zuulzuul00000000000000.. -*- rst -*- =================== OS-FEDERATION API =================== Provide the ability for users to manage Identity Providers (IdPs) and establish a set of rules to map federation protocol attributes to Identity API attributes. Requires v3.0+ of the Identity API. What's New in Version 1.4 ========================= Corresponding to Identity API v3.12 release. - Added `remote_id_attribute` as an attribute of a Protocol. What's New in Version 1.3 ========================= Corresponding to Identity API v3.5 release. - Added Identity Provider specific websso routes. What's New in Version 1.2 ========================= Corresponding to Identity API v3.4 release. - Add websso routes. 
What's New in Version 1.1 ========================= Corresponding to Identity API v3.3 release. These features are considered stable as of September 4th, 2014. - Deprecate list projects and domains in favour of core functionality available in Identity API v3.3. - Introduced a mechanism to exchange an Identity Token for a SAML assertion. - Introduced a mechanism to retrieve Identity Provider Metadata. Definitions =========== - *Trusted Identity Provider*: An identity provider set up within the Identity API that is trusted to provide authenticated user information. - *Service Provider*: A system entity that provides services to principals or other system entities, in this case, the OpenStack Identity API is the Service Provider. - *Attribute Mapping*: The user information passed by a federation protocol for an already authenticated identity are called ``attributes``. Those ``attributes`` may not align directly with the Identity API concepts. To help overcome such mismatches, a mapping can be done either on the sending side (third party identity provider), on the consuming side (Identity API service), or both. - *Protocol*: A protocol capable of performing federated identity authentication. For example, the OpenID Connect or SAML 2.0 protocols. API Resources ============= Identity Providers ------------------ :: /v3/OS-FEDERATION/identity_providers An Identity Provider (IdP) is a third party service that is trusted by the Identity API to authenticate identities. Optional attributes: - ``domain_id`` (string) The ID of the domain that is associated with the IdP. If a value is not specified by the client, the service will automatically create a domain and associate it to the IdP. The ``domain_id`` is not unique so that users can link multiple IdPs to one domain. - ``description`` (string) Describes the identity provider. If a value is not specified by the client, the service will default this value to ``null``. 
- ``enabled`` (boolean) Indicates whether this identity provider should accept federated authentication requests. If a value is not specified by the client, the service will default this to ``false``. - ``remote_ids`` (list) Valid remote IdP entity values from Identity Providers. If a value is not specified by the client, the list will be empty. Protocols --------- :: /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols A protocol entry contains information that dictates which mapping rules to use for a given incoming request. An IdP may have multiple supported protocols. Required attributes: - ``mapping_id`` (string) Indicates which mapping should be used to process federated authentication requests. Optional attributes: - ``remote_id_attribute`` (string) Key to obtain the entity ID of the Identity Provider from the HTTPD environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`, this could be `MELLON_IDP`. This overrides the default value provided in keystone.conf. Mappings -------- :: /v3/OS-FEDERATION/mappings A ``mapping`` is a set of rules to map federation protocol attributes to Identity API objects. An Identity Provider can have a single ``mapping`` specified per protocol. A mapping is simply a list of ``rules``. Required attributes: - ``rules`` (list of objects) Each object contains a rule for mapping attributes to Identity API concepts. A rule contains a ``remote`` attribute description and the destination ``local`` attribute. - ``local`` (list of objects) References a local Identity API resource, such as a ``group`` or ``user`` to which the remote attributes will be mapped. Each object has one of two structures, as follows. 
To map a remote attribute value directly to a local attribute, identify the local resource type and attribute: :: [ { "local": [ { "user": { "name": "{0}" } } ], } ] If the ``user`` attribute is missing when processing an assertion, server tries to directly map ``REMOTE_USER`` environment variable. If this variable is also unavailable the server returns an HTTP ``401 Unauthorized`` error. If the ``user`` has the attribute ``type`` set to ``local`` as well as a domain specified, the user is treated as existing in the local keystone backend, and the server will attempt to fetch user details (id, name, roles, groups) from the identity backend. If, however, the user does not exist in the backend, the server will respond with an appropriate HTTP error code. If the ``type`` attribute is not set to ``local`` in the local rule and no domain is specified, the user is deemed ephemeral and becomes a member of the identity provider's domain. An example of user object mapping to an existing local user: :: [ { "local": [ { "user": { "name": "username", "type": "local", "domain": { "name": "domain_name" } } } ], } ] For attribute type and value mapping, identify the local resource type, attribute, and value: :: [ { "local": [ { "group": { "id": "89678b" } } ], } ] This assigns authorization attributes, by way of role assignments on the specified group, to ephemeral users. The users are not added to the group, but for the duration of the token they will receive the same authorization as if they were. :: [ { "local": [ { "group_ids": "{0}" } ], } ] It is also possible to map multiple groups by providing a list of group ids. Those group ids can also be white/blacklisted. - ``remote`` (list of objects) At least one object must be included. If more than one object is included, the local attribute is applied only if all remote attributes match. The value identified by ``type`` is always passed through unless a constraint is specified using either ``any_one_of`` or ``not_one_of``. 
- ``type`` (string) This represents an assertion type keyword. - ``any_one_of`` (list of strings) This is mutually exclusive with ``not_any_of``. The rule is matched only if any of the specified strings appear in the remote attribute ``type``. - ``not_any_of`` (list of strings) This is mutually exclusive with ``any_one_of``. The rule is not matched if any of the specified strings appear in the remote attribute ``type``. - ``regex`` (boolean) If ``true``, then each string will be evaluated as a `regular expression `__ search against the remote attribute ``type``. The ``blacklist`` and ``whitelist`` rules are always used in conjunction with ``type``. - ``blacklist`` (list of strings) This is mutually exclusive with ``whitelist``. The rule works as a filter, removing any specified strings that are listed there from the remote attribute ``type``. - ``whitelist`` (list of strings) This is mutually exclusive with ``blacklist``. The rule works as a filter, allowing only the specified strings in the remote attribute ``type`` to be passed ahead. Service Providers ----------------- :: /v3/OS-FEDERATION/service_providers A service provider is a third party service that is trusted by the Identity Service. Required attributes: - ``auth_url`` (string) Specifies the protected URL where tokens can be retrieved once the user is authenticated. - ``sp_url`` (string) Specifies the URL at the remote peer where assertion should be sent. Optional attributes: - ``description`` (string) Describes the service provider If a value is not specified by the client, the service may default this value to ``null``. - ``enabled`` (boolean) Indicates whether bursting into this service provider is enabled by cloud administrators. If set to ``false`` the SP will not appear in the catalog and requests to generate an assertion will result in a 403 error. If a value is not specified by the client, the service will default this to ``false``. 
- ``relay_state_prefix`` (string) Indicates the relay state prefix, used in the ECP wrapped SAML messages, by the Service Provider. If a value is not specified by the client, the service will default this value to ``ss:mem:``. APIs ==== .. include:: federation/identity-provider/idp.inc .. include:: federation/mapping/mapping.inc .. include:: federation/service-provider/sp.inc .. include:: federation/projects-domains/projects-domains.inc .. include:: federation/auth/auth.inc .. include:: federation/assertion/assertion.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/index.rst0000664000175000017500000000200700000000000021274 0ustar00zuulzuul00000000000000:tocdepth: 3 -------------------------------------- Identity API v3 extensions (CURRENT) -------------------------------------- ============= Relationships ============= The entries within the operations below contain a relationship link, which appears as a valid URI, however these are actually URN (Uniform Resource Name), which are similar to GUID except it uses a URI syntax so that it is easier to be read. These links do not resolve to anything valid, but exist to show a relationship. ======================= Identity API Extensions ======================= This page describes these Identity API v3 extensions: * `OS-ENDPOINT-POLICY API`_ * `OS-OAUTH1 API`_ * `OS-OAUTH2 API`_ * `OS-TRUST API`_ * `OS-REVOKE API`_ * `OS-EP-FILTER API`_ * `OS-FEDERATION API`_ * `OS-SIMPLE-CERT API`_ .. rest_expand_all:: .. include:: endpoint-policy.inc .. include:: oauth.inc .. include:: oauth2.inc .. include:: trust.inc .. include:: revoke.inc .. include:: ep-filter.inc .. include:: federation.inc .. 
include:: simple-cert.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/oauth.inc0000664000175000017500000003306600000000000021257 0ustar00zuulzuul00000000000000.. -*- rst -*- =============== OS-OAUTH1 API =============== Provide the ability for identity users to delegate roles to third party consumers via the `OAuth 1.0a specification `__. Requires v3.0+ of the Identity API. An OAuth-derived token will provide a means of acting on behalf of the authorizing user. Overview ======== Definitions ----------- - *User:* An Identity API service user, the entity whose role(s) will be delegated, and the entity that authorizes Request Tokens. - *Request Token:* A token used by the Consumer to obtain authorization from the User, and exchanged with an OAuth Verifier for an Access Token. - *Access Token:* A token used by the Consumer to request new Identity API tokens on behalf of the authorizing User, instead of using the User's credentials. - *Token Key:* A key used by the token to identify itself. Both Request Tokens and Access Tokens have Token Keys. For OpenStack purposes, the Token Key is the Token ID. - *Token Secret:* A secret used by the Consumer to establish ownership of a given Token. Both Request Tokens and Access Tokens have Token Secrets. - *OAuth Verifier:* A string that must be provided with the corresponding Request Token in exchange for an Access Token. Delegated Authentication Flow ----------------------------- Delegated Authentication via OAuth is done in five steps: #. An Identity API service User `creates a Consumer <#create-consumer>`__. #. The Consumer `obtains an unauthorized Request Token <#create-request-token>`__. #. The User `authorizes the Request Token <#authorize-request-token>`__. #. The Consumer `exchanges the Request Token for an Access Token <#create-access-token>`__. #. 
The Consumer `uses the Access Token to request an Identity API service Token <#authenticate-with-identity-api>`__. Create consumer =============== .. rest_method:: POST /v3/OS-OAUTH1/consumers Enables a user to create a consumer. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/consumers`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - description: consumer_description Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/consumer-create-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Status: 201 Created The ``secret`` is only returned once, during consumer creation. Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/consumer-create-response.json :language: javascript Delete consumer =============== .. rest_method:: DELETE /v3/OS-OAUTH1/consumers/{consumer_id} Deletes a consumer. When you delete a consumer, any associated request tokens, access tokens, and Identity API tokens are also revoked. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/consumer`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - consumer_id: consumer_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 List consumers ============== .. rest_method:: GET /v3/OS-OAUTH1/consumers Lists consumers. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/consumers`` Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503 Example ~~~~~~~ .. 
literalinclude:: ./samples/OS-OAUTH1/consumers-list-response.json :language: javascript Show consumer details ===================== .. rest_method:: GET /v3/OS-OAUTH1/consumers/{consumer_id} Shows details for a consumer. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/consumer`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - consumer_id: consumer_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/consumer-show-response.json :language: javascript Update consumer =============== .. rest_method:: PATCH /v3/OS-OAUTH1/consumers/{consumer_id} Updates the description for a consumer. If you try to update any attribute other than ``description``, an HTTP 400 Bad Request error is returned. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/consumer`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - consumer_id: consumer_id_path Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/consumer-update-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/consumer-update-response.json :language: javascript Create request token ==================== .. rest_method:: POST /v3/OS-OAUTH1/request_token Enables a consumer to get an unauthorized request token. Supported signature methods: ``HMAC-SHA1`` The consumer must provide all required OAuth parameters in the request. See `Consumer Obtains a Request Token `_. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/request_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - Requested-Project-Id: requested_project_id Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - oauth_token: oauth_token - oauth_token_secret: oauth_token_secret - oauth_expires_at: oauth_expires_at Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/request-token-create-response.txt :language: javascript Authorize request token ======================= .. rest_method:: PUT /v3/OS-OAUTH1/authorize/{request_token_id} To authorize the Request Token, the authorizing user must have access to the requested project. Upon successful authorization, an OAuth Verifier code is returned. The Consumer receives the OAuth Verifier from the User out-of-band. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/authorize_request_token`` Request ------- Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/authorize-request-token-request.json :language: javascript Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/authorize-request-token-response.json :language: javascript Create access token =================== .. rest_method:: POST /v3/OS-OAUTH1/access_token Enables a consumer to obtain an access token by exchanging a request token. After a user authorizes the request token, the consumer exchanges the authorized request token and OAuth verifier for an access token. 
Supported signature methods: ``HMAC-SHA1`` The consumer must provide all required OAuth parameters in the request. See `Consumer Requests an Access Token `_. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/access_tokens`` Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - oauth_token: oauth_token - oauth_token_secret: oauth_token_secret - oauth_expires_at: oauth_expires_at Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/access-token-create-response.txt :language: javascript Get access token ================ .. rest_method:: GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} Gets an access token. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/user_access_token`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - access_token_id: access_token_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/access-token-show-response.json :language: javascript Revoke access token =================== .. rest_method:: DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} Enables a user to revoke an access token, which prevents the consumer from requesting new Identity Service API tokens. Also, revokes any Identity Service API tokens that were issued to the consumer through that access token. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/user_access_token`` Request ------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - user_id: user_id_path - access_token_id: access_token_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 204 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 List access tokens ================== .. rest_method:: GET /v3/users/{user_id}/OS-OAUTH1/access_tokens Lists authorized access tokens. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/user_access_tokens`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/access-tokens-list-response.json :language: javascript List roles for an access token ============================== .. rest_method:: GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles Lists associated roles for an access token. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/user_access_token_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - access_token_id: access_token_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 415 - 405 - 404 - 403 - 401 - 400 - 503 - 409 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/access-token-roles-list-response.json :language: javascript Show role details for an access token ===================================== .. rest_method:: GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles/{role_id} Shows details for a role for an access token. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0/rel/user_access_token_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - user_id: user_id_path - role_id: role_id_path - access_token_id: access_token_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503 Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH1/access-token-role-show-response.json :language: javascript Authenticate with Identity API ============================== .. rest_method:: POST /v3/auth/tokens Enables a consumer to get an Identity Service authentication token. The token represents the delegated authorization and identity (impersonation) of the authorizing user. The roles and scope of the generated token match those that the consumer initially requested. Supported signature methods: ``HMAC-SHA1`` The consumer must provide required OAuth parameters in the request. See `Accessing Protected Resources `_. The returned token is scoped to the requested project and with the requested roles. In addition to the standard token response, the token has an OAuth-specific object. Example OAuth-specific object in a token: .. code-block:: javascript "OS-OAUTH1": { "access_token_id": "cce0b8be7" } Relationship: ``https://docs.openstack.org/identity/rel/v3/auth_tokens`` Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 413 - 405 - 404 - 403 - 401 - 400 - 503././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/oauth2.inc0000664000175000017500000000564700000000000021345 0ustar00zuulzuul00000000000000.. 
-*- rst -*- =============== OS-OAUTH2 API =============== Provide the ability for identity users to delegate roles to third party consumers via the `OAuth 2.0 Client Credentials grant specification `__. Requires v3.0+ of the Identity API. An OAuth2-derived access token will provide a means of acting on behalf of the authorizing user. Overview ======== Definitions ----------- - *User:* The end-users who use Identity API service, the entity whose role(s) will be delegated, and the entity that registers Clients. - *Client:* An application making protected resource requests on behalf of the User. The credentials of the client are created though the Application Credentials API. - *Access Token:* A token used by the Client to make protected resource requests with the delegated roles. Delegated Authentication Flow ----------------------------- Delegated Authentication via OAuth 2.0 is done in four steps: #. An Identity API service User creates an `application credential`_. #. The Client authenticates with the authorization server on the Keystone and requests a new Access Token. #. The Client uses the Access Token to make requests for OpenStack Service APIs. #. Keystone Middleware validates the Access Token in an API request to obtain its metadata and validity, and forwards the request to the OpenStack service if the token is active. .. _application credential: https://docs.openstack.org/api-ref/identity/v3/index.html?expanded=create-application-credential-detail#create-application-credential Create Access Token =================== .. rest_method:: POST /identity/v3/OS-OAUTH2/token Enables a user to create an access token. The user makes a request to the token endpoint by adding the following parameters using the "application/x-www-form-urlencoded" format with a character encoding of UTF-8 in the HTTP request entity-body. 
And the request should use the basic authentication header which contains the application credentials information to authenticate a user through the authorization server. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH2/1.0/rel/token`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - grant_type: request_token_grant_type_body_required Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH2/token-create-request.txt :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - access_token: response_token_access_token_body - token_type: response_token_token_type_body - expires_in: response_token_expires_in_body Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 404 - 500 Status: 200 OK Example ~~~~~~~ .. literalinclude:: ./samples/OS-OAUTH2/token-create-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/parameters.yaml0000664000175000017500000003631200000000000022470 0ustar00zuulzuul00000000000000# variables in header requested_project_id: description: | The ID of the requested project. in: header required: true type: string # variables in path access_token_id_path: description: | The UUID of the access token. in: path required: true type: string consumer_id_path: description: | The UUID of the consumer. in: path required: true type: string domain_id: description: | The UUID of the domain. in: path required: true type: string endpoint_group_id_path: description: | The UUID of the endpoint group. in: path required: true type: string endpoint_id_path: description: | The endpoint ID. in: path required: true type: string group_id: description: | The UUID of the group. in: path required: true type: string name: description: | The name of the group. 
in: path required: true type: string policy_id_path: description: | The policy ID. in: path required: true type: string project_id_path: description: | The UUID of the project. in: path required: true type: string region_id_path: description: | The region ID. in: path required: true type: string role_id_path: description: | The UUID of the role. in: path required: true type: string service_id_path: description: | The service ID. in: path required: true type: string trust_id_path: description: | The trust ID. in: path required: true type: string user_id_path: description: | The UUID of the user. in: path required: true type: string # variables in query request_endpoint_group_name_query_not_required: description: | Filters the response by an endpoint group name. in: query required: false type: string since_query: description: | A timestamp used to limit the list of results to events that occurred on or after the specified time. (RFC 1123 format date time) in: query required: false type: string trustee_user_id_query: description: | Filters the response by a trustee user ID. In order to list trusts for a given trustee, filter the collection using a query string (e.g., ``?trustee_user_id={user_id}``). in: query required: false type: string trustor_user_id_query: description: | Filters the response by a trustor user ID. In order to list trusts for a given trustor, filter the collection using a query string (e.g., ``?trustor_user_id={user_id}``). in: query required: false type: string # variables in body allow_redelegation: description: | If set to `true` then a trust between a ``trustor`` and any third-party user may be issued by the ``trustee`` just like a regular trust. If set to `false`, stops further redelegation. `false` by default. in: body required: false type: boolean consumer_description: description: | The consumer description. in: body required: false type: string consumer_id: description: | The ID of the consumer. 
in: body required: true type: string eg_description: description: | The endpoint group description. in: body required: false type: string eg_filters: description: | Describes the filtering performed by the endpoint group. The filter used must be an ``endpoint`` property, such as ``interface``, ``service_id``, ``region_id`` and ``enabled``. Note that if using ``interface`` as a filter, the only available values are ``public``, ``internal`` and ``admin``. in: body required: true type: object eg_name: description: | The name of the endpoint group. in: body required: true type: string endpoint_id: description: | The endpoint UUID. in: body required: true type: string endpoints: description: | An ``endpoints`` object. in: body required: true type: array id: description: | [WIP] in: body required: true type: string impersonation: description: | If set to `true`, then the user attribute of tokens generated based on the trust will represent that of the ``trustor`` rather than the ``trustee``, thus allowing the ``trustee`` to impersonate the ``trustor``. If impersonation is set to `false`, then the token's user attribute will represent that of the ``trustee``. in: body required: true type: boolean interface: description: | The interface type, which describes the visibility of the endpoint. Value is: - ``public``. Visible by end users on a publicly available network interface. - ``internal``. Visible by end users on an unmetered internal network interface. - ``admin``. Visible by administrative users on a secure network interface. in: body required: true type: string links: description: | A links object. in: body required: true type: object next: description: | The ``next`` relative link for the ``endpoints`` resource. in: body required: true type: string oauth_expires_at: description: | The date and time when an oauth token expires. 
The date and time stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_:
The initial ``trustor`` issuing new trust that can be redelegated, must set ``allow_redelegation`` to `true` and may set ``redelegation_count`` to an integer value less than or equal to ``max_redelegation_count`` configuration parameter in order to limit the possible length of derivated trust chains. The trust issued by the trustor using a project-scoped token (not redelegating), in which ``allow_redelegation`` is set to `true` (the new trust is redelegatable), will be populated with the value specified in the ``max_redelegation_count`` configuration parameter if ``redelegation_count`` is not set or set to `null`. If ``allow_redelegation`` is set to `false` then ``redelegation_count`` will be set to `0` in the trust. If the trust is being issued by the ``trustee`` of a redelegatable trust-scoped token (redelegation case) then ``redelegation_count`` should not be set, as it will automatically be set to the value in the redelegatable trust-scoped token decremented by `1`. Note, if the resulting value is `0`, this means that the new trust will not be redelegatable, regardless of the value of ``allow_redelegation``. in: body required: false type: integer region: description: | (Deprecated in v3.2) The geographic location of the service endpoint. in: body required: true type: string remaining_uses: description: | Specifies how many times the trust can be used to obtain a token. This value is decreased each time a token is issued through the trust. Once it reaches `0`, no further tokens will be issued through the trust. The default value is `null`, meaning there is no limit on the number of tokens issued through the trust. If redelegation is enabled it must not be set. in: body required: false type: boolean request_token_grant_type_body_required: description: | Value MUST be set to "client_credentials". in: body required: true type: string response_token_access_token_body: description: | The authentication token issued by the authorization server. 
in: body required: true type: string response_token_expires_in_body: description: | The lifetime in seconds of the access token. For example, the value "3600" denotes that the access token will expire in one hour from the time the response was generated. in: body required: true type: integer response_token_token_type_body: description: | The type of the token issued by the authorization server. Currently only the "Bearer" token type is supported. in: body required: true type: string revoke_audit_chain_id: description: | Specifies a group of tokens based upon the ``audit_id`` of the first token in the chain. If a revocation event specifies the ``audit_chain_id`` any token that is part of the token chain (based upon the original token at the start of the chain) will be revoked, including the original token at the start of the chain. If an event is issued for ``audit_chain_id`` then the event cannot contain an ``audit_id``. in: body required: true type: string revoke_audit_id: description: | Specifies the unique identifier (UUID) assigned to the token itself. This will revoke a single token only. This attribute mirrors the use of the Token Revocation List (the mechanism used prior to revocation events) but does not utilize data that could convey authorization (the token id). If an event is issued for ``audit_id`` then the event cannot contain an ``audit_chain_id``. in: body required: true type: string revoke_consumer_id: description: | Revoke tokens issued to a specific OAuth consumer, as part of the OS-OAUTH1 API extension. in: body required: true type: string revoke_domain_id: description: | Revoke tokens scoped to a particular domain. in: body required: true type: string revoke_events: description: | List of recovation events. in: body required: true type: string revoke_expires_at: description: | Specifies the exact expiration time of one or more tokens to be revoked. 
This attribute is useful for revoking chains of tokens, such as those produced when re-scoping an existing token. When a token is issued based on initial authentication, it is given an expires_at value. When a token is used to get another token, the new token will have the same expires_at value as the original. in: body required: true type: string revoke_issued_before: description: | (string, ISO 8601 extended format date time with microseconds). Tokens issued before this time are considered revoked. This attribute can be used to determine how long the expiration event is valid. It can also be used in queries to filter events, so that only a subset that have occurred since the last request are returned. in: body required: true type: string revoke_project_id: description: | Revoke tokens scoped to a particular project. in: body required: true type: string revoke_role_id: description: | Revoke tokens issued with a specific role. in: body required: true type: string revoke_trust_id: description: | Revoke tokens issued as the result of a particular trust, as part of the OS-TRUST API extension. in: body required: true type: string revoke_user_id: description: | Revoke tokens expressing the identity of a particular user. in: body required: true type: string roles: description: | A roles object. in: body required: true type: array roles_links: description: | A roles links object. Includes ``next``, ``previous``, and ``self`` links for roles. in: body required: true type: object self: description: | The ``self`` relative link for the ``endpoints`` resource. in: body required: true type: string service_id: description: | The UUID of the service to which the endpoint belongs. in: body required: true type: string trust: description: | A trust object. in: body required: true type: object trust_expires_at: description: | Specifies the expiration time of the trust. A trust may be revoked ahead of expiration. If the value represents a time in the past, the trust is deactivated. 
In the redelegation case it must not exceed the value of the corresponding ``expires_at`` field of the redelegated trust or it may be omitted, then the ``expires_at`` value is copied from the redelegated trust. in: body required: false type: string trust_id: description: | The ID of the trust. in: body required: true type: string trust_links: description: | A trust links object. Includes ``next``, ``previous``, and ``self`` links for trusts. in: body required: true type: object trust_project_id: description: | Identifies the project upon which the trustor is delegating authorization. in: body required: false type: string trust_roles: description: | Specifies the subset of the trustor's roles on the ``project_id`` to be granted to the ``trustee`` when the token is consumed. The ``trustor`` must already be granted these roles in the project referenced by the ``project_id`` attribute. If redelegation is used (when trust-scoped token is used and consumed trust has ``allow_redelegation`` set to `true`) this parameter should contain redelegated trust's roles only. Roles are only provided when the trust is created, and are subsequently available as a separate read-only collection. Each role can be specified by either ``id`` or ``name``. in: body required: false type: array trustee_user_id: description: | Represents the user who is capable of consuming the trust. in: body required: true type: string trustor_user_id: description: | Represents the user who created the trust, and who's authorization is being delegated. in: body required: true type: string trusts: description: | An array of trust objects. in: body required: true type: array url: description: | The endpoint URL. in: body required: true type: string ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/revoke.inc0000664000175000017500000000423500000000000021426 0ustar00zuulzuul00000000000000.. 
-*- rst -*- =============== OS-REVOKE API =============== This API provides a list of token revocations. Each event expresses a set of criteria which describes a set of tokens that are no longer valid. Requires v3.2+ of the Identity API. What's New in v1.1 ================== * Use of expires_at has been deprecated in favor of using audit_id and audit_chain_id. * Revocation events can use audit_id to revoke an individual token. * Revocation events can use audit_chain_id to revoke all related tokens. A related token is defined by the first (non-rescoped) token. All tokens in the chain will have the same audit_chain_id. API Resources ============= Revocation Events ----------------- Revocation events are objects that contain criteria used to evaluate token validity. Tokens that match all the criteria of a revocation event are considered revoked, and should not be accepted as proof of authorization for the user. Revocation events do not have a unique identifier (id). List revocation events ====================== .. rest_method:: GET /v3/OS-REVOKE/events Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/rel/events`` List revocation events. The HTTP Date header returned in the response reflects the timestamp of the most recently issued revocation event. Clients can then use this value in the since query parameter to limit the list of events in subsequent requests. Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - since: since_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - events: revoke_events - issued_before: revoke_issued_before - user_id: revoke_user_id - audit_id: revoke_audit_id - audit_chain_id: revoke_audit_chain_id - domain_id: revoke_domain_id - project_id: revoke_project_id - role_id: revoke_role_id - OS-TRUST:trust_id: revoke_trust_id - OS-OAUTH1:consumer_id: revoke_consumer_id - expires_at: revoke_expires_at Status Codes ~~~~~~~~~~~~ .. 
rest_status_code:: success ../v3/status.yaml - 200 Example ~~~~~~~ .. literalinclude:: samples/OS-REVOKE/list-revoke-response.json :language: javascript././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/api-ref/source/v3-ext/samples/0000775000175000017500000000000000000000000021100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/0000775000175000017500000000000000000000000023634 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022000000000000011447 xustar0000000000000000122 path=keystone-26.0.0/api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/policy-endpoint-associations-list-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/policy-endpoint-associations-list-r0000664000175000017500000000155600000000000032610 0ustar00zuulzuul00000000000000{ "endpoints": [ { "id": "1", "interface": "public", "links": { "self": "http://example.com/identity/v3/endpoints/1" }, "region": "north", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "url": "http://example.com/identity/" }, { "id": "1", "interface": "internal", "links": { "self": "http://example.com/identity/v3/endpoints/1" }, "region": "south", "service_id": "9242e05f0c23467bbd1cf1f7a6e5e596", "url": "http://example.com/identity/" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-ENDPOINT-POLICY/policies/13c92821e4c4476a878d3aae7444f52f/endpoints" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/policy-show-response.json0000664000175000017500000000053000000000000030636 0ustar00zuulzuul00000000000000{ "policy": { "blob": { "foobar_user": [ 
"role:compute-user" ] }, "id": "13c92821e4c4476a878d3aae7444f52f", "links": { "self": "http://example.com/identity/v3/policies/13c92821e4c4476a878d3aae7444f52f" }, "type": "application/json" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4421148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/0000775000175000017500000000000000000000000022706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/create-endpoint-group-request.json0000664000175000017500000000034200000000000031501 0ustar00zuulzuul00000000000000{ "endpoint_group": { "description": "endpoint group description", "filters": { "interface": "admin", "service_id": "1b501a" }, "name": "endpoint group name" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-group-response.json0000664000175000017500000000056100000000000030411 0ustar00zuulzuul00000000000000{ "endpoint_group": { "description": "endpoint group description", "filters": { "interface": "admin", "service_id": "1b501a" }, "id": "ac4861", "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoint_groups/ac4861" }, "name": "endpoint group name" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-groups-response.json0000664000175000017500000000213700000000000030575 0ustar00zuulzuul00000000000000{ "endpoint_groups": [ { "endpoint_group": { "description": "endpoint group description #1", "filters": { "interface": "admin", "service_id": "1b501a" }, "id": "ac4861", "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoint_groups/ac4861" }, "name": "endpoint group 
name #1" } }, { "endpoint_group": { "description": "endpoint group description #2", "filters": { "interface": "admin" }, "id": "3de68c", "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoint_groups/3de68c" }, "name": "endpoint group name #2" } } ], "links": { "self": "https://example.com/identity/v3/OS-EP-FILTER/endpoint_groups", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-project-response.json0000664000175000017500000000044100000000000030720 0ustar00zuulzuul00000000000000{ "project": { "domain_id": "1789d1", "enabled": true, "id": "263fd9", "links": { "self": "http://example.com/identity/v3/projects/263fd9" }, "name": "project name #1", "description": "project description #1" } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-endpoint-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-endpoint-response.js0000664000175000017500000000146700000000000033006 0ustar00zuulzuul00000000000000{ "projects": [ { "domain_id": "1789d1", "enabled": true, "id": "263fd9", "links": { "self": "http://example.com/identity/v3/projects/263fd9" }, "name": "a project name 1", "description": "a project description 1" }, { "domain_id": "1789d1", "enabled": true, "id": "61a1b7", "links": { "self": "http://example.com/identity/v3/projects/61a1b7" }, "name": "a project name 2", "description": "a project description 2" } ], "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoints/6fedc0/projects", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 
path=keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-project-response.json 22 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-project-response.jso0000664000175000017500000000145600000000000033011 0ustar00zuulzuul00000000000000{ "endpoints": [ { "id": "6fedc0", "interface": "public", "url": "http://example.com/identity/", "region": "north", "links": { "self": "http://example.com/identity/v3/endpoints/6fedc0" }, "service_id": "1b501a" }, { "id": "6fedc0", "interface": "internal", "region": "south", "url": "http://example.com/identity/", "links": { "self": "http://example.com/identity/v3/endpoints/6fedc0" }, "service_id": "1b501a" } ], "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/projects/263fd9/endpoints", "previous": null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/list-service-endpoints.json0000664000175000017500000000251100000000000030212 0ustar00zuulzuul00000000000000{ "endpoints": [ { "enabled": true, "id": "6fedc0" "interface": "admin", "legacy_endpoint_id": "6fedc0", "links": { "self": "http://example.com/identity/v3/endpoints/6fedc0" }, "region": "RegionOne", "service_id": "1b501a", "url": "http://localhost:9292" }, { "enabled": true, "id": "b501aa" "interface": "internal", "legacy_endpoint_id": "b501aa", "links": { "self": "http://example.com/identity/v3/endpoints/b501aa" }, "region": "RegionOne", "service_id": "1b501a", "url": "http://localhost:9292" }, { "enabled": true, "id": "b7c573" "interface": "public", "legacy_endpoint_id": "b7c573", "links": { "self": "http://example.com/identity/v3/endpoints/b7c573" }, "region": "RegionOne", "service_id": "1b501a", "url": "http://localhost:9292" } ], "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints", "previous": 
null, "next": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/update-endpoint-group-request.json0000664000175000017500000000014300000000000031517 0ustar00zuulzuul00000000000000{ "endpoint_group": { "filters": { "interface": "public" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-EP-FILTER/update-endpoint-group-response.json0000664000175000017500000000056200000000000031672 0ustar00zuulzuul00000000000000{ "endpoint_group": { "description": "endpoint group description", "filters": { "interface": "public", "service_id": "1b501a" }, "id": "ac4861", "links": { "self": "http://example.com/identity/v3/OS-EP-FILTER/endpoint_groups/ac4861" }, "name": "endpoint group name" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4461148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/0000775000175000017500000000000000000000000022360 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-create-response.txt0000664000175000017500000000013100000000000030570 0ustar00zuulzuul00000000000000oauth_token=accd36&oauth_token_secret=aa47da&oauth_expires_at=2013-09-11T06:07:51.501805Z././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-role-show-response.json0000664000175000017500000000030700000000000031403 0ustar00zuulzuul00000000000000{ "role": { "id": "5ad150", "domain_id": "7cf37b", "links": { "self": "http://example.com/identity/v3/roles/5ad150" }, "name": "admin" } } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-roles-list-response.json0000664000175000017500000000117200000000000031562 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "5ad150", "domain_id": "7cf37b", "links": { "self": "http://example.com/identity/v3/roles/5ad150" }, "name": "admin" }, { "id": "a62eb6", "domain_id": "7cf37b", "links": { "self": "http://example.com/identity/v3/roles/a62eb6" }, "name": "Member" } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens/6be26a/roles" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-show-response.json0000664000175000017500000000067600000000000030455 0ustar00zuulzuul00000000000000{ "access_token": { "consumer_id": "7fea2d", "id": "6be26a", "expires_at": "2013-09-11T06:07:51.501805Z", "links": { "roles": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens/6be26a/roles", "self": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens/6be26a" }, "project_id": "b9fca3", "authorizing_user_id": "ce9e07" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/access-tokens-list-response.json0000664000175000017500000000122300000000000030620 0ustar00zuulzuul00000000000000{ "access_tokens": [ { "consumer_id": "7fea2d", "id": "6be26a", "expires_at": "2013-09-11T06:07:51.501805Z", "links": { "roles": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens/6be26a/roles", "self": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens/6be26a" }, "project_id": "b9fca3", "authorizing_user_id": "ce9e07" } ], "links": { "next": null, "previous": 
null, "self": "http://example.com/identity/v3/users/ce9e07/OS-OAUTH1/access_tokens" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/authorize-request-token-request.json0000664000175000017500000000016700000000000031563 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "a3b29b" }, { "id": "49993e" } ] }././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/authorize-request-token-response.json0000664000175000017500000000007200000000000031724 0ustar00zuulzuul00000000000000{ "token": { "oauth_verifier": "8171" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-create-request.json0000664000175000017500000000010100000000000030025 0ustar00zuulzuul00000000000000{ "consumer": { "description": "My consumer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-create-response.json0000664000175000017500000000035100000000000030202 0ustar00zuulzuul00000000000000{ "consumer": { "secret": "secretsecret", "description": "My consumer", "id": "7fea2d", "links": { "self": "http://example.com/identity/v3/OS-OAUTH1/consumers/7fea2d" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-show-response.json0000664000175000017500000000030700000000000027720 0ustar00zuulzuul00000000000000{ "consumer": { "id": "7fea2d", "description": "My consumer", "links": { "self": "http://example.com/identity/v3/OS-OAUTH1/consumers/7fea2d" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-update-request.json0000664000175000017500000000010500000000000030050 0ustar00zuulzuul00000000000000{ "consumer": { "description": "My new consumer" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-update-response.json0000664000175000017500000000031300000000000030217 0ustar00zuulzuul00000000000000{ "consumer": { "description": "My new consumer", "id": "7fea2d", "links": { "self": "http://example.com/identity/v3/OS-OAUTH1/consumers/7fea2d" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/consumers-list-response.json0000664000175000017500000000104500000000000030076 0ustar00zuulzuul00000000000000{ "consumers": [ { "id": "0c2a74", "links": { "self": "http://example.com/identity/v3/OS-OAUTH1/consumers/0c2a74" } }, { "description": "My consumer", "id": "7fea2d", "links": { "self": "http://example.com/identity/v3/OS-OAUTH1/consumers/7fea2d" } } ], "links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-OAUTH1/consumers" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH1/request-token-create-response.txt0000664000175000017500000000013100000000000031017 0ustar00zuulzuul00000000000000oauth_token=29971f&oauth_token_secret=238eb8&oauth_expires_at=2013-09-11T06:07:51.501805Z././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4461148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH2/0000775000175000017500000000000000000000000022361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH2/token-create-request.txt0000664000175000017500000000003500000000000027167 0ustar00zuulzuul00000000000000grant_type=client_credentials././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-OAUTH2/token-create-response.json0000664000175000017500000000040500000000000027470 0ustar00zuulzuul00000000000000{ "access_token": "gAAAAABhi1cMynG89h8t6TJrxNiZuNzjcIUIxNctoVfuqTw7BpUedLKxjPymClVEnj9GhIT5u2mpjaJATlEAtaa3D6_t8jk_fV-mqo2IUlsmTPTnMwkcjh5FSHQVRdqvDxgY3nSqLA_Hfv-zPmjS5KWX3hmyDE5YWO1ztX6QNVQb4wTPyNL1-7I", "token_type": "Bearer", "expires_in": 3600 }././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4461148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-REVOKE/0000775000175000017500000000000000000000000022412 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-REVOKE/list-revoke-response.json0000664000175000017500000000111300000000000027401 0ustar00zuulzuul00000000000000{ "events": [ { "issued_before": "2014-02-27T18:30:59.999999Z", "user_id": "f287de" }, { "audit_id": "VcxU2JYqT8OzfUVvrjEITQ", "issued_before": "2014-02-27T18:30:59.999999Z" }, { "audit_chain_id": "VcxU2JYqT8OzfUVvrjEITQ", "issued_before": "2014-02-27T18:30:59.999999Z", "project_id": "976bf9" }, { "domain_id": "be2c70", "issued_before": "2014-02-2805:15:59.999999Z", "user_id": "f287de" } ] } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4461148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-SIMPLE-CERT/0000775000175000017500000000000000000000000023143 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-SIMPLE-CERT/show-ca-certificate-response.txt0000664000175000017500000000230700000000000031363 0ustar00zuulzuul00000000000000MIIDgTCCAmmgAwIBAgIJAIr3n9+0RSC7MA0GCSqGSIb3DQEBCwUAMFcxCzAJBgNV BAYTAlVTMQ4wDAYDVQQIDAVVbnNldDEOMAwGA1UEBwwFVW5zZXQxDjAMBgNVBAoM BVVuc2V0MRgwFgYDVQQDDA93d3cuZXhhbXBsZS5jb20wHhcNMTYxMDIwMTMwMjE4 WhcNMjYxMDE4MTMwMjE4WjBXMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVW5zZXQx DjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4 YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwoJkYpfJ Bvqfq0eAuqTIziiunNQdnSUX/aMS5UuI6tjzSkYnR5FCdf9UP8OrpA37gthvz3KK XhNLqnnV8MLzEo3+lN5IAr+TE1foXnqGs6vNvj5Jn1lViXXpIeaHxMwkJpJjPwxJ nFLtxL1m9hIx5anV5ZyJWV8RIaMqnzOJ7QYiX07aouRvmtT5O1LQzr2ht2l4EzPY YDt9UV/daSikrmroBnwgWMecaFJOC1pxSyvO2PAnw+yhX6NHgGPJmOu0TSN2IK1p o07ZVM3QJLLbEZFjcUK7FXNRk5ZfzjkCrJA1l0Ys3ByHTb2offffIyTYPuatQtfF 0XvTIwMN5eIAswIDAQABo1AwTjAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTZ4Nls 7DRmUBcrYhYDLSsDM0BCWzAfBgNVHSMEGDAWgBTZ4Nls7DRmUBcrYhYDLSsDM0BC WzANBgkqhkiG9w0BAQsFAAOCAQEALil6WvVii6yNVwu0zgt2iDYqHvnnHWnSVhEJ eKeBFRxpuwiH+UOeygFB0/6lD2r11cD0SdgaMfLAKkKspQucJIsp3BYLwBJ25oxn NL2yB3HLZeEebAQzXQwnRbWUbIpcp/XPlKjybiA3unqE+X/qdQZgxJ2Xgtp7bHhN yzDCSOUZlHrkKNXtFNvqRtoCeMBs2+jfqx2ap64ORSnLihEi57lOcUn2DbAR45OI +wppD5CcUTDsE0r+XbBK3Cm3dn6pVyVcawv5qDidRB7JdsDbx6VC7gcBbdgdbLWz Xf4KS8N77jeGjqKJ7QY5jkHdXhY+gGbeponch4y2VqLgMI0VGQ== ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-SIMPLE-CERT/show-signing-certificate-response.txt0000664000175000017500000000224300000000000032435 0ustar00zuulzuul00000000000000MIIDZjCCAk6gAwIBAgIBATANBgkqhkiG9w0BAQsFADBXMQswCQYDVQQGEwJVUzEO MAwGA1UECAwFVW5zZXQxDjAMBgNVBAcMBVVuc2V0MQ4wDAYDVQQKDAVVbnNldDEY MBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMB4XDTE2MTAyMDEzMDIxOFoXDTI2MTAx ODEzMDIxOFowRzELMAkGA1UEBhMCVVMxDjAMBgNVBAgMBVVuc2V0MQ4wDAYDVQQK 
DAVVbnNldDEYMBYGA1UEAwwPd3d3LmV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0B AQEFAAOCAQ8AMIIBCgKCAQEAua3cVYSD9KY31+wNXZv3HBS5MyzTfoY+nh4nJ2x8 Ram6liu4gkHYRonTUriIrgDLyo+2fuXrmyFcq1+8ke4KD3n24i8pzcrt6BOGAVYP KdPyXU0EkZECNmH/tKjvVqMLHcq2apsZdZ5ujBtE5G4zbTjVIEzz90AbAmRVJy7S seluCxBKtg3IGa1WwqgU4B5pgog+VDpT8XPKFvHi1cVaX76qS6MOUxXA7kuOQUct JxcyITS26Mxym7wOTI+7JV5A9Ow/dUN6CrGMrfHB59Psx3os/BfoopFmIbbnHdOO ETOeifelkhwLWLfmmOHxWgYYX/aEyW3L/xCU5QDCz9B0wQIDAQABo00wSzAJBgNV HRMEAjAAMB0GA1UdDgQWBBQeoHzsYSUSfGymk6kem/lpGVJS9DAfBgNVHSMEGDAW gBTZ4Nls7DRmUBcrYhYDLSsDM0BCWzANBgkqhkiG9w0BAQsFAAOCAQEAfsH6AN7p XWBg062LUtpfDsRyXqOLYofR4Y0Mzo1rH0jaozJsnOxsj42BdP+hBGjtZB9eUwgP gx+MJQC4pz+Wuc/xMysDT6f0hyjZmsakXM92lsztlW7+Y7u9ATa2lDTER1Fv7X6D I+kN+dhphq0lrIRWZvAf3TlZpEUG38cTxLD8OsdOlq4BxSzmvKFQf4mcbu39OX7i 0fGih0SxSa03idx9NWEOEp9IaGLo/mfL84nb4YjgV9yJj+3CkxYvqPlpiM2rHD/C hMgz/UB52OxbjYjbWoyStZwvlSwKWY75C9iYA04TZrhs5UWvAT+I2Y2UY/krrZ2a Rke2Bj7NAvXPHw== ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4461148 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/0000775000175000017500000000000000000000000022340 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-redelegated-response.json0000664000175000017500000000246400000000000031460 0ustar00zuulzuul00000000000000{ "token": { "expires_at": "2013-02-27T18:30:59.999999Z", "issued_at": "2013-02-27T16:30:59.999999Z", "methods": [ "password" ], "OS-TRUST:trust": { "id": "fe0aef", "impersonation": false, "redelegated_trust_id": "3ba234", "redelegation_count": 2, "links": { "self": "http://example.com/identity/v3/trusts/fe0aef" }, "trustee_user": { "id": "0ca8f6", "links": { "self": "http://example.com/identity/v3/users/0ca8f6" } }, "trustor_user": { "id": "bd263c", "links": { "self": "http://example.com/identity/v3/users/bd263c" } } }, "user": { 
"domain": { "id": "1789d1", "links": { "self": "http://example.com/identity/v3/domains/1789d1" }, "name": "example.com" }, "email": "joe@example.com", "id": "0ca8f6", "links": { "self": "http://example.com/identity/v3/users/0ca8f6" }, "name": "Joe" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-request.json0000664000175000017500000000045000000000000027040 0ustar00zuulzuul00000000000000{ "auth": { "identity": { "methods": [ "token" ], "token": { "id": "e80b74" } }, "scope": { "OS-TRUST:trust": { "id": "de0945a" } } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-trust-response.json0000664000175000017500000000234100000000000030366 0ustar00zuulzuul00000000000000{ "token": { "expires_at": "2013-02-27T18:30:59.999999Z", "issued_at": "2013-02-27T16:30:59.999999Z", "methods": [ "password" ], "OS-TRUST:trust": { "id": "fe0aef", "impersonation": false, "links": { "self": "http://example.com/identity/v3/trusts/fe0aef" }, "trustee_user": { "id": "0ca8f6", "links": { "self": "http://example.com/identity/v3/users/0ca8f6" } }, "trustor_user": { "id": "bd263c", "links": { "self": "http://example.com/identity/v3/users/bd263c" } } }, "user": { "domain": { "id": "1789d1", "links": { "self": "http://example.com/identity/v3/domains/1789d1" }, "name": "example.com" }, "email": "joe@example.com", "id": "0ca8f6", "links": { "self": "http://example.com/identity/v3/users/0ca8f6" }, "name": "Joe" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-create-request.json0000664000175000017500000000052600000000000027346 0ustar00zuulzuul00000000000000{ "trust": { "expires_at": "2013-02-27T18:30:59.999999Z", 
"impersonation": true, "allow_redelegation": true, "project_id": "ddef321", "roles": [ { "name": "member" } ], "trustee_user_id": "86c0d5", "trustor_user_id": "a0fdfd" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-create-response.json0000664000175000017500000000147400000000000027517 0ustar00zuulzuul00000000000000{ "trust": { "expires_at": "2013-02-27T18:30:59.999999Z", "id": "1ff900", "impersonation": true, "redelegation_count": 10, "links": { "self": "http://example.com/identity/v3/OS-TRUST/trusts/1ff900" }, "project_id": "ddef321", "remaining_uses": null, "roles": [ { "id": "ed7b78", "links": { "self": "http://example.com/identity/v3/roles/ed7b78" }, "name": "member" } ], "roles_links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-TRUST/trusts/1ff900/roles" }, "trustee_user_id": "86c0d5", "trustor_user_id": "a0fdfd" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-get-response.json0000664000175000017500000000137100000000000027027 0ustar00zuulzuul00000000000000{ "trust": { "id": "987fe8", "expires_at": "2013-02-27T18:30:59.999999Z", "impersonation": true, "links": { "self": "http://example.com/identity/v3/OS-TRUST/trusts/987fe8" }, "roles": [ { "id": "ed7b78", "links": { "self": "http://example.com/identity/v3/roles/ed7b78" }, "name": "member" } ], "roles_links": { "next": null, "previous": null, "self": "http://example.com/identity/v3/OS-TRUST/trusts/1ff900/roles" }, "project_id": "0f1233", "trustee_user_id": "be34d1", "trustor_user_id": "56ae32" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-get-role-delegated-response.json0000664000175000017500000000025200000000000031677 0ustar00zuulzuul00000000000000{ "role": { "id": "c1648e", "links": { "self": "http://example.com/identity/v3/roles/c1648e" }, "name": "manager" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-list-response.json0000664000175000017500000000132200000000000027217 0ustar00zuulzuul00000000000000{ "trusts": [ { "id": "1ff900", "expires_at": "2013-02-27T18:30:59.999999Z", "impersonation": true, "links": { "self": "http://example.com/identity/v3/OS-TRUST/trusts/1ff900" }, "project_id": "0f1233", "trustee_user_id": "86c0d5", "trustor_user_id": "a0fdfd" }, { "id": "f4513a", "impersonation": false, "links": { "self": "http://example.com/identity/v3/OS-TRUST/trusts/f45513a" }, "project_id": "0f1233", "trustee_user_id": "86c0d5", "trustor_user_id": "3cd2ce" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/samples/OS-TRUST/trust-list-roles-delegated-response.json0000664000175000017500000000061500000000000032261 0ustar00zuulzuul00000000000000{ "roles": [ { "id": "c1648e", "links": { "self": "http://example.com/identity/v3/roles/c1648e" }, "name": "manager" }, { "id": "ed7b78", "links": { "self": "http://example.com/identity/v3/roles/ed7b78" }, "name": "member" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/simple-cert.inc0000664000175000017500000000202100000000000022346 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================== OS-SIMPLE-CERT API ==================== Allows the retrieval of information for Certificate Authorities and certificates. Requires v3.0+ of the Identity API. 
Show CA Certificate =================== .. rest_method:: GET /v3/OS-SIMPLE-CERT/ca Show the availbable CA certificate. Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 - 500 Example ~~~~~~~ .. literalinclude:: ./samples/OS-SIMPLE-CERT/show-ca-certificate-response.txt :language: text Show Signing Certificate ======================== .. rest_method:: GET /v3/OS-SIMPLE-CERT/certificates Show the available signing certificate. Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 401 - 500 Example ~~~~~~~ .. literalinclude:: ./samples/OS-SIMPLE-CERT/show-signing-certificate-response.txt :language: text././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/api-ref/source/v3-ext/trust.inc0000664000175000017500000002351500000000000021316 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== OS-TRUST API ============== Trusts provide project-specific role delegation between users, with optional impersonation. API Resources ============= Trusts ------ A trust represents a user's (the `trustor`) authorization to delegate roles to another user (the `trustee`), and optionally allow the trustee to impersonate the trustor. After the trustor has created a trust, the trustee can specify the trust's id attribute as part of an authentication request to then create a token representing the delegated authority of the trustor. The trust contains constraints on the delegated attributes. A token created based on a trust will convey a subset of the trustor's roles on the specified project. Optionally, the trust may only be valid for a specified time period, as defined by ``expires_at``. If no ``expires_at`` is specified, then the trust is valid until it is explicitly revoked. 
The ``impersonation`` flag allows the trustor to optionally delegate impersonation abilities to the trustee. To services validating the token, the trustee will appear as the trustor, although the token will also contain the ``impersonation`` flag to indicate that this behavior is in effect. A ``project_id`` may not be specified without at least one role, and vice versa. In other words, there is no way of implicitly delegating all roles to a trustee, in order to prevent users accidentally creating trust that are much more broad in scope than intended. A trust without a ``project_id`` or any delegated roles is unscoped, and therefore does not represent authorization on a specific resource. Trusts are immutable. If the trustee or trustor wishes to modify the attributes of the trust, they should create a new trust and delete the old trust. If a trust is deleted, any tokens generated based on the trust are immediately revoked. If the trustor loses access to any delegated attributes, the trust becomes immediately invalid and any tokens generated based on the trust are immediately revoked. Trusts can also be chained, meaning, a trust can be created by using a trust scoped token. For more information, see `Use trusts `_. Consuming a trust ================= .. rest_method:: POST /v3/auth/tokens Consuming a trust effectively assumes the scope as delegated in the trust. No other scope attributes may be specified. The user specified by authentication must match the trust's ``trustee_user_id`` attribute. If the trust has the ``impersonation`` attribute set to `true`, then the resulting token's user attribute will also represent the trustor, rather than the authenticating user (the trustee). Request ------- Example ~~~~~~~ .. 
literalinclude:: samples/OS-TRUST/trust-auth-request.json :language: javascript A token created from a trust will have an ``OS-TRUST:trust`` section containing the ``id`` of the trust, the ``impersonation`` flag, the ``trustee_user_id`` and the ``trustor_user_id``. Response -------- Example ~~~~~~~ .. literalinclude:: samples/OS-TRUST/trust-auth-trust-response.json :language: javascript A token created from a redelegated trust will have an ``OS-TRUST:trust`` section containing the same fields as a regular trust token, only ``redelegated_trust_id`` and ``redelegation_count`` are added. .. literalinclude:: samples/OS-TRUST/trust-auth-redelegated-response.json :language: javascript Create trust ============ .. rest_method:: POST /v3/OS-TRUST/trusts Creates a trust. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trusts`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust: trust - impersonation: impersonation - trustee_user_id: trustee_user_id - trustor_user_id: trustor_user_id - allow_redelegation: allow_redelegation - expires_at: trust_expires_at - project_id: trust_project_id - redelegation_count: redelegation_count - remaining_uses: remaining_uses - roles: trust_roles Example ~~~~~~~ Status: 201 Created .. literalinclude:: samples/OS-TRUST/trust-create-request.json :language: javascript Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust: trust - id: trust_id - impersonation: impersonation - trustee_user_id: trustee_user_id - trustor_user_id: trustor_user_id - allow_redelegation: allow_redelegation - expires_at: trust_expires_at - project_id: trust_project_id - redelegated_trust_id: redelegated_trust_id - redelegation_count: redelegation_count - remaining_uses: remaining_uses - roles: trust_roles - roles_links: roles_links - links: trust_links Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 201 .. 
rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 409 - 413 - 415 - 503 Example ~~~~~~~ Status: 201 Created .. literalinclude:: samples/OS-TRUST/trust-create-response.json :language: javascript List trusts =========== .. rest_method:: GET /v3/OS-TRUST/trusts Lists all trusts. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trusts`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trustor_user_id: trustor_user_id_query - trustee_user_id: trustee_user_id_query Response -------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust: trust - id: trust_id - impersonation: impersonation - trustee_user_id: trustee_user_id - trustor_user_id: trustor_user_id - allow_redelegation: allow_redelegation - expires_at: trust_expires_at - project_id: trust_project_id - redelegated_trust_id: redelegated_trust_id - redelegation_count: redelegation_count - remaining_uses: remaining_uses - roles: trust_roles - roles_links: roles_links - links: trust_links Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-TRUST/trust-list-response.json :language: javascript Get trust ========= .. rest_method:: GET /v3/OS-TRUST/trusts/{trust_id} Gets the trust information for ``{trust_id}``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trust`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust_id: trust_id_path Response -------- Parameters ~~~~~~~~~~ .. 
rest_parameters:: parameters.yaml - trust: trust - id: trust_id - impersonation: impersonation - trustee_user_id: trustee_user_id - trustor_user_id: trustor_user_id - allow_redelegation: allow_redelegation - expires_at: trust_expires_at - project_id: trust_project_id - redelegated_trust_id: redelegated_trust_id - redelegation_count: redelegation_count - remaining_uses: remaining_uses - roles: trust_roles - roles_links: roles_links - links: trust_links Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-TRUST/trust-get-response.json :language: javascript Delete trust ============ .. rest_method:: DELETE /v3/OS-TRUST/trusts/{trust_id} Deletes a trust with ``{trust_id}``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trust`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust_id: trust_id_path Response -------- Example ~~~~~~~ Status: 204 No Content List roles delegated by a trust =============================== .. rest_method:: GET /v3/OS-TRUST/trusts/{trust_id}/roles Lists roles delegated by a trust with ``{trust_id}``. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trust_roles`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust_id: trust_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ Status: 200 OK .. literalinclude:: samples/OS-TRUST/trust-list-roles-delegated-response.json :language: javascript Check if a role is delegated by a trust ======================================= .. rest_method:: HEAD /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} Checks if a role is delegated by a trust. 
Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trust_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust_id: trust_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ Status: 200 OK Get role delegated by a trust ============================= .. rest_method:: GET /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} Gets a role with delegated by a trust. Relationship: ``https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trust_role`` Request ------- Parameters ~~~~~~~~~~ .. rest_parameters:: parameters.yaml - trust_id: trust_id_path - role_id: role_id_path Response -------- Status Codes ~~~~~~~~~~~~ .. rest_status_code:: success ../v3/status.yaml - 200 .. rest_status_code:: error ../v3/status.yaml - 400 - 401 - 403 - 404 - 405 - 413 - 503 Example ~~~~~~~ Status: 200 OK .. 
literalinclude:: samples/OS-TRUST/trust-get-role-delegated-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/bindep.txt0000664000175000017500000000202700000000000015466 0ustar00zuulzuul00000000000000# See openstack-infra/project-config:jenkins/data/bindep-fallback.txt # This is used by bindep: sudo [apt-get | yum] install $(bindep -b) gettext postgresql libffi-dev [platform:dpkg] libffi-devel [platform:rpm] libldap2-dev [platform:dpkg] libsasl2-dev [platform:dpkg] libsqlite3-dev [platform:dpkg] libssl-dev [platform:dpkg] libxml2-dev [platform:dpkg] libxslt1-dev [platform:dpkg] mysql-client [platform:dpkg !platform:debian] mariadb-client [platform:debian] mysql-server [platform:dpkg !platform:debian] mariadb-server [platform:debian] postgresql-client [platform:dpkg] postgresql-server-dev-all [platform:dpkg] python3-dev [platform:dpkg] cyrus-sasl-devel [platform:rpm] libxml2-devel [platform:rpm] libxslt-devel [platform:rpm] mariadb [platform:rpm] mariadb-devel [platform:redhat] mariadb-server [platform:rpm] openldap-devel [platform:redhat] openssl-devel [platform:rpm] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] python3-devel [platform:rpm] libmariadb-devel [platform:suse] openldap2-devel [platform:suse] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/config-generator/0000775000175000017500000000000000000000000016714 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/config-generator/keystone-policy-generator.conf0000664000175000017500000000011500000000000024702 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/keystone.policy.yaml.sample namespace = keystone ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/config-generator/keystone.conf0000664000175000017500000000060500000000000021425 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/keystone.conf.sample wrap_width = 79 namespace = keystone namespace = oslo.cache namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy namespace = oslo.db namespace = oslo.middleware namespace = osprofiler # We don't use oslo.concurrency config options in # keystone now, just in case it slips through unnoticed. #namespace = oslo.concurrency ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/devstack/0000775000175000017500000000000000000000000015267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/devstack/files/0000775000175000017500000000000000000000000016371 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/devstack/files/federation/0000775000175000017500000000000000000000000020511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/files/federation/attribute-map.xml0000664000175000017500000000732200000000000024015 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/files/federation/shib_apache_alias.txt0000664000175000017500000000016300000000000024651 0ustar00zuulzuul00000000000000 WSGIScriptAliasMatch ^(/v3/OS-FEDERATION/identity_providers/.*?/protocols/.*?/auth)$ /var/www/keystone/main/$1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/devstack/files/federation/shib_apache_handler.txt0000664000175000017500000000125200000000000025175 0ustar00zuulzuul00000000000000 SetHandler shib ShibRequestSetting requireSession 1 AuthType shibboleth ShibExportAssertion Off Require valid-user ShibRequireSession On ShibRequireAll On ShibRequestSetting requireSession 1 AuthType shibboleth ShibExportAssertion Off Require valid-user ShibRequireSession On ShibRequireAll On ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/files/federation/shibboleth2.xml0000664000175000017500000000727100000000000023447 0ustar00zuulzuul00000000000000 SAML2 SAML1 SAML2 Local ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4701145 keystone-26.0.0/devstack/files/oidc/0000775000175000017500000000000000000000000017307 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/devstack/files/oidc/apache_oidc.conf0000664000175000017500000000310100000000000022370 0ustar00zuulzuul00000000000000# DO NOT USE THIS IN PRODUCTION ENVIRONMENTS! 
OIDCSSLValidateServer Off OIDCOAuthSSLValidateServer Off OIDCCookieSameSite On OIDCClaimPrefix "OIDC-" OIDCResponseType "id_token" OIDCScope "openid email profile" OIDCProviderMetadataURL "%OIDC_METADATA_URL%" OIDCClientID "%OIDC_CLIENT_ID%" OIDCClientSecret "%OIDC_CLIENT_SECRET%" OIDCPKCEMethod "S256" OIDCCryptoPassphrase "openstack" OIDCRedirectURI "https://%HOST_IP%/identity/v3/auth/OS-FEDERATION/identity_providers/%IDP_ID%/protocols/openid/websso" OIDCRedirectURI "https://%HOST_IP%/identity/v3/auth/OS-FEDERATION/websso/openid" AuthType "openid-connect" Require valid-user LogLevel debug AuthType "openid-connect" Require valid-user LogLevel debug AuthType "openid-connect" Require valid-user LogLevel debug AuthType oauth20 Require valid-user OIDCOAuthClientID "%OIDC_CLIENT_ID%" OIDCOAuthClientSecret "%OIDC_CLIENT_SECRET%" OIDCOAuthIntrospectionEndpoint "%OIDC_INTROSPECTION_URL%" # Horizon favors the referrer to the Keystone URL that is set. # https://github.com/openstack/horizon/blob/5e4ca1a9fdec04db08552e9e93fe372b8b8b45ae/openstack_auth/views.py#L192 Header always set Referrer-Policy "no-referrer" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/devstack/lib/0000775000175000017500000000000000000000000016035 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/lib/federation.sh0000664000175000017500000002067200000000000020520 0ustar00zuulzuul00000000000000# Copyright 2016 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. DOMAIN_NAME=${DOMAIN_NAME:-federated_domain} PROJECT_NAME=${PROJECT_NAME:-federated_project} GROUP_NAME=${GROUP_NAME:-federated_users} IDP_ID=${IDP_ID:-samltest} IDP_USERNAME=${IDP_USERNAME:-morty} IDP_PASSWORD=${IDP_PASSWORD:-panic} IDP_REMOTE_ID=${IDP_REMOTE_ID:-https://samltest.id/saml/idp} IDP_ECP_URL=${IDP_ECP_URL:-https://samltest.id/idp/profile/SAML2/SOAP/ECP} IDP_METADATA_URL=${IDP_METADATA_URL:-https://samltest.id/saml/idp} KEYSTONE_IDP_METADATA_URL=${KEYSTONE_IDP_METADATA_URL:-"http://$HOST_IP/identity/v3/OS-FEDERATION/saml2/metadata"} MAPPING_REMOTE_TYPE=${MAPPING_REMOTE_TYPE:-uid} MAPPING_USER_NAME=${MAPPING_USER_NAME:-"{0}"} PROTOCOL_ID=${PROTOCOL_ID:-mapped} # File paths FEDERATION_FILES="$KEYSTONE_PLUGIN/files/federation" SHIBBOLETH_XML="/etc/shibboleth/shibboleth2.xml" ATTRIBUTE_MAP="/etc/shibboleth/attribute-map.xml" function configure_apache { if [[ "$WSGI_MODE" == "uwsgi" ]]; then local keystone_apache_conf=$(apache_site_config_for keystone-wsgi-public) echo "ProxyPass /Shibboleth.sso !" 
| sudo tee -a $keystone_apache_conf else local keystone_apache_conf=$(apache_site_config_for keystone) # Add WSGIScriptAlias directive to vhost configuration for port 5000 sudo sed -i -e " //r $KEYSTONE_PLUGIN/files/federation/shib_apache_alias.txt " $keystone_apache_conf fi # Append to the keystone.conf vhost file a directive for the Shibboleth module # and a directive for the identity provider cat $KEYSTONE_PLUGIN/files/federation/shib_apache_handler.txt | sudo tee -a $keystone_apache_conf sudo sed -i -e "s|%IDP_ID%|$IDP_ID|g;" $keystone_apache_conf restart_apache_server } function configure_shibboleth { # Copy a templated /etc/shibboleth/shibboleth2.xml file... sudo cp $FEDERATION_FILES/shibboleth2.xml $SHIBBOLETH_XML # ... and replace the %HOST_IP%, %IDP_REMOTE_ID%,and %IDP_METADATA_URL% placeholders sudo sed -i -e " s|%HOST_IP%|$HOST_IP|g; s|%IDP_METADATA_URL%|$IDP_METADATA_URL|g; s|%KEYSTONE_METADATA_URL%|$KEYSTONE_IDP_METADATA_URL|g; " $SHIBBOLETH_XML sudo cp "$FEDERATION_FILES/attribute-map.xml" $ATTRIBUTE_MAP restart_service shibd } function install_federation { if is_ubuntu; then install_package libapache2-mod-shib xmlsec1 # Create a new keypair for Shibboleth sudo shib-keygen -f # Enable the Shibboleth module for Apache sudo a2enmod shib elif is_fedora; then # NOTE(knikolla): For CentOS/RHEL, installing shibboleth is tricky # It requires adding a separate repo not officially supported # Add Shibboleth repository with curl curl https://download.opensuse.org/repositories/security://shibboleth/CentOS_7/security:shibboleth.repo \ | sudo tee /etc/yum.repos.d/shibboleth.repo >/dev/null # Install Shibboleth install_package shibboleth xmlsec1-openssl # Create a new keypair for Shibboleth sudo /etc/shibboleth/keygen.sh -f -o /etc/shibboleth # Start Shibboleth module start_service shibd elif is_suse; then # Install Shibboleth install_package shibboleth-sp # Install xmlsec dependency needed only for opensuse install_package libxmlsec1-openssl1 # Create a new 
keypair for Shibboleth sudo /etc/shibboleth/keygen.sh -f -o /etc/shibboleth # Start Shibboleth module start_service shibd else echo "Skipping installation of shibboleth for non ubuntu nor fedora nor suse host" fi pip_install pysaml2 # xmlsec1 needed for k2k install_package xmlsec1 } function upload_sp_metadata_to_samltest { local metadata_fname=${HOST_IP//./}_"$RANDOM"_sp local metadata_url=http://$HOST_IP/Shibboleth.sso/Metadata wget $metadata_url -O $FILES/$metadata_fname if [[ $? -ne 0 ]]; then echo "Not found: $metadata_url" return fi curl --form userfile=@"$FILES/${metadata_fname}" --form "submit=OK" "https://samltest.id/upload.php" } function configure_federation { # Specify the header that contains information about the identity provider iniset $KEYSTONE_CONF mapped remote_id_attribute "Shib-Identity-Provider" # Configure certificates and keys for Keystone as an IdP if is_service_enabled tls-proxy; then iniset $KEYSTONE_CONF saml certfile "$INT_CA_DIR/$DEVSTACK_CERT_NAME.crt" iniset $KEYSTONE_CONF saml keyfile "$INT_CA_DIR/private/$DEVSTACK_CERT_NAME.key" else openssl genrsa -out /etc/keystone/ca.key 4096 openssl req -new -x509 -days 1826 -key /etc/keystone/ca.key -out /etc/keystone/ca.crt \ -subj "/C=US/ST=Denial/L=Springfield/O=Dis/CN=www.example.com" iniset $KEYSTONE_CONF saml certfile "/etc/keystone/ca.crt" iniset $KEYSTONE_CONF saml keyfile "/etc/keystone/ca.key" fi iniset $KEYSTONE_CONF saml idp_entity_id "$KEYSTONE_AUTH_URI/v3/OS-FEDERATION/saml2/idp" iniset $KEYSTONE_CONF saml idp_sso_endpoint "$KEYSTONE_AUTH_URI/v3/OS-FEDERATION/saml2/sso" iniset $KEYSTONE_CONF saml idp_metadata_path "/etc/keystone/keystone_idp_metadata.xml" if [[ "$WSGI_MODE" == "uwsgi" ]]; then restart_service "devstack@keystone" fi keystone-manage saml_idp_metadata > /etc/keystone/keystone_idp_metadata.xml configure_shibboleth configure_apache # TODO(knikolla): We should not be relying on an external service. 
This # will be removed once we have an idp deployed during devstack install. if [[ "$IDP_ID" == "samltest" ]]; then upload_sp_metadata_to_samltest fi } function register_federation { local federated_domain=$(get_or_create_domain $DOMAIN_NAME) local federated_project=$(get_or_create_project $PROJECT_NAME $DOMAIN_NAME) local federated_users=$(get_or_create_group $GROUP_NAME $DOMAIN_NAME) local member_role=$(get_or_create_role Member) openstack role add --group $federated_users --domain $federated_domain $member_role openstack role add --group $federated_users --project $federated_project $member_role } function configure_tests_settings { # Enable the mapped auth method in /etc/keystone.conf iniset $KEYSTONE_CONF auth methods "external,password,token,mapped" # Here we set any settings that might be need by the fed_scenario set of tests iniset $TEMPEST_CONFIG identity-feature-enabled federation True # If not using samltest as an external IdP, tell tempest not to test that scenario if [[ "$IDP_ID" != "samltest" ]] ; then iniset $TEMPEST_CONFIG identity-feature-enabled external_idp false fi # Identity provider settings iniset $TEMPEST_CONFIG fed_scenario idp_id $IDP_ID iniset $TEMPEST_CONFIG fed_scenario idp_remote_ids $IDP_REMOTE_ID iniset $TEMPEST_CONFIG fed_scenario idp_username $IDP_USERNAME iniset $TEMPEST_CONFIG fed_scenario idp_password $IDP_PASSWORD iniset $TEMPEST_CONFIG fed_scenario idp_ecp_url $IDP_ECP_URL # Mapping rules settings iniset $TEMPEST_CONFIG fed_scenario mapping_remote_type $MAPPING_REMOTE_TYPE iniset $TEMPEST_CONFIG fed_scenario mapping_user_name $MAPPING_USER_NAME iniset $TEMPEST_CONFIG fed_scenario mapping_group_name $GROUP_NAME iniset $TEMPEST_CONFIG fed_scenario mapping_group_domain_name $DOMAIN_NAME iniset $TEMPEST_CONFIG fed_scenario enable_k2k_groups_mapping True # Protocol settings iniset $TEMPEST_CONFIG fed_scenario protocol_id $PROTOCOL_ID } function uninstall_federation { if is_ubuntu; then uninstall_package libapache2-mod-shib2 elif 
is_fedora; then uninstall_package shibboleth # Remove Shibboleth repository sudo rm /etc/yum.repos.d/shibboleth.repo elif is_suse; then unistall_package shibboleth-sp else echo "Skipping uninstallation of shibboleth for non ubuntu nor fedora nor suse host" fi } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/lib/oidc.sh0000664000175000017500000001457400000000000017322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
DOMAIN_NAME=${DOMAIN_NAME:-federated_domain} PROJECT_NAME=${PROJECT_NAME:-federated_project} GROUP_NAME=${GROUP_NAME:-federated_users} OIDC_CLIENT_ID=${CLIENT_ID:-devstack} OIDC_CLIENT_SECRET=${OIDC_CLIENT_SECRET:-nomoresecret} OIDC_ISSUER=${OIDC_ISSUER:-"https://$HOST_IP:8443"} OIDC_ISSUER_BASE="${OIDC_ISSUER}/realms/master" OIDC_METADATA_URL=${OIDC_METADATA_URL:-"https://$HOST_IP:8443/realms/master/.well-known/openid-configuration"} OIDC_INTROSPECTION_URL=${OIDC_INTROSPECTION_URL:-"https://$HOST_IP:8443/realms/master/protocol/openid-connect/token/introspect"} IDP_ID=${IDP_ID:-sso} IDP_USERNAME=${IDP_USERNAME:-admin} IDP_PASSWORD=${IDP_PASSWORD:-nomoresecret} MAPPING_REMOTE_TYPE=${MAPPING_REMOTE_TYPE:-OIDC-preferred_username} MAPPING_USER_NAME=${MAPPING_USER_NAME:-"{0}"} PROTOCOL_ID=${PROTOCOL_ID:-openid} REDIRECT_URI="https://$HOST_IP/identity/v3/auth/OS-FEDERATION/identity_providers/$IDP_ID/protocols/openid/websso" OIDC_PLUGIN="$DEST/keystone/devstack" function install_federation { if is_ubuntu; then install_package libapache2-mod-auth-openidc sudo a2enmod headers install_package docker.io install_package docker-compose elif is_fedora; then install_package mod_auth_openidc install_package podman install_package podman-docker install_package docker-compose sudo systemctl start podman.socket else echo "Skipping installation. Only supported on Ubuntu and RHEL based." 
fi } function configure_federation { # Specify the header that contains information about the identity provider iniset $KEYSTONE_CONF openid remote_id_attribute "HTTP_OIDC_ISS" iniset $KEYSTONE_CONF auth methods "password,token,openid,application_credential" iniset $KEYSTONE_CONF federation trusted_dashboard "https://$HOST_IP/auth/websso/" cp $DEST/keystone/etc/sso_callback_template.html /etc/keystone/ if [[ "$WSGI_MODE" == "uwsgi" ]]; then restart_service "devstack@keystone" fi if [[ "$OIDC_ISSUER_BASE" == "https://$HOST_IP:8443/realms/master" ]]; then # Assuming we want to setup a local keycloak here. sed -i "s#DEVSTACK_DEST#${DATA_DIR}#" ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml sudo docker-compose --file ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml up -d # wait for the server to be up attempt_counter=0 max_attempts=100 until $(curl --output /dev/null --silent --fail $OIDC_METADATA_URL); do if [ ${attempt_counter} -eq ${max_attempts} ];then echo "Keycloak server failed to come up in time" exit 1 fi attempt_counter=$(($attempt_counter+1)) sleep 5 done KEYCLOAK_URL="https://$HOST_IP:8443" \ KEYCLOAK_USERNAME="admin" \ KEYCLOAK_PASSWORD="nomoresecret" \ HOST_IP="$HOST_IP" \ python3 $OIDC_PLUGIN/tools/oidc/setup_keycloak_client.py fi local keystone_apache_conf=$(apache_site_config_for keystone-wsgi-public) cat $OIDC_PLUGIN/files/oidc/apache_oidc.conf | sudo tee -a $keystone_apache_conf sudo sed -i -e " s|%OIDC_CLIENT_ID%|$OIDC_CLIENT_ID|g; s|%OIDC_CLIENT_SECRET%|$OIDC_CLIENT_SECRET|g; s|%OIDC_METADATA_URL%|$OIDC_METADATA_URL|g; s|%OIDC_INTROSPECTION_URL%|$OIDC_INTROSPECTION_URL|g; s|%HOST_IP%|$HOST_IP|g; s|%IDP_ID%|$IDP_ID|g; " $keystone_apache_conf restart_apache_server } function register_federation { local federated_domain=$(get_or_create_domain $DOMAIN_NAME) local federated_project=$(get_or_create_project $PROJECT_NAME $DOMAIN_NAME) local federated_users=$(get_or_create_group $GROUP_NAME $DOMAIN_NAME) local member_role=$(get_or_create_role Member) openstack 
role add --group $federated_users --domain $federated_domain $member_role openstack role add --group $federated_users --project $federated_project $member_role openstack identity provider create \ --remote-id $OIDC_ISSUER_BASE \ --domain $DOMAIN_NAME $IDP_ID } function configure_tests_settings { # Here we set any settings that might be need by the fed_scenario set of tests iniset $TEMPEST_CONFIG identity-feature-enabled federation True # we probably need an oidc version of this flag based on local oidc iniset $TEMPEST_CONFIG identity-feature-enabled external_idp True # Identity provider settings iniset $TEMPEST_CONFIG fed_scenario idp_id $IDP_ID iniset $TEMPEST_CONFIG fed_scenario idp_remote_ids $OIDC_ISSUER_BASE iniset $TEMPEST_CONFIG fed_scenario idp_username $IDP_USERNAME iniset $TEMPEST_CONFIG fed_scenario idp_password $IDP_PASSWORD iniset $TEMPEST_CONFIG fed_scenario idp_oidc_url $OIDC_ISSUER iniset $TEMPEST_CONFIG fed_scenario idp_client_id $OIDC_CLIENT_ID iniset $TEMPEST_CONFIG fed_scenario idp_client_secret $OIDC_CLIENT_SECRET # Mapping rules settings iniset $TEMPEST_CONFIG fed_scenario mapping_remote_type $MAPPING_REMOTE_TYPE iniset $TEMPEST_CONFIG fed_scenario mapping_user_name $MAPPING_USER_NAME iniset $TEMPEST_CONFIG fed_scenario mapping_group_name $GROUP_NAME iniset $TEMPEST_CONFIG fed_scenario mapping_group_domain_name $DOMAIN_NAME iniset $TEMPEST_CONFIG fed_scenario enable_k2k_groups_mapping False # Protocol settings iniset $TEMPEST_CONFIG fed_scenario protocol_id $PROTOCOL_ID } function uninstall_federation { # Ensure Keycloak is stopped and the containers are cleaned up sudo docker-compose --file ${OIDC_PLUGIN}/tools/oidc/docker-compose.yaml down if is_ubuntu; then sudo docker rmi $(sudo docker images -a -q) uninstall_package docker-compose elif is_fedora; then sudo podman rmi $(sudo podman images -a -q) uninstall_package podman else echo "Skipping uninstallation of OIDC federation for non ubuntu nor fedora nor suse host" fi } 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/lib/scope.sh0000664000175000017500000000166700000000000017514 0ustar00zuulzuul00000000000000# Copyright 2019 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. function configure_enforce_scope { iniset $KEYSTONE_CONF oslo_policy enforce_scope true iniset $KEYSTONE_CONF oslo_policy enforce_new_defaults true iniset $KEYSTONE_CONF oslo_policy policy_file policy.yaml sudo systemctl restart devstack@keystone } function configure_protection_tests { iniset $TEMPEST_CONFIG identity-feature-enabled enforce_scope true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/plugin.sh0000664000175000017500000000714600000000000017131 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # Copyright 2016 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
KEYSTONE_PLUGIN=$DEST/keystone/devstack if is_service_enabled keystone-saml2-federation; then source $KEYSTONE_PLUGIN/lib/federation.sh elif is_service_enabled keystone-oidc-federation; then source $KEYSTONE_PLUGIN/lib/oidc.sh fi source $KEYSTONE_PLUGIN/lib/scope.sh # For more information on Devstack plugins, including a more detailed # explanation on when the different steps are executed please see: # https://docs.openstack.org/devstack/latest/plugins.html if [[ "$1" == "stack" && "$2" == "install" ]]; then # This phase is executed after the projects have been installed echo "Keystone plugin - Install phase" if is_service_enabled keystone-saml2-federation; then echo "installing saml2 federation" install_federation elif is_service_enabled keystone-oidc-federation; then echo "installing oidc federation" install_federation fi elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then # This phase is executed after the projects have been configured and # before they are started echo "Keystone plugin - Post-config phase" if is_service_enabled keystone-saml2-federation; then echo "configuring saml2 federation" configure_federation elif is_service_enabled keystone-oidc-federation; then echo "configuring oidc federation" configure_federation fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # This phase is executed after the projects have been started echo "Keystone plugin - Extra phase" if is_service_enabled keystone-saml2-federation; then echo "registering saml2 federation" register_federation elif is_service_enabled keystone-oidc-federation; then echo "registering oidc federation" register_federation fi elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then # This phase is executed after Tempest was configured echo "Keystone plugin - Test-config phase" if is_service_enabled keystone-saml2-federation; then echo "config tests settings for saml" configure_tests_settings elif is_service_enabled keystone-oidc-federation; then echo "config tests settings for oidc" 
configure_tests_settings fi if [[ "$(trueorfalse False KEYSTONE_ENFORCE_SCOPE)" == "True" ]] ; then # devstack and tempest assume enforce_scope is false, so need to wait # until the final phase to turn it on configure_enforce_scope configure_protection_tests fi fi if [[ "$1" == "unstack" ]]; then # Called by unstack.sh and clean.sh # Undo what was performed during the "post-config" and "extra" phases : fi if [[ "$1" == "clean" ]]; then # Called by clean.sh after the "unstack" phase # Undo what was performed during the "install" phase if is_service_enabled keystone-saml2-federation; then echo "uninstalling saml" uninstall_federation elif is_service_enabled keystone-oidc-federation; then echo "uninstalling oidc" uninstall_federation fi fi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4181154 keystone-26.0.0/devstack/tools/0000775000175000017500000000000000000000000016427 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/devstack/tools/oidc/0000775000175000017500000000000000000000000017345 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/tools/oidc/__init__.py0000664000175000017500000000000000000000000021444 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/tools/oidc/docker-compose.yaml0000664000175000017500000000223400000000000023144 0ustar00zuulzuul00000000000000version: "3" services: keycloak: image: quay.io/keycloak/keycloak:latest command: start-dev --log-level debug --log=console,file --https-certificate-file=/etc/certs/devstack-cert.pem --https-certificate-key-file=/etc/certs/devstack-cert.pem container_name: oidc_keycloak_1 environment: KEYCLOAK_ADMIN: admin 
KEYCLOAK_ADMIN_PASSWORD: nomoresecret KEYCLOAK_USER: admin KEYCLOAK_PASSWORD: nomoresecret KEYCLOAK_LOG_LEVEL: DEBUG DB_VENDOR: mariadb DB_DATABASE: keycloak DB_USER: keycloak DB_PASSWORD: "nomoresecret" DB_ADDR: "keycloak-database" DB_PORT: "3306" JAVA_OPTS: "-server -Xms128m -Xmx1024m -XX:MetaspaceSize=128M -XX:MaxMetaspaceSize=512m -Djava.net.preferIPv4Stack=true -Djboss.modules.system.pkgs=org.jboss.byteman -Djava.awt.headless=true" ports: - "8088:8080" # host:container - "8443:8443" volumes: - DEVSTACK_DEST:/etc/certs:rw keycloak-database: image: quay.io/metal3-io/mariadb:latest environment: MYSQL_ROOT_PASSWORD: nomoresecret MYSQL_DATABASE: keycloak MYSQL_USER: keycloak MYSQL_PASSWORD: nomoresecret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/devstack/tools/oidc/setup_keycloak_client.py0000664000175000017500000000356600000000000024311 0ustar00zuulzuul00000000000000import os import requests KEYCLOAK_USERNAME = os.environ.get('KEYCLOAK_USERNAME') KEYCLOAK_PASSWORD = os.environ.get('KEYCLOAK_PASSWORD') KEYCLOAK_URL = os.environ.get('KEYCLOAK_URL') HOST_IP = os.environ.get('HOST_IP', 'localhost') class KeycloakClient: def __init__(self): self.session = requests.session() @staticmethod def construct_url(realm, path): return f'{KEYCLOAK_URL}/admin/realms/{realm}/{path}' @staticmethod def token_endpoint(realm): return f'{KEYCLOAK_URL}/realms/{realm}/protocol/openid-connect/token' def _admin_auth(self, realm): params = { 'grant_type': 'password', 'client_id': 'admin-cli', 'username': KEYCLOAK_USERNAME, 'password': KEYCLOAK_PASSWORD, 'scope': 'openid', } r = requests.post(self.token_endpoint(realm), data=params).json() headers = { 'Authorization': ("Bearer %s" % r['access_token']), 'Content-Type': 'application/json', } self.session.headers.update(headers) return r def create_client(self, realm, client_id, client_secret, redirect_uris): self._admin_auth(realm) data = { 'clientId': client_id, 
'secret': client_secret, 'redirectUris': redirect_uris, 'implicitFlowEnabled': True, 'directAccessGrantsEnabled': True, } return self.session.post( self.construct_url(realm, 'clients'), json=data ) def main(): c = KeycloakClient() redirect_uris = [ f'http://{HOST_IP}/identity/v3/auth/OS-FEDERATION/identity_providers/sso/protocols/openid/websso', f'http://{HOST_IP}/identity/v3/auth/OS-FEDERATION/websso/openid', ] c.create_client('master', 'devstack', 'nomoresecret', redirect_uris) if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/doc/0000775000175000017500000000000000000000000014230 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/Makefile0000664000175000017500000001317100000000000015673 0ustar00zuulzuul00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build SOURCEDIR = source SPHINXAPIDOC = sphinx-apidoc # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " autodoc generate the autodoc templates" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* autodoc: $(SPHINXAPIDOC) -f -o $(SOURCEDIR) ../keystone html: autodoc $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/keystone.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/keystone.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/keystone" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/keystone" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. 
The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/README.rst0000664000175000017500000000034600000000000015722 0ustar00zuulzuul00000000000000Building Docs ============= Developer documentation is generated using Sphinx. To build this documentation, run the following from the root of the repository:: $ tox -e docs The documentation will be built at ``doc/build/``. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/doc/ext/0000775000175000017500000000000000000000000015030 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/ext/__init__.py0000664000175000017500000000000000000000000017127 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/requirements.txt0000664000175000017500000000103500000000000017513 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=2.2.1 # Apache-2.0 sphinx>=2.0.0,!=2.1.0 # BSD sphinxcontrib-apidoc>=0.2.0 # BSD sphinxcontrib-seqdiag>=0.8.4 # BSD sphinx-feature-classification>=0.3.2 # Apache-2.0 sphinxcontrib-blockdiag>=1.5.5 # BSD reno>=3.1.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 python-ldap>=3.0.0 # PSF ldappool>=2.0.0 # MPL ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/doc/source/0000775000175000017500000000000000000000000015530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4741144 keystone-26.0.0/doc/source/_static/0000775000175000017500000000000000000000000017156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/_static/horizon-login-idp.png0000664000175000017500000001416200000000000023240 0ustar00zuulzuul00000000000000PNG  IHDR!w pHYs+$IDATx{TWo yT<idD*R]l*]-vխ_VEX(xz(9b[D| EȫT`5 $$$GfuN '~;swr'4L!LDmP P P P 
QL&v#fRY&x2EQQQdd@ |][nCjqӦMؐLj5O0555999{e2omA1jyU||P(d#dm6^zo;(.. ,ؼy3B}kD"w### 6[[[9ή]LLL-ٝ;v111AAA .TTT\xGGGK$f8/\wQ-[58͊Qm\*vZlcqqŋڜ:`0srrfϞ}3g466ܽ{7==͝;7""b4Omڴ)99… Ό;v$%%ܼyS,gggGDD0NM@'X[[[:;;SSS7lp9 #'O<==y<^bbb||<}d+WdjD"]&%%%QHnnnIMM~?666zzz[ggg6|ɓ'SSSd@{{ɓ'?Ӝkk뤤$DzⅡaff&J=zhDDĹsx<ޫW>쳬,gg焄%Gɓ'5?/;99QԠ_U"<}ƆFٳ'(( --wߥR:;;5<L&D^ϟOR׬YLhѢ꾾>A*++===Lf{{gtuu<9N@*fZ_yyy orcإ455}ֳf $HXUUU cǏ;99yxxhВt/H$R``kaa1k,>_VVF>gϞbEG˗/ ohmm i3ftttD"H$200Pl&˳JKKE"L&stt`344`>}갰0Mww̙3mmm+**X, OKK[pb{qh4HH fffQQQqqq]]] eݚw筹qƙ3gɽ{nݺuif";Q$=~k'2#2<00ݍ ۸qӧ/_ky昘k׮oݺʕ+<o%U (_e˖]paΝ$ޣR{쉊RS-L&SD" ڵkGP֯_s̙3ۗ+KK>lH;ƑgΜ7o޽{G?VR A^xqFڔ`TYYYYYYrB O8;>э%P# ==$88xժUڎeL@f3}<!jf~jf~j"2zcb2|Go7R3"e=Uv#^Oc4j$''gՏ񣻻ȑ#k bii~zCCsΧ~mζ\hXVL35c2ޑJ.\yk)dͫ] ݸq,ۣǓMMWcNNN/F߽{A6r`@ZZlKK N_nS|||[[[lllll?\655ݿL&WUU͙3g۶m E` B$ 3g΄XYYΝ;?D"ݹs'<<<33[OMoSSfSiv?IPƪn[KK-B"|%jF<lΜ9jZ*))i͚5hx^xBtܹsfYNAގV3FLLL455B Vl6o޼ʠ C*** 4,Nboպ`aƌ,ɓ'6K~0˪;9A;U #44 ^H$Y\\,e[n͝;700@ [YYUUUuuuD2KKKUwj] =z$ѥQ̞>}k.>.D_՝z(ommm}}}/]s%K$''KR777t`cc}||Ƌ/i4B144$Hǎ;~{||< nnnX[[&sU?~nn+[:;;ƚ`U^u8AkOwrL&J꼽/&A} 7dgff֮_J2"&7nܨ S?@5CT3?)\.J>>uuu6l}}}JJJPkT-?}>p8>>>!!!O>bӦM\.7**jvvv'N-ݻݶׯ?yGΟ?SVV{~fH$RkkL&֮]{EZ_drkk+J駟V^k o|ՖJJJl7F L&[reIIIoo/ EEE+W422jmm-++R VRR`׿Z, ikk̗Zd2944F/iiil2٘;;;.]J&%%%hy 766?̙;FFF~7GY|EGjj2.V`07T*JG0ѹBdggWWW߾}0k֬e˖)`ll (,,s玿?+Vp8-''38sL>j Kղ@ @{{{{{{ 8dh5tSSӘSNuuu-Z///oΝhe˖UUUݾ}$Ȉ:::L&@ H$LQ .]bj]j ˳$IVVpd@6b=z˗۶mCPԅ ?~@@ $$NǏ֢gjQK2ظի1tk.X,:::֘000R\\ 'p\6 Q-UKzzzbbb^^Z%W3a2նSNݸqC,ggg /211ҥKlvttDDD8::[^N }1dFFFJJ dqNKfe=bIII6mb0o߶}JթZ/߾};kpʕdLKKC %ڿYm@ PƓgϞy@ J .`322bm߾iR\.Pcs2U&٩جÇP.bkk}H4j%!!L&d2(qBjƊOT/^\YYYVV&:& X,o޼1|?GGhA^N Xd *h<>hÁox𡉉jqLL̑#GBCCW\ۻjժX|!}>|Z/EYd_|pǜi]I HSS###Z8*E`9-Θֹy:0r!!!ajYt`YGtttULtuur3t:]}2i 3 L&rB݈z0;,HDgIL.LFgOD"gO53 $[ z2ID" VL8C@&(&_r0bc1tnV4mD$Y{Yi\{) jD T3 яD"iLzP{2R{<1;Br''xcFpuZFX 6t,7㏑Q)(Ƀ_(%#B P͓E5c&m8hR P4x3'JVz g,AwE < (1lc(5PvJRT& hfȔ]L&$P%IENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/doc/source/_static/horizon-login-sp.png0000664000175000017500000003771600000000000023120 0ustar00zuulzuul00000000000000PNG  IHDR}& pHYs+ IDATxyx3i-hIږe)ErTY( X@Q^^AJARV,mڲemtM23d6Idr73aЮ.@ p"A p"G*4NG EQbϊDTyyyii^p|uN!H=<Da4Mf͐РIf͚:N"86&Ht:]RYYВ\j@ 8D@ 8D@ 8D@ 8D@vz`ʝG+7e渺܆ tBC Ls>rAevu-Pa(IOC_*B }FTo1K]]4HDSuK 2qaN-IDϽiMt:'B Jl,# "A p"A p"A p(a``0T: ju`` ME N"A p0@,lɵ]tZ lOE9xA cn8}gsDpjob])gsDp3s~厄k 8 S-~zW^93mXDp$[Y P/a@ 8D@ 8DC6qu Ay\]4HD# vu Apu !ASVf<H7su ! UJY9MWW 7:. K9ePJW nC ܆VD@ 8D@ 8D Pd^4}!C]UPdn~#Bu۰fu: zB BLO5}$K "A peTBHnNBw`26#-%K$q1SBP* *8r),UgueFcz ˁo+۠Vj5l8^?!k#2RSZSmU!-Rdo\ك~V9wױE(1cƽZ痢 !~G4Di)~lw4[ 1c_o٬'rfOUED\ !ǏaLۂahΜ:ͯAPED?h-c`]g}OiS[:pXa5oYsjR/6IOMٸ+ŌB>*nWEڙ lGo퍛9g׶-mԙzFX`fq9٫-Y! r˭m)%rWͲsϞ:)BS .7kA}`ϮGͣDlXl~=;N%rYf[>)4|[ȹzS4{}ɊhmHXϰVm˦&j5Vf9sߛ!W.[bK+-ٰvՠj5-6jWV u46IHK˖X;E3&O1 7]m˺RdO|e-iHHK>ibzje)--Y?{㑆@b%Y^䪳XSEsߛaK.B=ajiV.[bmsۖM6NhV-[bKZfؿ{g$ `5_? UpM_uaLd-l|7YZf3j-{fzע5@ ŗ3+m{wNN-,,+,jmB?zĖ2+W߷Ej4fRqMK06;?zj45MCVhx;M|͚4K5m X8b:'{: \1`pld߳NQd kVO42D{08VPB mojْvsA#+>2MGŒmZoȸ!J س?$Ԑ&M1NhN''K8l܃a֎BdK8l6k|7?2ӳw[l|Bj5l }f|$} ei)){frŜ w*}~4{S؂!-&t{L)Gm۲D?8Ǝ\=xXZͱ,+>]´_?i3?i+@MAQq?ٕrb~檳g~4 ! rcSSmٔ:fx~)f)8_?vrX0iʱ֬.+Ӛ-:'=p͈ƌ#g_hyׯ}tq~l 6? c;f|{h3n{v=RTXy{&Hm zvskw:Y EV;ʜ _(r}Bݷ3L\{Sߝ%T!>_?znUyy'}XUYiEK^^5j \Qq88~c֘qE:cƍUg?0!U/MKXzJ˟ D#Ǡ6 xrMOMZQT.Z–e>.}Y-hպMF?l =VV4SG L`2TdfJ%{~5 ]5=W.-Wo#ɱߏ?l9كoL-c( ]hO-;T!aH'tïEΝՖgX=lf˹,Ǐ^-O=a.Sf+kd*ZHߚ=8(vذt[8"A 3:JLW !mpE#K@&<3Krs/)~v+xe3}vsuRqQaAnNY̅ul V(V?$v&wp' 2ccg8C K.ճlAi!nKgm9ޫwǏ>~zNrm[6r[Ȯ>3?Ć`;"B3>i'2ٙ'~m>pU\ `uc;B%lGwt+XMf :dP=5@21 "K8^n; Q)m6O U3΃sBIw_!FGDGD49Dѯ۾Kvh8[.qD/L/ڏKOKTD=eZk/h~ٌG oMuKp=g8<۪BޛOjԁs,C؇(YQa3槒XM%h5~(~' /O}w)[֕Uo?ûQOdܜlК |D [iygOZ=/bzrB'"l( jᐈm'[/쩓Adq&M`sCDaA=b&8Փ@bh4ca\D\TX`u9"jD"g?{tl N:t4}dttw0@&8g'mXq8[LƳگz̿ZyVf9'O㾤Vތhe-bSx0Y-<"CHK3 GD;EN9rْ9ͰqQML,p? 
FaWr` Z`59%e_?5&Zسժ/r'\Ry"x4?rfc:Dzs+mX kG^zj3R -j+o~3v֬޿{gACF}U)}8lфIS̜nvhQagu l9pX jvrۖMw?8VTB ,%H,W֔';O4.zN''/xղ% )?=czFU!8 {2@lG5EElv,i)|4YoYQa_x$gX{س1; 2#-ƻMQ4| )8k6Lvc :B Z 0iN4͈j*4<-j5Vވ܂CTΞgYZeW`K\gN^O+45G3LrԔ!mC}͙p¸f?H,ػ+,"Җ̌7hv NF*w^^dٰf5b <ê4gB[M4Et,+l=juL\1BgtLM/a?j2Rya4P*?]F&+HlJ|sMpFSoOD+LG(Yj-,c.]^Tfr ,\FsFgۛg~`ώx7,U|lE{ޖ.qwvsF3:f¤)i)~ݣ h݀+nAKngІ-φcsav[5r7^fC]ʖ7YGhxʵV#nG|}R7Tr͆c_Ķoe6R(wM[iR(DA50!??_RqM:'Lr$Fc>ӝ8f{3:v‚'=̿Ѕmgtȷ?{@Id5} {CZe<;2fO:Qd劶!ڳ:7i*x8ajBbW~zZ {z澾 =E>ju`` M!xYZj4l?]=j5M8RYN嬨[4"G@QZ>vrƅkڼpC 8D@ 88NGDZ:\=D@ 8D@ 8D@ 8D@ 8D@ 8D@ 8D@ 8D@ 8D#-: :Y:HgϞ}(W #pM >|/1bDϞ=3vح[zi'MԧO>}L0֭JJJԬ>ŋGEE]reqqq}} x0I~>|WΝ۵kʄ+V/^ouBBBRRR>cB3dEo~ŊM4INN={ZޱcEQNZ@h\C\~͛7g͚շo_oo͛˱gΜaٸqcUUٳ;w޽{O>ҥK5yRt٭Zٳg^^իW` ;&JkSO=E9qٳg=<HLLܾ}{HHkGV4mBz:ݻccc8`|Deff^b^t:ݪUN>]QQq ޾}{ʔ)d'ׯ_߸q޽ѱc1cƘ]`;7C{D@ 8D@ 8D@ 8D@XZ:\z blx4Xa@ 8D@ 8D@ 8D@ 8N #FDEE]EEEmذ;G bkҥK7nhٲebb[o%>ԩSf͚e|M?/Vr|111QTN2%///;;}QQQ{%u֭[7bĈ;v?LN4)&&fƌeeeѣGGGG녅K.EEE}wS6e5;رccbb^|'Nsٺu3<3ydMJ35\BȦM =vؔbC,sAm۶jpO>ٷo_\(2?@y7m>;~̙3;tp̙Cd>>Z|2U!Ǐ?uԇ~yfg >s !dĈ#GZ'q>ģGB}]#!!!6ۛե.))!L<8AaaJ c4zA||ŋ+**!Z2{L}}}EʎxWv믿?7_B5jDiҤHPg .$t~%&&B!l؂u=-6mJ)..6}p֭NZ|yLL̘1cX:S#K 2uɓ'gff~ׯk1v}ʑYYY7o4T*`[nMoVZZz!vwwwnݺe׻woLvQFUVV N)T߾}e2ϟsڵrBɓ'oܱ4KtIDATQ^^0g* G8p`nnnhhhvR)o>F{n.8#;]֭{I&ovVVK/ԫW/B0RtԨQgΜtvҥKaÆxKA#Tf;7n :4###>>M6/BHHȴi~ 盾\| ҷo~M4O>g}n׋hݺ/^U\bF a C~~>nTl.];v|Zej:00iK\Mp.#[5z10 Pa܉h_ Rd0z\{X&e=Dpahvw^{Xg!;AѮ䱿+G1J+TQÆChqȶ-08\+(/s>O ~p`sU ͛S˖-Otx{*a۹v횥zBSf͚5s̥K׮;??,XލS0wܩzn߾MQ\.Q%iӦM/4qQFuܙyyY=e[jUNNS:i^ DǏL8_'''O6mܹwJ?~Bq֭[[ly޽~֭QQQ /r! uƍ+ٳgRs@ۺu9sLtR7xǏ>|X.3cFFY7ofl_׾}LpZmXXؘ1c ŋ,..nݺ/'8SRBN֭[.]R`Y ?HzW^mU!Ç.]JOO2eD"YrD"ټy#-Z}vBD"sNPPԩSis`㏣>L)++ۿĉ-Z흐`iŋQQQOr}~iܸ1~/X~_? /pBJuVK35H$Æ [nݷ~{ԩb6 7]ϑl#w/@k.88wpppv7/Wy;MӣG޻wGmzKT*UΝe2Ylllff^'0 ӻwoa B\._xq@@{.]ݻgK.) 
gffڲ _&>|PN._R===xXK۷o/ _QQau3gNǎsrr.]ca]fff̌Rk5b:{H>֭[{xxPղeK/]4m4#"ˍNr6CR`  }]xQ[*̙3)))/^d,++94QNZk4R[&IR[fڸq說?<++znڴiFu:oƍy /ZFc'ѩCR!B233i 5>>aӉ/LII?~f͚Νի_|٨5 Mk/RVPԢ~ooo1 N}}y=x ۻؖn5WWWn/ /@-8oȪ_vܹyo>''իΝ[v- jZooo//‹/ ?ڷoϦ!!D"t?~[JKKg% -P[өS'Z}UVyC arss^ZUUuJ%̃V\ZYYYYYlK/_6^ ]v͛gСC؍OFj׮N:v;vܿE/-֭۹sU*s=z۷?Sf={_~t%!!o߾}Y|Wll,{Lӓ-饗^:iԨK/e˖;_;v }||^~۷; 7n\BBOt-&LGZN2Qw UQ=C@LYASŬ>Yi?@8 Q~.WNiw %.lz BD@d( !P o@d P1\bcٔ}pEET7uD7qiBS!RM1Rx$)Khm26dRnWx{H4ES [}b6E%AJ4E%8ZN70n^+H)))Mӎ8aW REBQDBSǝ*3 ˓_Xmwm{{н}%L}d/ݣ0I(a7;{t 3X<(ڝK#ۏM͹.[yFG!F.d}'Τ\m޾xۡ ~:TN}Oeڮڼm;TYUxGem؛/;|Lx;!Z_O;ynqiDp웅Rm/f-]z=ү~anRɾә9m[4?708tBfk&~sn-ؼᄐ%']s?2?J\ƥ.0-ARc;m,`R ͯJwh.h˖nNmZ|$=,-ڮRX]p~ =IH¹FJϜ[wjYܛײ'()Jq|Ooy]f_+1UJ[oX, }zQ.sj>]ڳTf6 WCCk>kҡ'6[Z^ñ3){~@H$ſ;ťٓ8nPo=>пzuyB`s>#HFkwXz+ya`6[r٥ !Y7ڶ$Ww/[ӟՇeO/Y#%A\vz+-,XLM򯧏p~a}ċYJKamjyޤ 4tĸ̼[֤D"釜I |0l_{Ƃ2oAജU:! P_غEmxٷ *k~<ѡmRqrwl"2'L[`wk!6P/ʻ.u7kHKEXLYnB2ro1+NorsPKBaȳ"fAK_|mһ#>S EY.sغE4o[PSf_pnu l[1Ζb->O-Z=E#נ~F y}}x5o,V72w7D[.ވlML>.-aAJ)MB4U?Yk|IIYY/O(BT"3 !D ٬;56{w_OTń8qb)x߯+lG.5I4[QiYcJdzK+MA!B_UYTn9I$[)i.! ֠s ی?]4j%b /"!gT7i;4,ο>UVYU{+34pEKc: oVܰKQaJpI(Zd= \M}[^˯[w] P[.EiA!Vp}_.Z㳖OjJ|_j&3!AWn۳s0!y*t{~cK~X_xX[xOJ+)=)><@SV[rZ]V{Z[ڕYJ }Lo~?%l<[YeD"Q%^-ńL#s`KB|_nC-27KWI9W`ƥ6:zW3PݱD. 
С_fͼe, '5_ɶ=ce e=D4medSoO?x⡦<#lަ|boߎD'_ZpO^ۣᣔOS=tU&"Fe(}*+mL|?7.c5>jUPݱR/` ~͞R][O#R]&'2eĀPvJm\{Ƃ}aHu[6scao0!??_Rֱ’nv,Bܸ{Wc{GZ<֮oV.)WW >!ZHӴn]۟K i<յh]v!ǃ*.@ 8DC񉏯'5K H||=gmpy7gLd*Bȅ?8xbܩO}PZֿلO4o|F'OM1;NmRF݃.'w:p*տIwF X܂t|阜[Ev';Yv.xITU+z'.\' gڊџ7ǯ;)W=&"[Z:!3HGN|E/ l29q$A#, E'2 i"zh,O|,?K_:.ٮAyWfPtc6S剏gq:k\CSEgPt#剏'>=";aS/٠o.ߜ{٠+4cLF}ԸA*9q(Y`D<nvҊ^=Ð:lja2`U)Z"z[W@ph7Q@=@|}蓯}%wp"A p"A p"qd *$ađ!ȶd4U8lVqeA9 `'S1 5CRf9bcwiZSРUgQJ$4 Kجqvk;G8D@ 8D@ 8D@ 8D@ 8D@ 8D@ 8D@HNV*p6;l2p"A p"A p"A pvkkQW޸WZĴj*oSm[7r`D]+}JR///W#a%sf-iyKX+e"62!@I(ru-VP榧$+8Y"pJ*҆(J + 8oh("S 8D@Ŏvp^ !oХm[aJXK ;/Bz|>7W mHsuiB @ Hh­\WQm |dj=Oѳ!l&".ʿJ]bZMjpsOM̺7W*s/7nd΃Mf6Q㇇<ެ­ZϘNs\ۥk%W j+ sz[jɜPcEUJFC聶}CLMVRi|IV mz's2Ipsϊj}mu3reUz/ oF>D?o1kF{ú4Bv8ڮmy6S uaMf;e_rP+h+qKڃwV@[†0O0 c0U*U.}qDžIV+eՆ7t]Ɖju`` MӖAZo۾GφHi*FNLCC #EB y.6pP@ 8DxZ:~bӁ "T\]E t:Ӌd@~ADa)`__ library which allows flexible cache back ends. The majority of the caching configuration options are set in the ``[cache]`` section of the ``/etc/keystone/keystone.conf`` file. The ``enabled`` option of the ``[cache]`` section must be set to ``True`` in order for any subsystem to cache responses. Each section that has the capability to be cached will have a ``caching`` boolean value that toggles caching behavior of that particular subsystem. So to enable only the token back end caching, set the values as follows: .. code-block:: ini [cache] enabled=true [catalog] caching=false [domain_config] caching=false [federation] caching=false [resource] caching=false [revoke] caching=false [role] caching=false [token] caching=true .. note:: Each subsystem is configured to cache by default. However, the global toggle for caching defaults to ``False``. A subsystem is only able to cache responses if the global toggle is enabled. Current functional back ends are: ``dogpile.cache.null`` A "null" backend that effectively disables all cache operations.(Default) ``dogpile.cache.memcached`` Memcached back end using the standard ``python-memcached`` library. 
``dogpile.cache.pylibmc`` Memcached back end using the ``pylibmc`` library. ``dogpile.cache.bmemcached`` Memcached using the ``python-binary-memcached`` library. ``dogpile.cache.redis`` Redis back end. ``dogpile.cache.dbm`` Local DBM file back end. ``dogpile.cache.memory`` In-memory cache, not suitable for use outside of testing as it does not cleanup its internal cache on cache expiration and does not share cache between processes. This means that caching and cache invalidation will not be consistent or reliable. ``dogpile.cache.memory_pickle`` In-memory cache, but serializes objects with pickle lib. It's not suitable for use outside of testing. The reason is the same with ``dogpile.cache.memory`` ``oslo_cache.mongo`` MongoDB as caching back end. ``oslo_cache.memcache_pool`` Memcached backend that does connection pooling. ``oslo_cache.etcd3gw`` Uses etcd 3.x for storage. ``oslo_cache.dict`` A DictCacheBackend based on dictionary, not suitable for use outside of testing as it does not share cache between processes.This means that caching and cache invalidation will not be consistent or reliable. Caching for tokens and tokens validation ---------------------------------------- The token subsystem is OpenStack Identity's most heavily used API. As a result, all types of tokens benefit from caching, including Fernet tokens. Although Fernet tokens do not need to be persisted, they should still be cached for optimal token validation performance. The token system has a separate ``cache_time`` configuration option, that can be set to a value above or below the global ``expiration_time`` default, allowing for different caching behavior from the other systems in OpenStack Identity. This option is set in the ``[token]`` section of the configuration file. The token revocation list cache time is handled by the configuration option ``revocation_cache_time`` in the ``[token]`` section. The revocation list is refreshed whenever a token is revoked. 
It typically sees significantly more requests than specific token retrievals or token validation calls. Here is a list of actions that are affected by the cached time: * getting a new token * revoking tokens * validating tokens * checking v3 tokens The delete token API calls invalidate the cache for the tokens being acted upon, as well as invalidating the cache for the revoked token list and the validate/check token calls. Token caching is configurable independently of the ``revocation_list`` caching. Lifted expiration checks from the token drivers to the token manager. This ensures that cached tokens will still raise a ``TokenNotFound`` flag when expired. For cache consistency, all token IDs are transformed into the short token hash at the provider and token driver level. Some methods have access to the full ID (PKI Tokens), and some methods do not. Cache invalidation is inconsistent without token ID normalization. Caching for non-token resources ------------------------------- Various other keystone components have a separate ``cache_time`` configuration option, that can be set to a value above or below the global ``expiration_time`` default, allowing for different caching behavior from the other systems in Identity service. This option can be set in various sections (for example, ``[role]`` and ``[resource]``) of the configuration file. The create, update, and delete actions for domains, projects and roles will perform proper invalidations of the cached methods listed above. For more information about the different back ends (and configuration options), see: - `dogpile.cache.memory `__ - `dogpile.cache.memcached `__ .. note:: The memory back end is not suitable for use in a production environment. - `dogpile.cache.redis `__ - `dogpile.cache.dbm `__ .. _cache_invalidation: Cache invalidation ------------------ A common concern with caching is relaying inaccurate information after updating or deleting a resource. 
Most subsystems within OpenStack Identity invalidate specific cache entries once they have changed. In cases where a specific cache entry cannot be invalidated from the cache, the cache region will be invalidated instead. This invalidates all entries within the cache to prevent returning stale or misleading data. A subsequent request for the resource will be fully processed and cached. .. WARNING:: Be aware that if a read-only back end is in use for a particular subsystem, the cache will not immediately reflect changes performed through the back end. Any given change may take up to the ``cache_time`` (if set in the subsystem section of the configuration) or the global ``expiration_time`` (set in the ``[cache]`` section of the configuration) before it is reflected. If this type of delay is an issue, we recommend disabling caching for that particular subsystem. Configure the Memcached back end example ---------------------------------------- The following example shows how to configure the memcached back end: .. code-block:: ini [cache] enabled = true backend = dogpile.cache.memcached backend_argument = url:127.0.0.1:11211 You need to specify the URL to reach the ``memcached`` instance with the ``backend_argument`` parameter. Verbose cache logging --------------------- We do not recommend using verbose cache logging by default in production systems since it's extremely noisy. However, you may need to debug cache issues. One way to see how keystone is interacting with a cache backend is to enhance logging. The following configuration will aggregate oslo and dogpile logs into keystone's log file with increased verbosity: .. code-block:: ini [DEFAULT] default_log_levels = oslo.cache=DEBUG,dogpile.core.dogpile=DEBUG [cache] debug_cache_backend = True These logs will include cache hits and misses, making it easier to diagnose cache configuration and connectivity issues. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/case-insensitive.rst0000664000175000017500000000642500000000000022632 0ustar00zuulzuul00000000000000============================== Case-Insensitivity in keystone ============================== Keystone currently handles the case-sensitivity for the naming of each resource a bit differently, depending on the resource itself, and the backend used. For example, depending on whether a user is backed by local SQL or LDAP, the case-sensitivity can be different. When it is case-insensitive, the casing will be preserved. For instance, a project with the name "myProject" will not end up changing to either all lower or upper case. Resources in keystone ===================== Below are examples of case-insensitivity in keystone for users, projects, and roles. Users ----- If a user with the name "MyUser" already exists, then the following call which creates a new user by the name of "myuser" will return a ``409 Conflict``: .. code-block:: console POST /v3/users .. code-block:: json { "user": { "name": "myuser" } } Projects -------- If a project with the name "Foobar" already exists, then the following call which creates a new project by the name of "foobar" will return a ``409 Conflict``: .. code-block:: console POST /v3/projects .. code-block:: json { "project": { "name": "foobar" } } Project Tags ^^^^^^^^^^^^ While project names are case-insensitive, project tags are case-sensitive. A tag with the value of ``mytag`` is different than ``MyTag``, and both values can be stored in the same project. Roles ----- Role names are case-insensitive. for example, when keystone bootstraps default roles, it creates "admin", "member", and "reader". If another role, "Member" (note the upper case 'M') is created, keystone will return a ``409 Conflict`` since it considers the name "Member" equivalent to "member". Note that case is preserved in this event. 
.. note:: As of the Rocky release, keystone will create three default roles when `keystone-manage bootstrap` is run: (``admin``, ``member``, ``reader``). For existing deployments, this can cause issues if an existing role matches one of these roles. Even if the casing is not an exact match (``member`` vs ``Member``), it will report an error since roles are considered case-insensitive. Backends ======== For each of these examples, we will refer to an existing project with the name "mYpRoJeCt" and user with the name "mYuSeR". The examples here are exaggerated to help display the case handling for each backend. MySQL & SQLite -------------- By default, MySQL/SQLite are case-insensitive but case-preserving for `varchar`. This means that setting a project name of "mYpRoJeCt" will cause attempting to create a new project named "myproject" to fail with keystone returning a ``409 Conflict``. However, the original value of "mYpRoJeCt" will still be returned since case is preserved. Users will be treated the same, if another user is added with the name "myuser", keystone will respond with ``409 Conflict`` since another user with the (same) name exists ("mYuSeR"). PostgreSQL ---------- PostgreSQL is case-sensitive by default, so if a project by the name of "myproject" is created with the existing "mYpRoJeCt", it will be created successfully. LDAP ---- By default, LDAP DNs are case-insensitive, so the example with users under MySQL will apply here as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/cli-manage-projects-users-and-roles.rst0000664000175000017500000003270000000000000026221 0ustar00zuulzuul00000000000000================================= Manage projects, users, and roles ================================= As an administrator, you manage projects, users, and roles. Projects are organizational units in the cloud to which you can assign users. 
Projects are also known as *tenants* or *accounts*. Users can be members of one or more projects. Roles define which actions users can perform. You assign roles to user-project pairs. You can define actions for OpenStack service roles in the ``/etc/PROJECT/policy.yaml`` files. For example, define actions for Compute service roles in the ``/etc/nova/policy.yaml`` file. You can manage projects, users, and roles independently from each other. During cloud set up, the operator defines at least one project, user, and role. You can add, update, and delete projects and users, assign users to one or more projects, and change or remove the assignment. To enable or temporarily disable a project or user, update that project or user. You can also change quotas at the project level. Before you can delete a user account, you must remove the user account from its primary project. Before you can run client commands, you need to have a cloud config file or you can download and source an OpenStack RC file. See the :python-openstackclient-doc:`Configuration ` documentation from the python-openstackclient project for more details. Projects ~~~~~~~~ A project is a group of zero or more users. In Compute, a project owns virtual machines. In Object Storage, a project owns containers. Users can be associated with more than one project. Each project and user pairing can have a role associated with it. List projects ------------- List all projects with their ID, name, and whether they are enabled or disabled: .. 
code-block:: console $ openstack project list +----------------------------------+--------------------+ | ID | Name | +----------------------------------+--------------------+ | f7ac731cc11f40efbc03a9f9e1d1d21f | admin | | c150ab41f0d9443f8874e32e725a4cc8 | alt_demo | | a9debfe41a6d4d09a677da737b907d5e | demo | | 9208739195a34c628c58c95d157917d7 | invisible_to_admin | | 3943a53dc92a49b2827fae94363851e1 | service | | 80cab5e1f02045abad92a2864cfd76cb | test_project | +----------------------------------+--------------------+ Create a project ---------------- Create a project named ``new-project``: .. code-block:: console $ openstack project create --description 'my new project' new-project \ --domain default +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | my new project | | domain_id | e601210181f54843b51b3edff41d4980 | | enabled | True | | id | 1a4a0618b306462c9830f876b0bd6af2 | | is_domain | False | | name | new-project | | parent_id | e601210181f54843b51b3edff41d4980 | | tags | [] | +-------------+----------------------------------+ - Creating a project without using a domain scoped token, i.e. using a project scoped token or a system scoped token, and also without specifying a domain or domain_id, the project will automatically be created on the default domain. Update a project ---------------- Specify the project ID to update a project. You can update the name, description, and enabled status of a project. - To temporarily disable a project: .. code-block:: console $ openstack project set PROJECT_ID --disable - To enable a disabled project: .. code-block:: console $ openstack project set PROJECT_ID --enable - To update the name of a project: .. code-block:: console $ openstack project set PROJECT_ID --name project-new - To verify your changes, show information for the updated project: .. 
code-block:: console $ openstack project show PROJECT_ID +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | my new project | | domain_id | e601210181f54843b51b3edff41d4980 | | enabled | True | | id | 0b0b995694234521bf93c792ed44247f | | is_domain | False | | name | new-project | | parent_id | e601210181f54843b51b3edff41d4980 | | tags | [] | +-------------+----------------------------------+ Delete a project ---------------- Specify the project ID to delete a project: .. code-block:: console $ openstack project delete PROJECT_ID Users ~~~~~ List users ---------- List all users: .. code-block:: console $ openstack user list +----------------------------------+----------+ | ID | Name | +----------------------------------+----------+ | 352b37f5c89144d4ad0534139266d51f | admin | | 86c0de739bcb4802b8dc786921355813 | demo | | 32ec34aae8ea432e8af560a1cec0e881 | glance | | 7047fcb7908e420cb36e13bbd72c972c | nova | +----------------------------------+----------+ Create a user ------------- To create a user, you must specify a name. Optionally, you can specify a project ID, password, and email address. It is recommended that you include the project ID and password because the user cannot log in to the dashboard without this information. Create the ``new-user`` user: .. code-block:: console $ openstack user create --project new-project --password PASSWORD new-user +------------+----------------------------------+ | Field | Value | +------------+----------------------------------+ | email | None | | enabled | True | | id | 6322872d9c7e445dbbb49c1f9ca28adc | | name | new-user | | project_id | 0b0b995694234521bf93c792ed44247f | | username | new-user | +------------+----------------------------------+ Update a user ------------- You can update the name, email address, and enabled status for a user. - To temporarily disable a user account: .. 
code-block:: console $ openstack user set USER_NAME --disable If you disable a user account, the user cannot log in to the dashboard. However, data for the user account is maintained, so you can enable the user at any time. - To enable a disabled user account: .. code-block:: console $ openstack user set USER_NAME --enable - To change the name and description for a user account: .. code-block:: console $ openstack user set USER_NAME --name user-new --email new-user@example.com User has been updated. Delete a user ------------- Delete a specified user account: .. code-block:: console $ openstack user delete USER_NAME Roles and role assignments ~~~~~~~~~~~~~~~~~~~~~~~~~~ List available roles -------------------- List the available roles: .. code-block:: console $ openstack role list +----------------------------------+---------------+ | ID | Name | +----------------------------------+---------------+ | 71ccc37d41c8491c975ae72676db687f | member | | 149f50a1fe684bfa88dae76a48d26ef7 | ResellerAdmin | | 9fe2ff9ee4384b1894a90878d3e92bab | reader | | 6ecf391421604da985db2f141e46a7c8 | admin | | deb4fffd123c4d02a907c2c74559dccf | anotherrole | +----------------------------------+---------------+ Create a role ------------- Users can be members of multiple projects. To assign users to multiple projects, define a role and assign that role to a user-project pair. Create the ``new-role`` role: .. code-block:: console $ openstack role create new-role +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | None | | domain_id | None | | id | a34425c884c74c8881496dc2c2e84ffc | | name | new-role | +-------------+----------------------------------+ .. note:: If you are using identity v3, you may need to use the ``--domain`` option with a specific domain name. Assign a role ------------- To assign a user to a project, you must assign the role to a user-project pair. #. 
Assign a role to a user-project pair: .. code-block:: console $ openstack role add --user USER_NAME --project PROJECT_NAME ROLE_NAME For example, assign the ``new-role`` role to the ``demo`` user and ``test-project`` project pair: .. code-block:: console $ openstack role add --user demo --project test-project new-role #. Verify the role assignment: .. code-block:: console $ openstack role assignment list --user USER_NAME \ --project PROJECT_NAME --names +-------------+--------------+-------+--------------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +-------------+--------------+-------+--------------+--------+--------+-----------+ | new-role | demo@Default | | demo@Default | | | False | | member | demo@Default | | demo@Default | | | False | | anotherrole | demo@Default | | demo@Default | | | False | +-------------+--------------+-------+--------------+--------+--------+-----------+ .. note:: Before the Newton release, users would run the :command:`openstack role list --user USER_NAME --project TENANT_ID` command to verify the role assignment. View role details ----------------- View details for a specified role: .. code-block:: console $ openstack role show ROLE_NAME +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | None | | domain_id | None | | id | a34425c884c74c8881496dc2c2e84ffc | | name | new-role | +-------------+----------------------------------+ Remove a role ------------- Remove a role from a user-project pair: #. Run the :command:`openstack role remove` command: .. code-block:: console $ openstack role remove --user USER_NAME --project PROJECT_NAME ROLE_NAME #. Verify the role removal: .. code-block:: console $ openstack role assignment list --user USER_NAME --project PROJECT_NAME --names If the role was removed, the command output omits the removed role. 
Creating implied roles ---------------------- It is possible to build role hierarchies by having roles imply other roles. These are called implied roles, or role inference rules. To illustrate the capability, let's have the ``admin`` role imply the ``member`` role. In this example, if a user was assigned the prior role, which in this case is the ``admin`` role, they would also get the ``member`` role that it implies. .. code-block:: console $ openstack implied role create admin --implied-role member +------------+----------------------------------+ | Field | Value | +------------+----------------------------------+ | implies | 71ccc37d41c8491c975ae72676db687f | | prior_role | 29c09e68e6f741afa952a837e29c700b | +------------+----------------------------------+ .. note:: Role implications only go one way, from a "prior" role to an "implied" role. Therefore assigning a user the ``member`` will not grant them the ``admin`` role. This makes it easy to break up large roles into smaller pieces, allowing for fine grained permissions, while still having an easy way to assign all the pieces as if they were a single one. For example, you can have a ``member`` role imply ``compute_member``, ``network_member``, and ``volume_member``, and then assign either the full-blown ``member`` role to users or any one of the subsets. Listing implied roles --------------------- To list implied roles: .. 
code-block:: console $ openstack implied role list +----------------------------------+-----------------+----------------------------------+-------------------+ | Prior Role ID | Prior Role Name | Implied Role ID | Implied Role Name | +----------------------------------+-----------------+----------------------------------+-------------------+ | 29c09e68e6f741afa952a837e29c700b | admin | 71ccc37d41c8491c975ae72676db687f | member | +----------------------------------+-----------------+----------------------------------+-------------------+ Deleting implied roles ---------------------- To delete a role inference rule: .. code-block:: console $ openstack implied role delete admin --implied-role member .. note:: Deleting an implied role removes the role inference rule. It does not delete the prior or implied role. Therefore if a user was assigned the prior role, they will no longer have the roles that it implied. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/configuration.rst0000664000175000017500000000125400000000000022223 0ustar00zuulzuul00000000000000====================== Keystone Configuration ====================== Information and recommendations for general configuration of keystone for keystone administrators. See the main :ref:`Configuration ` section for complete keystone configuration documentation and sample config files. .. include:: troubleshoot.inc .. include:: logging.inc .. include:: domain-specific-config.inc .. include:: integrate-with-ldap.inc .. include:: caching-layer.inc .. include:: security-compliance.inc .. include:: performance.inc .. include:: url-safe-naming.inc .. include:: limit-list-size.inc .. include:: endpoint-filtering.inc .. 
include:: endpoint-policy.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/configure-https.rst0000664000175000017500000000615100000000000022476 0ustar00zuulzuul00000000000000Configure HTTPS in Identity Service ----------------------------------- The following part describes steps to enable both HTTP and HTTPS with a self-signed certificate. 1. Generate an RSA private key. .. code-block:: console stack@oauth2-0-server:/$ openssl genrsa -out keystone.key 2048 Generating RSA private key, 2048 bit long modulus (2 primes) .........................................+++++ .........................+++++ e is 65537 (0x010001) 2. Create a certificate signing request. .. code-block:: console stack@oauth2-0-server:/$ openssl req -new -key keystone.key -out keystone.csr You are about to be asked to enter information that will be incorporated into your certificate request. What you are about to enter is what is called a Distinguished Name or a DN. There are quite a few fields but you can leave some blank For some fields there will be a default value, If you enter '.', the field will be left blank. ----- Country Name (2 letter code) [AU]: State or Province Name (full name) [Some-State]: Locality Name (eg, city) []: Organization Name (eg, company) [Internet Widgits Pty Ltd]: Organizational Unit Name (eg, section) []: Common Name (e.g. server FQDN or YOUR name) []:keystone.host Email Address []: Please enter the following 'extra' attributes to be sent with your certificate request A challenge password []: An optional company name []: 3. Generate a self-signed certificate. .. code-block:: console stack@oauth2-0-server:/$ openssl x509 -req -days 365 -in keystone.csr \ -signkey keystone.key -out keystone.host.crt Signature ok subject=C = , ST = , L = , O = , OU = , CN = keystone.host, emailAddress = Getting Private key 4. 
Append the configuration file for setting the HTTPS port service under the directory ``/etc/apache2/sites-enabled/``. .. code-block:: console stack@oauth2-0-server:/$ sudo ln -s \ /etc/apache2/sites-available/000-default.conf \ /etc/apache2/sites-enabled/000-default.conf 5. Modify the apache configuration file and add proxy rules to implement HTTPS support for the Keystone service. .. code-block:: console stack@oauth2-0-server:/$ vi 000-default.conf DocumentRoot /var/www/html SSLCertificateFile /etc/ssl/certs/keystone.host.crt SSLCertificateKeyFile /etc/ssl/certs/keystone.key SSLEngine on SSLProtocol all -SSLv2 -SSLv3 SSLCipherSuite ECDH:AESGCM:HIGH:!RC4:!DH:!MD5:!aNULL:!eNULL SSLHonorCipherOrder on ProxyPass "/identity" "unix:/var/run/uwsgi/keystone-wsgi-public.socket|uwsgi://uwsgi-uds-keystone-wsgi-public" retry=0 6. Restart apache service so that the modified configuration information takes effect. .. code-block:: console stack@oauth2-0-server:/$ systemctl restart apache2.service ==== AUTHENTICATING FOR org.freedesktop.systemd1.manage-units === Authentication is required to restart 'apache2.service'. Authenticating as: Ubuntu (ubuntu) Password: ==== AUTHENTICATION COMPLETE === .. _RFC6749: https://datatracker.ietf.org/doc/html/rfc6749././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/configure_tokenless_x509.rst0000664000175000017500000005576700000000000024233 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ================================================ Configuring Keystone for Tokenless Authorization ================================================ ----------- Definitions ----------- * `X.509 Tokenless Authorization`: Provides a means to authorize client operations within Keystone by using an X.509 SSL client certificate without having to issue a token. This feature is designed to reduce the complexity of user token validation in Keystone ``auth_token`` middleware by eliminating the need for service user token for authentication and authorization. Therefore, there's no need to having to create and maintain a service user account for the sole purpose of user token validation. Furthermore, this feature improves efficiency by avoiding service user token handling (i.e. request, cache, and renewal). By not having to deal with service user credentials in the configuration files, deployers are relieved of the burden of having to protect the server user passwords throughout the deployment lifecycle. This feature also improve security by using X.509 certificate instead of password for authentication. For details, please refer to the specs `Tokenless Authorization with X.509 Client SSL Certificate`_ * `Public Key Infrastructure or PKI`: a system which utilize public key cryptography to achieve authentication, authorization, confidentiality, integrity, non-repudiation. In this system, the identities are represented by public key certificates. Public key certificate handling is governed by the `X.509`_ standard. See `Public Key Infrastructure`_ and `X.509`_ for more information. * `X.509 Certificate`: a time bound digital identity, which is certified or digitally signed by its issuer using cryptographic means as defined by the `X.509`_ standard. It contains information which can be used to uniquely identify its owner. 
For example, the owner of the certificate is identified by the ``Subject`` attribute while the issuer is identified by ``Issuer`` attribute. In operation, certificates are usually stored in `Privacy-Enhanced Mail`_ (PEM) format. Here's an example of what a certificate typically contains: .. code-block:: javascript Certificate: Data: Version: 3 (0x2) Serial Number: 4098 (0x1002) Signature Algorithm: sha256WithRSAEncryption Issuer: DC = com, DC = somedemo, O = openstack, OU = keystone, CN = Intermediate CA Validity Not Before: Jul 5 18:42:01 2019 GMT Not After : Jul 2 18:42:01 2029 GMT Subject: DC = com, DC = somedemo, O = Default, OU = keystone, CN = glance Subject Public Key Info: Public Key Algorithm: rsaEncryption Public-Key: (2048 bit) Modulus: 00:cf:35:8b:cd:4f:17:28:38:25:f7:e2:ac:ce:4e: d7:05:74:2f:99:04:f8:c2:13:14:50:18:70:d6:b0: 53:62:15:60:59:99:90:47:e2:7e:bf:ca:30:4a:18: f5:b8:29:1e:cc:d4:b8:49:9c:4a:aa:d9:10:b9:d7: 9f:55:85:cf:e3:44:d2:3c:95:42:5a:b0:53:3e:49: 9d:6b:b2:a0:9f:72:9d:76:96:55:8b:ee:c4:71:46: ab:bd:12:71:42:a0:60:29:7a:66:16:e1:fd:03:17: af:a3:c7:26:c3:c3:8b:a7:f9:c0:22:08:2d:e4:5c: 07:e1:44:58:c1:b1:88:ae:45:5e:03:10:bb:b4:c2: 42:52:da:4e:b5:1b:d6:6f:49:db:a4:5f:8f:e5:79: 9f:73:c2:37:de:99:a7:4d:6f:cb:b5:f9:7e:97:e0: 77:c8:40:21:40:ef:ab:d3:55:72:37:6c:28:0f:bd: 37:8c:3a:9c:e9:a0:21:6b:63:3f:7a:dd:1b:2c:90: 07:37:66:86:66:36:ef:21:bb:43:df:d5:37:a9:fa: 4b:74:9a:7c:4b:cd:8b:9d:3b:af:6d:50:fe:c9:0a: 25:35:c5:1d:40:35:1d:1f:f9:10:fd:b6:5c:45:11: bb:67:11:81:3f:ed:d6:27:04:98:8f:9e:99:a1:c8: c1:2d Exponent: 65537 (0x10001) X509v3 extensions: X509v3 Basic Constraints: CA:FALSE Netscape Cert Type: SSL Client, S/MIME Netscape Comment: OpenSSL Generated Client Certificate X509v3 Subject Key Identifier: EE:38:FB:60:65:CD:81:CE:B2:01:E3:A5:99:1B:34:6C:1A:74:97:BB X509v3 Authority Key Identifier: keyid:64:17:77:31:00:F2:ED:90:9A:A8:1D:B5:7D:75:06:03:B5:FD:B9:C0 X509v3 Key Usage: critical Digital Signature, Non Repudiation, Key Encipherment X509v3 
Extended Key Usage: TLS Web Client Authentication, E-mail Protection Signature Algorithm: sha256WithRSAEncryption 82:8b:17:c6:f4:63:eb:8d:69:03:7a:bf:54:7f:37:02:eb:94: ef:57:fd:27:8f:f8:67:e9:0e:3b:0a:40:66:11:68:e6:04:1a: 8a:da:47:ed:83:eb:54:34:3b:5b:70:18:cf:62:e2:6d:7c:74: 4c:cf:14:b3:a9:70:b2:68:ed:19:19:71:6f:7d:87:22:38:8d: 83:c6:59:15:74:19:5b:a2:64:6f:b9:9a:81:3d:0a:67:58:d1: e2:b2:9b:9b:8f:60:7a:8c:0e:61:d9:d7:04:63:cc:58:af:36: a4:61:86:44:1c:64:e2:9b:bd:f3:21:87:dd:18:81:80:af:0f: d6:4c:9f:ae:0f:01:e0:0e:38:4d:5d:71:da:0b:11:39:bd:c3: 5d:0c:db:14:ca:bf:7f:07:37:c9:36:bd:22:a5:73:c6:e1:13: 53:15:de:ac:4a:4b:dc:48:90:47:06:fa:d4:d2:5d:c6:d2:d4: 3f:0f:49:0f:27:de:21:b0:bd:a3:92:c3:cb:69:b6:8d:94:e1: e3:40:b4:80:c7:e6:e2:df:0a:94:52:d1:16:41:0f:bc:29:a8: 93:40:1b:77:28:a3:f2:cb:3c:7f:bb:ae:a6:0e:b3:01:78:09: d3:2b:cf:2f:47:83:91:36:37:43:34:6e:80:2b:81:10:27:95: 95:ae:1e:93:42:94:a6:23:b8:07:c0:0f:38:23:70:b0:8e:79: 14:cd:72:8a:90:bf:77:ad:74:3c:23:9e:67:5d:0e:26:15:6e: 20:95:6d:d0:89:be:a3:6c:4a:13:1d:39:fb:21:e3:9c:9f:f3: ff:15:da:0a:28:29:4e:f4:7f:5e:0f:70:84:80:7c:09:5a:1c: f4:ac:c9:1b:9d:38:43:dd:27:00:95:ef:14:a0:57:3e:26:0b: d8:bb:40:d6:1f:91:92:f0:4e:5d:93:1c:b7:3d:bd:83:ef:79: ee:47:ca:61:04:00:e6:39:05:ab:f0:cd:47:e9:25:c8:3a:4c: e5:62:9f:aa:8a:ba:ea:46:10:ef:bd:1e:24:5f:0c:89:8a:21: bb:9d:c7:73:0f:b9:b5:72:1f:1f:1b:5b:ff:3a:cb:d8:51:bc: bb:9a:40:91:a9:d5:fe:95:ac:73:a5:12:6a:b2:e3:b1:b2:7d: bf:e7:db:cd:9f:24:63:6e:27:cf:d8:82:d9:ac:d8:c9:88:ea: 4f:1c:ae:7d:b7:c7:81:b2:1c:f8:6b:6b:85:3b:f2:14:cb:c7: 61:81:ad:64:e7:d9:90:a3:ea:69:7e:26:7a:0a:29:7b:1b:2a: e0:38:f7:58:d1:90:82:44:01:ab:05:fd:68:0c:ab:9e:c6:94: 76:34:46:8b:66:bb:02:07 See `public key certificate`_ for more information. * `Issuer`: the issuer of a X.509 certificate. It is also known as `Certificate Authority (CA)`_ or Certification Authority. Issuer is typically represented in `RFC 2253`_ format. 
Throughout this document, ``issuer``, ``issuer DN``, ``CA``, and ``trusted issuer`` are used interchangeably. .. _`Tokenless Authorization with X.509 Client SSL Certificate`: https://specs.openstack.org/openstack/keystone-specs/specs/liberty/keystone-tokenless-authz-with-x509-ssl-client-cert.html .. _`Public Key Infrastructure`: https://en.wikipedia.org/wiki/Public_key_infrastructure .. _`X.509`: https://en.wikipedia.org/wiki/X.509 .. _`public key certificate`: https://en.wikipedia.org/wiki/Public_key_certificate .. _`Privacy-Enhanced Mail`: https://en.wikipedia.org/wiki/Public_key_certificate .. _`RFC 2253`: https://tools.ietf.org/html/rfc2253 .. _`Certificate Authority (CA)`: https://en.wikipedia.org/wiki/Certificate_authority Prerequisites ------------- This feature requires Keystone API proxy SSL terminator to validate the incoming X.509 SSL client certificate and pass the certificate information (i.e. subject DN, issuer DN, etc) to the Keystone application as part of the request environment. At the time of this writing the feature has been tested with either HAProxy or Apache as Keystone API proxy SSL terminator only. The rest of this document required readers to familiar with: * `Public Key Infrastructure (PKI) and certificate management`_ * `SSL with client authentication`_, or commonly known as two-way SSL * `Public Key Infrastructure (PKI) and certificate management`_ * `Apache SSL configuration`_ * `HAProxy SSL configuration`_ .. _`Public Key Infrastructure (PKI) and certificate management`: https://en.wikipedia.org/wiki/Public_key_infrastructure .. _`SSL with client authentication`: https://tools.ietf.org/html/rfc5246#section-7.4.6 .. _`Apache SSL configuration`: https://httpd.apache.org/docs/trunk/mod/mod_ssl.html#ssloptions .. _`HAProxy SSL configuration`: http://cbonte.github.io/haproxy-dconv/1.7/configuration.html#7.3.4 Configuring this feature requires `OpenSSL Command Line Tool (CLI)`_. 
Please refer to the respective OS installation guide on how to install it. .. _`OpenSSL Command Line Tool (CLI)`: https://www.openssl.org/docs/manmaster/man1/openssl.html ---------------------- Keystone Configuration ---------------------- This feature utilizes Keystone federation capability to determine the authorization associated with the incoming X.509 SSL client certificate by mapping the certificate attributes to a Keystone identity. Therefore, the direct issuer or trusted Certification Authority (CA) of the client certificate is the remote Identity Provider (IDP), and the hexadecimal output of the SHA256 hash of the issuer distinguished name (DN) is used as the IDP ID. .. NOTE:: Client certificate issuer DN may be formatted differently depending on the SSL terminator. For example, Apache mod_ssl may use `RFC 2253`_ while HAProxy may use the old format. The old format is used by applications that linked with an older version of OpenSSL where the string representation of the distinguished name has not yet become a de facto standard. For more information on the old formation, please see the `nameopt`_ in the OpenSSL CLI manual. Therefore, it is critically important to keep the format consistent throughout the configuration as Keystone does exact string match when comparing certificate attributes. .. _`nameopt`: https://www.openssl.org/docs/manmaster/man1/x509.html .. _`RFC 2253`: https://tools.ietf.org/html/rfc2253 How to obtain trusted issuer DN ------------------------------- If SSL terminates at either HAProxy or Apache, the client certificate issuer DN can be obtained by using the OpenSSL CLI. Since version 2.3.11, Apache mod_ssl by default uses `RFC 2253`_ when handling certificate distinguished names. However, deployer have the option to use the old format by configuring the `LegacyDNStringFormat`_ option. .. _`RFC 2253`: https://tools.ietf.org/html/rfc2253 .. 
_`LegacyDNStringFormat`: https://httpd.apache.org/docs/trunk/mod/mod_ssl.html#ssloptions HAProxy, on the other hand, only supports the old format. To obtain issuer DN in RFC 2253 format: .. code-block:: bash $ openssl x509 -issuer -noout -in client_cert.pem -nameopt rfc2253 | sed 's/^\s*issuer=//' To obtain issuer DN in old format: .. code-block:: bash $ openssl x509 -issuer -noout -in client_cert.pem -nameopt compat | sed 's/^\s*issuer=//' How to calculate the IDP ID from trusted issuer DN -------------------------------------------------- The hexadecimal output of the SHA256 hash of the trusted issuer DN is being used as the Identity Provider ID in Keystone. It can be obtained using OpenSSL CLI. To calculate the IDP ID for issuer DN in RFC 2253 format: .. code-block:: bash $ openssl x509 -issuer -noout -in client_cert.pem -nameopt rfc2253 | tr -d '\n' | sed 's/^\s*issuer=//' | openssl dgst -sha256 -hex | awk '{print $2}' To calculate the IDP ID for issuer DN in old format: .. code-block:: bash $ openssl x509 -issuer -noout -in client_cert.pem -nameopt compat | tr -d '\n' | sed 's/^\s*issuer=//' | openssl dgst -sha256 -hex | awk '{print $2}' Keystone Configuration File Changes ----------------------------------- The following options in the ``tokenless_auth`` section of the Keystone configuration file `keystone.conf` are used to enable the X.509 tokenless authorization feature: * ``trusted_issuer`` - A list of trusted issuers for the X.509 SSL client certificates. More specifically the list of trusted issuer DNs mentioned in the `How to obtain trusted issuer DN`_ section above. The format of the trusted issuer DNs must match exactly with what the SSL terminator passed into the request environment. For example, if SSL terminates in Apache mod_ssl, then the issuer DN should be in RFC 2253 format. Whereas if SSL terminates in HAProxy, then the issuer DN is expected to be in the old format. This is a multi-string list option. 
The absence of any trusted issuers means the X.509 tokenless authorization feature is effectively disabled. * ``protocol`` - The protocol name for the X.509 tokenless authorization along with the option `issuer_attribute` below can look up its corresponding mapping. It defaults to ``x509``. * ``issuer_attribute`` - The issuer attribute that is served as an IdP ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. It is the environment variable in the WSGI environment that references to the Issuer of the client certificate. It defaults to ``SSL_CLIENT_I_DN``. This is a sample configuration for two `trusted_issuer` and a `protocol` set to ``x509``. .. code-block:: ini [tokenless_auth] trusted_issuer = emailAddress=admin@foosigner.com,CN=Foo Signer,OU=eng,O=abc,L=San Jose,ST=California,C=US trusted_issuer = emailAddress=admin@openstack.com,CN=OpenStack Cert Signer,OU=keystone,O=openstack,L=Sunnyvale,ST=California,C=US protocol = x509 ------------- Setup Mapping ------------- Like federation, X.509 tokenless authorization also utilizes the mapping mechanism to formulate an identity. The identity provider must correspond to the issuer of the X.509 SSL client certificate. The protocol for the given identity is ``x509`` by default, but can be configurable. Create an Identity Provider (IDP) --------------------------------- As mentioned, the Identity Provider ID is the hexadecimal output of the SHA256 hash of the issuer distinguished name (DN). .. NOTE:: If there are multiple trusted issuers, there must be multiple IDP created, one for each trusted issuer. To create an IDP for a given trusted issuer, follow the instructions in the `How to calculate the IDP ID from trusted issuer DN`_ section to calculate the IDP ID. Then use OpenStack CLI to create the IDP. i.e. .. 
code-block:: bash $ openstack identity provider create --description 'IDP foo' Create a Map ------------ A mapping needs to be created to map the ``Subject DN`` in the client certificate as a user to yield a valid local user if the user's ``type`` defined as ``local`` in the mapping. For example, the client certificate has ``Subject DN`` as ``CN=alex,OU=eng,O=nice-network,L=Sunnyvale, ST=California,C=US``, in the following examples, ``user_name`` will be mapped to``alex`` and ``domain_name`` will be mapped to ``nice-network``. And it has user's ``type`` set to ``local``. If user's ``type`` is not defined, it defaults to ``ephemeral``. Please refer to `mod_ssl`_ for the detailed mapping attributes. .. _`mod_ssl`: http://httpd.apache.org/docs/current/mod/mod_ssl.html .. code-block:: javascript [ { "local": [ { "user": { "name": "{0}", "domain": { "name": "{1}" }, "type": "local" } } ], "remote": [ { "type": "SSL_CLIENT_S_DN_CN", "whitelist": ["glance", "nova", "swift", "neutron"] }, { "type": "SSL_CLIENT_S_DN_O", "whitelist": ["Default"] } ] } ] When user's ``type`` is not defined or set to ``ephemeral``, the mapped user does not have to be a valid local user but the mapping must yield at least one valid local group. For example: .. code-block:: javascript [ { "local": [ { "user": { "name": "{0}", "type": "ephemeral" }, "group": { "domain": { "name": "{1}" }, "name": "openstack_services" } } ], "remote": [ { "type": "SSL_CLIENT_S_DN_CN", "whitelist": ["glance", "nova", "swift", "neutron"] }, { "type": "SSL_CLIENT_S_DN_O", "whitelist": ["Default"] } ] } ] .. NOTE:: The above mapping assume openstack_services group already exist and have the proper role assignments (i.e. allow token validation) If not, it will need to be created. To create a mapping using OpenStack CLI, assuming the mapping is saved into a file ``x509_tokenless_mapping.json``: .. code-block:: bash $ openstack mapping create --rules x509_tokenless_mapping.json x509_tokenless .. 
NOTE:: The mapping ID is arbitrary and it can be any string as opposed to IDP ID. Create a Protocol ----------------- The name of the protocol must be the same as the one specified by the ``protocol`` option in ``tokenless_auth`` section of the Keystone configuration file. The protocol name is user designed and it can be any name as opposed to IDP ID. A protocol name and an IDP ID will uniquely identify a mapping. To create a protocol using OpenStack CLI: .. code-block:: bash $ openstack federation protocol create --identity-provider --mapping x509_tokenless x509 .. NOTE:: If there are multiple trusted issuers, there must be multiple protocol created, one for each IDP. All IDP can share a same mapping but the combination of IDP ID and protocol must be unique. ---------------------------- SSL Terminator Configuration ---------------------------- Apache Configuration -------------------- If SSL terminates at Apache mod_ssl, Apache must be configured to handle two-way SSL and pass the SSL certificate information to the Keystone application as part of the request environment. The Client authentication attribute ``SSLVerifyClient`` should be set as ``optional`` to allow other token authentication methods and attribute ``SSLOptions`` needs to set as ``+StdEnvVars`` to allow certificate attributes to be passed. For example, .. code-block:: ini WSGIScriptAlias / /var/www/cgi-bin/keystone/main ErrorLog /var/log/apache2/keystone.log CustomLog /var/log/apache2/access.log combined SSLEngine on SSLCertificateFile /etc/apache2/ssl/apache.cer SSLCertificateKeyFile /etc/apache2/ssl/apache.key SSLCACertificatePath /etc/apache2/capath SSLOptions +StdEnvVars SSLVerifyClient optional HAProxy and Apache Configuration -------------------------------- If SSL terminates at HAProxy and Apache is the API proxy for the Keystone application, HAProxy must configured to handle two-way SSL and convey the SSL certificate information via the request headers. 
Apache in turn will need to bring those request headers into the request environment. Here's an example on how to configure HAProxy to handle two-way SSL and pass the SSL certificate information via the request headers. .. code-block:: ini frontend http-frontend mode http option forwardfor bind 10.1.1.1:5000 ssl crt /etc/keystone/ssl/keystone.pem ca-file /etc/keystone/ssl/ca.pem verify optional reqadd X-Forwarded-Proto:\ https if { ssl_fc } http-request set-header X-SSL %[ssl_fc] http-request set-header X-SSL-Client-Verify %[ssl_c_verify] http-request set-header X-SSL-Client-SHA1 %{+Q}[ssl_c_sha1] http-request set-header X-SSL-Client-DN %{+Q}[ssl_c_s_dn] http-request set-header X-SSL-Client-CN %{+Q}[ssl_c_s_dn(cn)] http-request set-header X-SSL-Client-O %{+Q}[ssl_c_s_dn(o)] http-request set-header X-SSL-Issuer %{+Q}[ssl_c_i_dn] http-request set-header X-SSL-Issuer-CN %{+Q}[ssl_c_i_dn(cn)] When the request gets to the Apache Keystone API Proxy, Apache will need to bring those SSL headers into the request environment. Here's an example on how to configure Apache to achieve that. .. code-block:: ini WSGIScriptAlias / /var/www/cgi-bin/keystone/main # Bring the needed SSL certificate attributes from HAProxy into the # request environment SetEnvIf X-SSL-Issuer "^(.*)$" SSL_CLIENT_I_DN=$0 SetEnvIf X-SSL-Issuer-CN "^(.*)$" SSL_CLIENT_I_DN_CN=$0 SetEnvIf X-SSL-Client-CN "^(.*)$" SSL_CLIENT_S_DN_CN=$0 SetEnvIf X-SSL-Client-O "^(.*)$" SSL_CLIENT_S_DN_O=$0 ------------------------------- Setup ``auth_token`` middleware ------------------------------- In order to use ``auth_token`` middleware as the service client for X.509 tokenless authorization, both configurable options and scope information will need to be setup. Configurable Options -------------------- The following configurable options in ``auth_token`` middleware should set to the correct values: * ``auth_type`` - Must set to ``v3tokenlessauth``. * ``certfile`` - Set to the full path of the certificate file. 
* ``keyfile`` - Set to the full path of the private key file. * ``cafile`` - Set to the full path of the trusted CA certificate file. * ``project_name`` or ``project_id`` - set to the scoped project. * ``project_domain_name`` or ``project_domain_id`` - if ``project_name`` is specified. Here's an example of ``auth_token`` middleware configuration using X.509 tokenless authorization for user token validation. .. code-block:: ini [keystone_authtoken] memcached_servers = localhost:11211 cafile = /etc/keystone/ca.pem project_domain_name = Default project_name = service auth_url = https://192.168.0.10/identity/v3 auth_type = v3tokenlessauth certfile = /etc/glance/certs/glance.pem keyfile = /etc/glance/private/glance_private_key.pem ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/credential-encryption.rst0000664000175000017500000001051600000000000023657 0ustar00zuulzuul00000000000000===================== Credential Encryption ===================== As of the Newton release, keystone encrypts all credentials stored in the default ``sql`` backend. Credentials are encrypted with the same mechanism used to encrypt Fernet tokens, ``fernet``. Keystone provides only one type of credential encryption but the encryption provider is pluggable in the event you wish to supply a custom implementation. This document details how credential encryption works, how to migrate existing credentials in a deployment, and how to manage encryption keys for credentials. Configuring credential encryption --------------------------------- The configuration for credential encryption is straightforward. There are only two configuration options needed: .. code-block:: ini [credential] provider = fernet key_repository = /etc/keystone/credential-keys/ ``[credential] provider`` defaults to the only option supplied by keystone, ``fernet``. 
There is no reason to change this option unless you wish to provide a custom credential encryption implementation. The ``[credential] key_repository`` location is a requirement of using ``fernet`` but will default to the ``/etc/keystone/credential-keys/`` directory. Both ``[credential] key_repository`` and ``[fernet_tokens] key_repository`` define locations for keys used to encrypt things. One holds the keys to encrypt and decrypt credentials and the other holds keys to encrypt and decrypt tokens. It is imperative that these repositories are managed separately and they must not share keys. Meaning they cannot share the same directory path. The ``[credential] key_repository`` is only allowed to have three keys. This is not configurable and allows for credentials to be re-encrypted periodically with a new encryption key for the sake of security. How credential encryption works ------------------------------- The implementation of this feature did not change any existing credential API contracts. All changes are transparent to the user unless you're inspecting the credential backend directly. When creating a credential, keystone will encrypt the ``blob`` attribute before persisting it to the backend. Keystone will also store a hash of the key that was used to encrypt the information in that credential. Since Fernet is used to encrypt credentials, a key repository consists of multiple keys. Keeping track of which key was used to encrypt each credential is an important part of encryption key management. Why this is important is detailed later in the `Encryption key management` section. When updating an existing credential's ``blob`` attribute, keystone will encrypt the new ``blob`` and update the key hash. When listing or showing credentials, all ``blob`` attributes are decrypted in the response. Neither the cipher text, nor the hash of the key used to encrypt the ``blob`` are exposed through the API. Furthermore, the key is only used internally to keystone. 
Encryption key management ------------------------- Key management of ``[credential] key_repository`` is handled with three ``keystone-manage`` commands: 1. ``keystone-manage credential_setup`` 2. ``keystone-manage credential_rotate`` 3. ``keystone-manage credential_migrate`` ``keystone-manage credential_setup`` will populate ``[credential] key_repository`` with new encryption keys. This must be done in order for proper credential encryption to work, with the exception of the null key. This step should only be done once. ``keystone-manage credential_rotate`` will create and rotate a new encryption key in the ``[credential] key_repository``. This will only be done if all credential key hashes match the hash of the current primary key. If any credential has been encrypted with an older key, or secondary key, the rotation will fail. Failing the rotation is necessary to prevent overrotation, which would leave some credentials indecipherable since the key used to encrypt it no longer exists. If this step fails, it is possible to forcibly re-key all credentials using the same primary key with ``keystone-manage credential_migrate``. ``keystone-manage credential_migrate`` will check the backend for credentials whose key hash doesn't match the hash of the current primary key. Any credentials with a key hash mismatching the current primary key will be re-encrypted with the current primary key. The new cipher text and key hash will be updated in the backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/domain-specific-config.inc0000664000175000017500000002322600000000000023615 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _domain_specific_configuration: Domain-specific configuration ============================= The Identity service supports domain-specific Identity drivers. The drivers allow a domain to have its own LDAP or SQL back end. 
By default, domain-specific drivers are disabled. Domain-specific Identity configuration options can be stored in domain-specific configuration files, or in the Identity SQL database using API REST calls. .. note:: Storing and managing configuration options in an SQL database is experimental in Kilo, and added to the Identity service in the Liberty release. .. _enable_drivers_for_domain: Enable drivers for domain-specific configuration files ------------------------------------------------------ To enable domain-specific drivers, set these options in the ``/etc/keystone/keystone.conf`` file: .. code-block:: ini [identity] domain_specific_drivers_enabled = True domain_config_dir = /etc/keystone/domains When you enable domain-specific drivers, Identity looks in the ``domain_config_dir`` directory for configuration files that are named as ``keystone.DOMAIN_NAME.conf``. A domain without a domain-specific configuration file uses options in the primary configuration file. Enable drivers for storing configuration options in SQL database ---------------------------------------------------------------- To enable domain-specific drivers, set these options in the ``/etc/keystone/keystone.conf`` file: .. code-block:: ini [identity] domain_specific_drivers_enabled = True domain_configurations_from_database = True Any domain-specific configuration options specified through the Identity v3 API will override domain-specific configuration files in the ``/etc/keystone/domains`` directory. Unlike the file-based method of specifying domain-specific configurations, options specified via the Identity API will become active without needing to restart the keystone server. For performance reasons, the current state of configuration options for a domain are cached in the keystone server, and in multi-process and multi-threaded keystone configurations, the new configuration options may not become active until the cache has timed out. 
The cache settings for domain config options can be adjusted in the general keystone configuration file (option ``cache_time`` in the ``domain_config`` group). .. NOTE:: It is important to notice that when using either of these methods of specifying domain-specific configuration options, the main keystone configuration file is still maintained. Only those options that relate to the Identity driver for users and groups (i.e. specifying whether the driver for this domain is SQL or LDAP, and, if LDAP, the options that define that connection) are supported in a domain-specific manner. Further, when using the configuration options via the Identity API, the driver option must be set to an LDAP driver (attempting to set it to an SQL driver will generate an error when it is subsequently used). For existing installations that already use file-based domain-specific configurations who wish to migrate to the SQL-based approach, the ``keystone-manage`` command can be used to upload all configuration files to the SQL database: .. code-block:: bash $ keystone-manage domain_config_upload --all Once uploaded, these domain-configuration options will be visible via the Identity API as well as applied to the domain-specific drivers. It is also possible to upload individual domain-specific configuration files by specifying the domain name: .. code-block:: bash $ keystone-manage domain_config_upload --domain-name DOMAINA .. NOTE:: It is important to notice that by enabling either of the domain-specific configuration methods, the operations of listing all users and listing all groups are not supported, those calls will need either a domain filter to be specified or usage of a domain scoped token. .. NOTE:: Keystone does not support moving the contents of a domain (i.e. "its" users and groups) from one backend to another, nor group membership across backend boundaries. .. 
NOTE:: When using the file-based domain-specific configuration method, to delete a domain that uses a domain specific backend, it's necessary to first disable it, remove its specific configuration file (i.e. its corresponding keystone..conf) and then restart the Identity server. When managing configuration options via the Identity API, the domain can simply be disabled and deleted via the Identity API; since any domain-specific configuration options will automatically be removed. .. NOTE:: Although keystone supports multiple LDAP backends via the above domain-specific configuration methods, it currently only supports one SQL backend. This could be either the default driver or a single domain-specific backend, perhaps for storing service users in a predominantly LDAP installation. .. NOTE:: Keystone has deprecated the ``keystone-manage domain_config_upload`` option. The keystone team recommends setting domain config options via the API instead. Due to the need for user and group IDs to be unique across an OpenStack installation and for keystone to be able to deduce which domain and backend to use from just a user or group ID, it dynamically builds a persistent identity mapping table from a public ID to the actual domain, local ID (within that backend) and entity type. The public ID is automatically generated by keystone when it first encounters the entity. If the local ID of the entity is from a backend that does not guarantee to generate UUIDs, a hash algorithm will generate a public ID for that entity, which is what will be exposed by keystone. The use of a hash will ensure that if the public ID needs to be regenerated then the same public ID will be created. This is useful if you are running multiple keystones and want to ensure the same ID would be generated whichever server you hit. .. NOTE:: In case of the LDAP backend, the names of users and groups are not hashed. As a result, these are length limited to 255 characters. Longer names will result in an error. 
While keystone will dynamically maintain the identity mapping, including removing entries when entities are deleted via the keystone, for those entities in backends that are managed outside of keystone (e.g. a read-only LDAP), keystone will not know if entities have been deleted and hence will continue to carry stale identity mappings in its table. While benign, keystone provides an ability for operators to purge the mapping table of such stale entries using the keystone-manage command, for example: .. code-block:: bash $ keystone-manage mapping_purge --domain-name DOMAINA --local-id abc@de.com A typical usage would be for an operator to obtain a list of those entries in an external backend that had been deleted out-of-band to keystone, and then call keystone-manage to purge those entries by specifying the domain and local-id. The type of the entity (i.e. user or group) may also be specified if this is needed to uniquely identify the mapping. Since public IDs can be regenerated **with the correct generator implementation**, if the details of those entries that have been deleted are not available, then it is safe to simply bulk purge identity mappings periodically, for example: .. code-block:: bash $ keystone-manage mapping_purge --domain-name DOMAINA will purge all the mappings for DOMAINA. The entire mapping table can be purged with the following command: .. code-block:: bash $ keystone-manage mapping_purge --all Generating public IDs in the first run may take a while, and most probably first API requests to fetch user list will fail by timeout. To prevent this, ``mapping_populate`` command should be executed. It should be executed right after LDAP has been configured or after ``mapping_purge``. .. code-block:: bash $ keystone-manage mapping_populate --domain DOMAINA Public ID Generators -------------------- Keystone supports a customizable public ID generator and it is specified in the ``[identity_mapping]`` section of the configuration file. 
Keystone provides a sha256 generator as default, which produces regenerable public IDs. The generator algorithm for public IDs is a balance between key size (i.e. the length of the public ID), the probability of collision and, in some circumstances, the security of the public ID. The maximum length of public ID supported by keystone is 64 characters, and the default generator (sha256) uses this full capability. Since the public ID is what is exposed externally by keystone and potentially stored in external systems, some installations may wish to make use of other generator algorithms that have a different trade-off of attributes. A different generator can be installed by configuring the following property: * ``generator`` - identity mapping generator. Defaults to ``sha256`` (implemented by :class:`keystone.identity.id_generators.sha256.Generator`) .. WARNING:: Changing the generator may cause all existing public IDs to be become invalid, so typically the generator selection should be considered immutable for a given installation. Migrate domain-specific configuration files to the SQL database --------------------------------------------------------------- You can use the ``keystone-manage`` command to migrate configuration options in domain-specific configuration files to the SQL database: .. code-block:: console # keystone-manage domain_config_upload --all To upload options from a specific domain-configuration file, specify the domain name: .. code-block:: console # keystone-manage domain_config_upload --domain-name DOMAIN_NAME ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/endpoint-filtering.inc0000664000175000017500000000130400000000000023112 0ustar00zuulzuul00000000000000.. -*- rst -*- Endpoint Filtering ================== Endpoint Filtering enables creation of ad-hoc catalogs for each project-scoped token request. 
Configure the endpoint filter catalog driver in the ``[catalog]`` section. For example: .. code-block:: ini [catalog] driver = catalog_sql In the ``[endpoint_filter]`` section, set ``return_all_endpoints_if_no_filter`` to ``False`` to return an empty catalog if no associations are made. For example: .. code-block:: ini [endpoint_filter] return_all_endpoints_if_no_filter = False See `API Specification for Endpoint Filtering `_ for the details of API definition. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/endpoint-policy.inc0000664000175000017500000000103700000000000022431 0ustar00zuulzuul00000000000000.. -*- rst -*- Endpoint Policy =============== The Endpoint Policy feature provides associations between service endpoints and policies that are already stored in the Identity server and referenced by a policy ID. Configure the endpoint policy backend driver in the ``[endpoint_policy]`` section. For example: .. code-block:: ini [endpoint_policy] driver = sql See `API Specification for Endpoint Policy `_ for the details of API definition. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/event_notifications.rst0000664000175000017500000004675700000000000023447 0ustar00zuulzuul00000000000000 .. Copyright 2013 IBM Corp. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
============================ Keystone Event Notifications ============================ Keystone provides notifications about usage data so that 3rd party applications can use the data for billing, monitoring, or quota purposes. This document describes the current inclusions and exclusions for Keystone notifications. Keystone currently supports two notification formats: a Basic Notification, and a Cloud Auditing Data Federation (`CADF`_) Notification. The supported operations between the two types of notification formats are documented below. Common Notification Structure ============================= Notifications generated by Keystone are generated in JSON format. An external application can format them into ATOM format and publish them as a feed. Currently, all notifications are immediate, meaning they are generated when a specific event happens. Notifications all adhere to a specific top level format: .. code-block:: javascript { "event_type": "identity..", "message_id": "", "payload": {}, "priority": "INFO", "publisher_id": "identity.", "timestamp": "" } Where ```` is a Keystone resource, such as user or project, and ```` is a Keystone operation, such as created, deleted. The key differences between the two notification formats (Basic and CADF), lie within the ``payload`` portion of the notification. The ``priority`` of the notification being sent is not configurable through the Keystone configuration file. This value is defaulted to INFO for all notifications sent in Keystone's case. Auditing with CADF ================== Keystone uses the `PyCADF`_ library to emit CADF notifications, these events adhere to the DMTF `CADF`_ specification. This standard provides auditing capabilities for compliance with security, operational, and business processes and supports normalized and categorized event data for federation and aggregation. .. _PyCADF: https://docs.openstack.org/pycadf/latest .. 
_CADF: http://www.dmtf.org/standards/cadf CADF notifications include additional context data around the ``resource``, the ``action`` and the ``initiator``. CADF notifications may be emitted by changing the ``notification_format`` to ``cadf`` in the configuration file. The ``payload`` portion of a CADF Notification is a CADF ``event``, which is represented as a JSON dictionary. For example: .. code-block:: javascript { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "" }, "target": { "typeURI": "", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", } Where the following are defined: * ````: ID of the user that performed the operation * ````: CADF specific target URI, (i.e.: data/security/project) * ````: The action being performed, typically: ````. ```` .. note:: The ``eventType`` property of the CADF payload is different from the ``event_type`` property of a notifications. The former (``eventType``) is a CADF keyword which designates the type of event that is being measured, this can be: `activity`, `monitor` or `control`. Whereas the latter (``event_type``) is described in previous sections as: `identity..` Additionally there may be extra keys present depending on the operation being performed, these will be discussed below. Reason ------ There is a specific ``reason`` object that will be present for the following PCI-DSS related events: .. list-table:: :widths: 45 10 45 :header-rows: 1 * - PCI-DSS Section - reasonCode - reasonType * - 8.1.6 Limit repeated access attempts by locking out the user after more than X failed attempts. 
- 401 - Maximum number of login attempts exceeded. * - 8.2.3 Passwords must meet the established criteria. - 400 - Password does not meet expected requirements: * - 8.2.4 Password must be changed every X days. - 401 - Password for expired and must be changed * - 8.2.5 Do not let users reuse the last X passwords. - 400 - Changed password cannot be identical to the last passwords. * - Other - Prevent passwords from being changed for a minimum of X days. - 401 - Cannot change password before minimum age days is met The reason object will contain the following keys: * ``reasonType``: Description of the PCI-DSS event * ``reasonCode``: HTTP response code for the event For more information, see :ref:`security_compliance` for configuring PCI-DSS in keystone. Supported Events ---------------- The following table displays the compatibility between resource types and operations. .. list-table:: :widths: 6 8 8 :header-rows: 1 * - Resource Type - Supported Operations - typeURI * - group - create,update,delete - data/security/group * - project - create,update,delete - data/security/project * - role - create,update,delete - data/security/role * - domain - create,update,delete - data/security/domain * - user - create,update,delete - data/security/account/user * - trust - create,delete - data/security/trust * - region - create,update,delete - data/security/region * - endpoint - create,update,delete - data/security/endpoint * - service - create,update,delete - data/security/service * - policy - create,update,delete - data/security/policy * - role assignment - add,remove - data/security/account/user * - None - authenticate - data/security/account/user Example Notification - Project Create ------------------------------------- The following is an example of a notification that is sent when a project is created. This example can be applied for any ``create``, ``update`` or ``delete`` event that is seen in the table above. The ```` and ``typeURI`` fields will be change. 
The difference to note is the inclusion of the ``resource_info`` field which contains the ```` that is undergoing the operation. Thus creating a common element between the CADF and Basic notification formats. .. code-block:: javascript { "event_type": "identity.project.created", "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "data/security/project", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "created.project", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f", "resource_info": "671da331c47d4e29bb6ea1d270154ec3" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2013-08-29 19:03:45.960280" } Example Notification - Authentication ------------------------------------- The following is an example of a notification that is sent when a user authenticates with Keystone. Note that this notification will be emitted if a user successfully authenticates, and when a user fails to authenticate. .. 
code-block:: javascript { "event_type": "identity.authenticate", "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "authenticate", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-02-14T01:20:47.932842" } Example Notification - Federated Authentication ----------------------------------------------- The following is an example of a notification that is sent when a user authenticates with Keystone via Federation. This example is similar to the one seen above, however the ``initiator`` portion of the ``payload`` contains a new ``credential`` section. .. 
code-block:: javascript { "event_type": "identity.authenticate", "message_id": "1371a590-d5fd-448f-b3bb-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "credential": { "type": "http://docs.oasis-open.org/security/saml/v2.0", "token": "671da331c47d4e29bb6ea1d270154ec3", "identity_provider": "ACME", "user": "c9f76d3c31e142af9291de2935bde98a", "groups": [ "developers" ] }, "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-02-14T01:20:47.932842+00:00", "action": "authenticate", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-02-14T01:20:47.932842" } Example Notification - Role Assignment -------------------------------------- The following is an example of a notification that is sent when a role is granted or revoked to a project or domain, for a user or group. It is important to note that this type of notification has many new keys that convey the necessary information. Expect the following in the ``payload``: ``role``, ``inherited_to_project``, ``project`` or ``domain``, ``user`` or ``group``. With the exception of ``inherited_to_project``, each will represent the unique identifier of the resource type. .. 
code-block:: javascript { "event_type": "identity.role_assignment.created", "message_id": "a5901371-d5fd-b3bb-448f-a14dead6f4cb", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "agent": "curl/7.22.0(x86_64-pc-linux-gnu)", "address": "127.0.0.1" }, "id": "c9f76d3c31e142af9291de2935bde98a" }, "target": { "typeURI": "service/security/account/user", "id": "openstack:1c2fc591-facb-4479-a327-520dade1ea15" }, "observer": { "typeURI": "service/security", "id": "openstack:3d4a50a9-2b59-438b-bf19-c231f9c7625a" }, "eventType": "activity", "eventTime": "2014-08-20T01:20:47.932842+00:00", "role": "0e6b990380154a2599ce6b6e91548a68", "project": "24bdcff1aab8474895dbaac509793de1", "inherited_to_projects": false, "group": "c1e22dc67cbd469ea0e33bf428fe597a", "action": "created.role_assignment", "outcome": "success", "id": "openstack:f5352d7b-bee6-4c22-8213-450e7b646e9f" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2014-08-20T01:20:47.932842" } Example Notification - Expired Password --------------------------------------- The following is an example of a notification that is sent when a user attempts to authenticate but their password has expired. In this example, the ``payload`` contains a ``reason`` portion which contains both a ``reasonCode`` and ``reasonType``. .. 
code-block:: javascript { "priority": "INFO", "_unique_id": "222441bdc958423d8af6f28f9c558614", "event_type": "identity.authenticate", "timestamp": "2016-11-11 18:31:11.290821", "publisher_id": "identity.host1234", "payload": { "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event", "initiator": { "typeURI": "service/security/account/user", "host": { "address": "127.0.0.1" }, "id": "73a19db6-e26b-5313-a6df-58d297fa652e" }, "target": { "typeURI": "service/security/account/user", "id": "c23e6cb7-abe0-5e42-b7f7-4c4104ea77b0" }, "observer": { "typeURI": "service/security", "id": "9bdddeda6a0b451e9e0439646e532afd" }, "eventType": "activity", "eventTime": "2016-11-11T18:31:11.156356+0000", "reason": { "reasonCode": 401, "reasonType": "The password is expired and needs to be reset for user: ed1ab0b40f284fb48fea9e25d0d157fc" }, "action": "authenticate", "outcome": "failure", "id": "78cd795f-5850-532f-9ab1-5adb04e30c0f" }, "message_id": "9a97e9d0-fef1-4852-8e82-bb693358bc46" } Basic Notifications =================== All basic notifications contain a limited amount of information, specifically, just the resource type, operation, and resource id. The ``payload`` portion of a Basic Notification is a single key-value pair. .. code-block:: javascript { "resource_info": } Where ```` is the unique identifier assigned to the ``resource_type`` that is undergoing the ````. Supported Events ---------------- The following table displays the compatibility between resource types and operations. .. 
list-table:: :widths: 6 8 :header-rows: 1 * - Resource Type - Supported Operations * - group - create,update,delete * - project - create,update,delete * - role - create,update,delete * - domain - create,update,delete * - user - create,update,delete * - trust - create,delete * - region - create,update,delete * - endpoint - create,update,delete * - service - create,update,delete * - policy - create,update,delete Note, ``trusts`` are an immutable resource, they do not support ``update`` operations. Example Notification -------------------- This is an example of a notification sent for a newly created user: .. code-block:: javascript { "event_type": "identity.user.created", "message_id": "0156ee79-b35f-4cef-ac37-d4a85f231c69", "payload": { "resource_info": "671da331c47d4e29bb6ea1d270154ec3" }, "priority": "INFO", "publisher_id": "identity.host1234", "timestamp": "2013-08-29 19:03:45.960280" } If the operation fails, the notification won't be sent, and no special error notification will be sent. Information about the error is handled through normal exception paths. Recommendations for consumers ============================= One of the most important notifications that Keystone emits is for project deletions (``event_type`` = ``identity.project.deleted``). This event should indicate to the rest of OpenStack that all resources (such as virtual machines) associated with the project should be deleted. Projects can also have update events (``event_type`` = ``identity.project.updated``), wherein the project has been disabled. Keystone ensures this has an immediate impact on the accessibility of the project's resources by revoking tokens with authorization on the project, but should **not** have a direct impact on the projects resources (in other words, virtual machines should **not** be deleted). Opting out of certain notifications =================================== There are many notifications that Keystone emits and some deployers may only care about certain events. 
In Keystone there is a way to opt-out of certain notifications. In ``/etc/keystone/keystone.conf`` you can set ``opt_out`` to the event you wish to opt-out of. It is possible to opt-out of multiple events. Example: .. code-block:: ini [DEFAULT] notification_opt_out = identity.user.created notification_opt_out = identity.role_assignment.created notification_opt_out = identity.authenticate.pending This will opt-out notifications for user creation, role assignment creation and successful authentications. For a list of event types that can be used, refer to: `Telemetry Measurements`_. By default, messages for the following authentication events are suppressed since they are too noisy: ``identity.authenticate.success``, ``identity.authenticate.pending`` and ``identity.authenticate.failed``. .. _Telemetry Measurements: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-identity ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/external-authentication.rst0000664000175000017500000001011700000000000024211 0ustar00zuulzuul00000000000000=========================================== Using external authentication with Keystone =========================================== When Keystone is executed in a web server like Apache HTTPD, it is possible to have the web server also handle authentication. This enables support for additional methods of authentication that are not provided by the identity store backend and the authentication plugins that Keystone supports. Having the web server handle authentication is not exclusive, and both Keystone and the web server can provide different methods of authentication at the same time. For example, the web server can provide support for X.509 or Kerberos authentication, while Keystone provides support for password authentication (with SQL or an identity store as the backend). 
When the web server authenticates a user, it sets environment variables, usually ``REMOTE_USER``, which can be used in the underlying application. Keystone can be configured to use these environment variables to determine the identity of the user. Configuration ============= In order to activate the external authentication mechanism for Identity API v3, the ``external`` method must be in the list of enabled authentication methods. By default it is enabled, so if you don't want to use external authentication, remove it from the ``methods`` option in the ``auth`` section. To configure the plugin that should be used set the ``external`` option again in the ``auth`` section. There are two external authentication method plugins provided by Keystone: * ``DefaultDomain``: This plugin won't take into account the domain information that the external authentication method may pass down to Keystone and will always use the configured default domain. The ``REMOTE_USER`` variable is the username. This is the default if no plugin is given. * ``Domain``: This plugin expects that the ``REMOTE_DOMAIN`` variable contains the domain for the user. If this variable is not present, the configured default domain will be used. The ``REMOTE_USER`` variable is the username. .. CAUTION:: You should disable the external auth method if you are currently using federation. External auth and federation both use the ``REMOTE_USER`` variable. Since both the mapped and external plugin are being invoked to validate attributes in the request environment, it can cause conflicts. For example, imagine there are two distinct users with the same username `foo`, one in the `Default` domain while the other is in the `BAR` domain. The external Federation modules (i.e. mod_shib) sets the ``REMOTE_USER`` attribute to `foo`. The external auth module also tries to set the ``REMOTE_USER`` attribute to `foo` for the `Default` domain. 
The federated mapping engine maps the incoming identity to `foo` in the `BAR` domain. This results in user_id conflict since both are using different user_ids to set `foo` in the `Default` domain and the `BAR` domain. To disable this, simply remove `external` from the `methods` option in `keystone.conf`:: methods = external,password,token,oauth1 Using HTTPD authentication ========================== Web servers like Apache HTTP support many methods of authentication. Keystone can profit from this feature and let the authentication be done in the web server, that will pass down the authenticated user to Keystone using the ``REMOTE_USER`` environment variable. This user must exist in advance in the identity backend to get a token from the controller. To use this method, Keystone should be running on HTTPD. X.509 example ------------- The following snippet for the Apache conf will authenticate the user based on a valid X.509 certificate from a known CA:: SSLEngine on SSLCertificateFile /etc/ssl/certs/ssl.cert SSLCertificateKeyFile /etc/ssl/private/ssl.key SSLCACertificatePath /etc/ssl/allowed_cas SSLCARevocationPath /etc/ssl/allowed_cas SSLUserName SSL_CLIENT_S_DN_CN SSLVerifyClient require SSLVerifyDepth 10 (...) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4821143 keystone-26.0.0/doc/source/admin/federation/0000775000175000017500000000000000000000000020740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/configure_federation.rst0000664000175000017500000006731200000000000025664 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Configuring Keystone for Federation =================================== .. _keystone-as-sp: ----------------------------------- Keystone as a Service Provider (SP) ----------------------------------- .. _sp-prerequisites: Prerequisites ------------- If you are not familiar with the idea of federated identity, see the :ref:`federation_introduction` first. In this section, we will configure keystone as a Service Provider, consuming identity properties issued by an external Identity Provider, such as SAML assertions or OpenID Connect claims. For testing purposes, we recommend using `samltest.id`_ as a SAML Identity Provider, or Google as an OpenID Connect Identity Provider, and the examples here will references those providers. If you plan to set up `Keystone as an Identity Provider (IdP)`_, it is easiest to set up keystone with a dummy SAML provider first and then reconfigure it to point to the keystone Identity Provider later. The following configuration steps were performed on a machine running Ubuntu 16.04 and Apache 2.4.18. To enable federation, you'll need to run keystone behind a web server such as Apache rather than running the WSGI application directly with uWSGI or Gunicorn. See the installation guide for :ref:`SUSE `, :ref:`RedHat ` or :ref:`Ubuntu ` to configure the Apache web server for keystone. Throughout the rest of the guide, you will need to decide on three pieces of information and use them consistently throughout your configuration: 1. The protocol name. 
This must be a valid keystone auth method and must match one of: ``saml2``, ``openid``, ``mapped`` or a :ref:`custom auth method ` for which you must :ref:`register as an external driver `. 2. The identity provider name. This can be arbitrary. 3. The entity ID of the service provider. This should be a URN but need not resolve to anything. You will also need to decide what HTTPD module to use as a Service Provider. This guide provides examples for ``mod_shib`` and ``mod_auth_mellon`` as SAML service providers, and ``mod_auth_openidc`` as an OpenID Connect Service Provider. .. note:: In this guide, the keystone Service Provider is configured on a host called sp.keystone.example.org listening on the standard HTTPS port. All keystone paths will start with the keystone version prefix, ``/v3``. If you have configured keystone to listen on port 5000, or to respond on the path ``/identity`` (for example), take this into account in your own configuration. .. _samltest.id: https://samltest.id Creating federation resources in keystone ----------------------------------------- You need to create three resources via the keystone API to identify the Identity Provider to keystone and align remote user attributes with keystone objects: * `Create an Identity Provider`_ * `Create a Mapping`_ * `Create a Protocol`_ See also the `keystone federation API reference`_. .. _keystone federation API reference: https://docs.openstack.org/api-ref/identity/v3-ext/#os-federation-api Create an Identity Provider ~~~~~~~~~~~~~~~~~~~~~~~~~~~ Create an Identity Provider object in keystone, which represents the Identity Provider we will use to authenticate end users: .. code-block:: console $ openstack identity provider create --remote-id https://samltest.id/saml/idp samltest The value for the ``remote-id`` option is the unique identifier provided by the Identity Provider, called the `entity ID` or the `remote ID`. For a SAML Identity Provider, it can be found by querying its metadata endpoint: .. 
code-block:: console $ curl -s https://samltest.id/saml/idp | grep -o 'entityID=".*"' entityID="https://samltest.id/saml/idp" For an OpenID Connect IdP, it is the Identity Provider's Issuer Identifier. A remote ID must be globally unique: two identity providers cannot be associated with the same remote ID. The remote ID will usually appear as a URN but need not be a resolvable URL. The local name, called ``samltest`` in our example, is decided by you and will be used by the mapping and protocol, and later for authentication. .. note:: An identity provider keystone object may have multiple ``remote-ids`` specified, this allows the same *keystone* identity provider resource to be used with multiple external identity providers. For example, an identity provider resource ``university-idp``, may have the following ``remote_ids``: ``['university-x', 'university-y', 'university-z']``. This removes the need to configure N identity providers in keystone. See also the `API reference on identity providers`_. .. _API reference on identity providers: https://docs.openstack.org/api-ref/identity/v3-ext/#identity-providers .. _create_a_mapping: Create a Mapping ~~~~~~~~~~~~~~~~ Next, create a mapping. A mapping is a set of rules that link the attributes of a remote user to user properties that keystone understands. It is especially useful for granting remote users authorization to keystone resources, either by associating them with a local keystone group and inheriting its role assignments, or dynamically provisioning projects within keystone based on these rules. .. note:: By default, group memberships that a user gets from a mapping are only valid for the duration of the token. It is possible to persist these group memberships for a limited period of time. To enable this, either set the ``authorization_ttl`` attribute of the identity provider, or the ``[federation] default_authorization_ttl`` in the keystone.conf file. 
This value is in minutes, and will result in a lag from when a user is removed from a group in the identity provider, and when that will happen in keystone. Please consider your security requirements carefully. An Identity Provider has exactly one mapping specified per protocol. Mapping objects can be used multiple times by different combinations of Identity Provider and Protocol. As a simple example, create a mapping with a single rule to map all remote users to a local user in a single group in keystone: .. code-block:: console $ cat > rules.json <`, :ref:`RedHat ` or :ref:`Ubuntu `. Configure protected endpoints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ There is a minimum of one endpoint that must be protected in the VirtualHost configuration for the keystone service: .. code-block:: apache Require valid-user AuthType [...] ... This is the endpoint for federated users to request an unscoped token. If configuring WebSSO, you should also protect one or both of the following endpoints: .. code-block:: apache Require valid-user AuthType [...] ... Require valid-user AuthType [...] ... The first example only specifies a protocol, and keystone will use the incoming remote ID to determine the Identity Provider. The second specifies the Identity Provider directly, which must then be supplied to horizon when configuring `horizon for WebSSO`_. The path must exactly match the path that will be used to access the keystone service. For example, if the identity provider you created in `Create an Identity Provider`_ is ``samltest`` and the protocol you created in `Create a Protocol`_ is ``saml2``, then the Locations will be: .. code-block:: apache Require valid-user AuthType [...] ... Require valid-user AuthType [...] ... Require valid-user AuthType [...] ... However, if you have configured the keystone service to use a virtual path such as ``/identity``, that part of the path should be included: .. code-block:: apache Require valid-user AuthType [...] ... ... .. 
_horizon for WebSSO: `Configuring Horizon as a WebSSO Frontend`_ Configure the auth module ~~~~~~~~~~~~~~~~~~~~~~~~~ If your Identity Provider is a SAML IdP, there are two main Apache modules that can be used as a SAML Service Provider: `mod_shib` and `mod_auth_mellon`. For an OpenID Connect Identity Provider, `mod_auth_openidc` is used. You can also use other auth modules such as kerberos, X.509, or others. Check the documentation for the provider you choose for detailed installation and configuration guidance. Depending on the Service Provider module you've chosen, you will need to install the applicable Apache module package and follow additional configuration steps. This guide contains examples for two major federation protocols: * SAML2.0 - see guides for the following implementations: * :ref:`Set up mod_shib `. * :ref:`Set up mod_auth_mellon `. * OpenID Connect: :ref:`Set up mod_auth_openidc `. .. _federation_configuring_keystone: Configuring Keystone -------------------- While the Apache module does the majority of the heavy lifting, minor changes are needed to allow keystone to allow and understand federated authentication. Add the Auth Method ~~~~~~~~~~~~~~~~~~~ Add the authentication methods to the ``[auth]`` section in ``keystone.conf``. The auth method here must have the same name as the protocol you created in `Create a Protocol`_. You should also remove ``external`` as an allowable method. .. code-block:: console [auth] methods = password,token,saml2,openid Configure the Remote ID Attribute ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Keystone is mostly apathetic about what HTTPD auth module you choose to configure for your Service Provider, but must know what header key to look for from the auth module to determine the Identity Provider's remote ID so it can associate the incoming request with the Identity Provider resource. 
The key name is decided by the auth module choice: * For ``mod_shib``: use ``Shib-Identity-Provider`` * For ``mod_auth_mellon``: the attribute name is configured with the ``MellonIdP`` parameter in the VirtualHost configuration, if set to e.g. ``IDP`` then use ``MELLON_IDP`` * For ``mod_auth_openidc``: the attribute name is related to the ``OIDCClaimPrefix`` parameter in the Apache configuration, if set to e.g. ``OIDC-`` use ``HTTP_OIDC_ISS`` It is recommended that this option be set on a per-protocol basis by creating a new section named after the protocol: .. code-block:: ini [saml2] remote_id_attribute = Shib-Identity-Provider [openid] remote_id_attribute = HTTP_OIDC_ISS Alternatively, a generic option may be set at the ``[federation]`` level. .. code-block:: ini [federation] remote_id_attribute = HTTP_OIDC_ISS Add a Trusted Dashboard (WebSSO) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you intend to configure horizon as a WebSSO frontend, you must specify the URLs of trusted horizon servers. This value may be repeated multiple times. This setting ensures that keystone only sends token data back to trusted servers. This is performed as a precaution, specifically to prevent man-in-the-middle (MITM) attacks. The value must exactly match the origin address sent by the horizon server, including any trailing slashes. .. code-block:: ini [federation] trusted_dashboard = https://horizon1.example.org/auth/websso/ trusted_dashboard = https://horizon2.example.org/auth/websso/ Add the Callback Template (WebSSO) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If you intend to configure horizon as a WebSSO frontend, and if not already done for you by your distribution's keystone package, copy the `sso_callback_template.html`_ template into the location specified by the ``[federation]/sso_callback_template`` option in ``keystone.conf``. You can also use this template as an example to create your own custom HTML redirect page. 
Restart the keystone WSGI service or the Apache frontend service after making changes to your keystone configuration. .. code-block:: console # systemctl restart apache2 .. _sso_callback_template.html: https://opendev.org/openstack/keystone/raw/branch/master/etc/sso_callback_template.html .. _horizon-websso: Configuring Horizon as a WebSSO Frontend ---------------------------------------- .. note:: Consult `horizon's official documentation`_ for details on configuring horizon. .. _horizon's official documentation: https://docs.openstack.org/horizon/latest/configuration/settings.html Keystone on its own is not capable of supporting a browser-based Single Sign-on authentication flow such as the SAML2.0 WebSSO profile, therefore we must enlist horizon's assistance. Horizon can be configured to support SSO by enabling it in horizon's ``local_settings.py`` configuration file and adding the possible authentication choices that will be presented to the user on the login screen. Ensure the `WEBSSO_ENABLED` option is set to `True` in horizon's local_settings.py file, this will provide users with an updated login screen for horizon. .. code-block:: python WEBSSO_ENABLED = True Configure the options for authenticating that a user may choose from at the login screen. The pairs configured in this list map a user-friendly string to an authentication option, which may be one of: * The string ``credentials`` which forces horizon to present its own username and password fields that the user will use to authenticate as a local keystone user * The name of a protocol that you created in `Create a Protocol`_, such as ``saml2`` or ``openid``, which will cause horizon to call keystone's `WebSSO API without an Identity Provider`_ to authenticate the user * A string that maps to an Identity Provider and Protocol combination configured in ``WEBSSO_IDP_MAPPING`` which will cause horizon to call keystone's `WebSSO API specific to the given Identity Provider`_. .. 
code-block:: python WEBSSO_CHOICES = ( ("credentials", _("Keystone Credentials")), ("openid", _("OpenID Connect")), ("saml2", _("Security Assertion Markup Language")), ("myidp_openid", "Acme Corporation - OpenID Connect"), ("myidp_saml2", "Acme Corporation - SAML2") ) WEBSSO_IDP_MAPPING = { "myidp_openid": ("myidp", "openid"), "myidp_saml2": ("myidp", "saml2") } The initial selection of the dropdown menu can also be configured: .. code-block:: python WEBSSO_INITIAL_CHOICE = "credentials" Remember to restart the web server when finished configuring horizon: .. code-block:: console # systemctl restart apache2 .. _WebSSO API without an Identity Provider: https://docs.openstack.org/api-ref/identity/v3-ext/index.html#web-single-sign-on-authentication-new-in-version-1-2 .. _WebSSO API specific to the given Identity Provider: https://docs.openstack.org/api-ref/identity/v3-ext/index.html#web-single-sign-on-authentication-new-in-version-1-3 Authenticating -------------- Use the CLI to authenticate with a SAML2.0 Identity Provider ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. FIXME(cmurphy): Include examples for OpenID Connect authentication with the CLI The ``python-openstackclient`` can be used to authenticate a federated user in a SAML Identity Provider to keystone. .. note:: The SAML Identity Provider must be configured to support the ECP authentication profile. To use the CLI tool, you must have the name of the Identity Provider resource in keystone, the name of the federation protocol configured in keystone, and the ECP endpoint for the Identity Provider. If you are the cloud administrator, the name of the Identity Provider and protocol was configured in `Create an Identity Provider`_ and `Create a Protocol`_ respectively. If you are not the administrator, you must obtain this information from the administrator. The ECP endpoint for the Identity Provider can be obtained from its metadata without involving an administrator. 
This endpoint is the ``urn:oasis:names:tc:SAML:2.0:bindings:SOAP`` binding in the metadata document: .. code-block:: console $ curl -s https://samltest.id/saml/idp | grep urn:oasis:names:tc:SAML:2.0:bindings:SOAP ~~~~~~~~~~~~~~~~~~~~~ Find available scopes ~~~~~~~~~~~~~~~~~~~~~ If you are a new user and are not aware of what resources you have access to, you can use an unscoped query to list the projects or domains you have been granted a role assignment on: .. code-block:: bash export OS_AUTH_TYPE=v3samlpassword export OS_IDENTITY_PROVIDER=samltest export OS_IDENTITY_PROVIDER_URL=https://samltest.id/idp/profile/SAML2/SOAP/ECP export OS_PROTOCOL=saml2 export OS_USERNAME=morty export OS_PASSWORD=panic export OS_AUTH_URL=https://sp.keystone.example.org/v3 export OS_IDENTITY_API_VERSION=3 openstack federation project list openstack federation domain list ~~~~~~~~~~~~~~~~~~ Get a scoped token ~~~~~~~~~~~~~~~~~~ If you already know the project, domain or system you wish to scope to, you can directly request a scoped token: .. code-block:: bash export OS_AUTH_TYPE=v3samlpassword export OS_IDENTITY_PROVIDER=samltest export OS_IDENTITY_PROVIDER_URL=https://samltest.id/idp/profile/SAML2/SOAP/ECP export OS_PROTOCOL=saml2 export OS_USERNAME=morty export OS_PASSWORD=panic export OS_AUTH_URL=https://sp.keystone.example.org/v3 export OS_IDENTITY_API_VERSION=3 export OS_PROJECT_NAME=federated_project export OS_PROJECT_DOMAIN_NAME=Default openstack token issue Use horizon to authenticate with an external Identity Provider ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When horizon is configured to enable WebSSO, a dropdown menu will appear on the login screen before the user has authenticated. Select an authentication method from the menu to be redirected to your Identity Provider for authentication. .. image:: ../../_static/horizon-login-sp.png :height: 400px :alt: Horizon login screen using external authentication .. 
_keystone_as_idp: -------------------------------------- Keystone as an Identity Provider (IdP) -------------------------------------- Prerequisites ------------- When keystone is configured as an Identity Provider, it is often referred to as `Keystone to Keystone`, because it enables federation between multiple OpenStack clouds using the SAML2.0 protocol. If you are not familiar with the idea of federated identity, see the :ref:`introduction ` first. When setting up `Keystone to Keystone`, it is easiest to :ref:`configure a keystone Service Provider ` first with a sandbox Identity Provider such as `samltest.id`_. .. _samltest.id: https://samltest.id This feature requires installation of the xmlsec1 tool via your distribution packaging system (for instance apt or yum) .. code-block:: console # apt-get install xmlsec1 .. note:: In this guide, the keystone Identity Provider is configured on a host called idp.keystone.example.org listening on the standard HTTPS port. All keystone paths will start with the keystone version prefix, ``/v3``. If you have configured keystone to listen on port 5000, or to respond on the path ``/identity`` (for example), take this into account in your own configuration. Configuring Metadata -------------------- Since keystone is acting as a SAML Identity Provider, its metadata must be configured in the ``[saml]`` section (not to be confused with an optional ``[saml2]`` section which you may have configured in `Configure the Remote Id Attribute`_ while setting up keystone as Service Provider) of ``keystone.conf`` so that it can served by the `metadata API`_. .. _metadata API: https://docs.openstack.org/api-ref/identity/v3-ext/index.html#retrieve-metadata-properties The two parameters that **must** be set in order for keystone to generate metadata are ``idp_entity_id`` and ``idp_sso_endpoint``: .. 
code-block:: ini [saml] idp_entity_id=https://idp.keystone.example.org/v3/OS-FEDERATION/saml2/idp idp_sso_endpoint=https://idp.keystone.example.org/v3/OS-FEDERATION/saml2/sso ``idp_entity_id`` sets the Identity Provider entity ID, which is a string of your choosing that uniquely identifies the Identity Provider to any Service Provider. ``idp_sso_endpoint`` is required to generate valid metadata, but its value is currently not used because keystone as an Identity Provider does not support the SAML2.0 WebSSO auth profile. This may change in the future which is why there is no default value provided and must be set by the operator. For completeness, the following Organization and Contact configuration options should also be updated to reflect your organization and administrator contact details. .. code-block:: ini idp_organization_name=example_company idp_organization_display_name=Example Corp. idp_organization_url=example.com idp_contact_company=example_company idp_contact_name=John idp_contact_surname=Smith idp_contact_email=jsmith@example.com idp_contact_telephone=555-555-5555 idp_contact_type=technical It is important to take note of the default ``certfile`` and ``keyfile`` options, and adjust them if necessary: .. code-block:: ini certfile=/etc/keystone/ssl/certs/signing_cert.pem keyfile=/etc/keystone/ssl/private/signing_key.pem You must generate a PKI key pair and copy the files to these paths. You can use the ``openssl`` tool to do so. Keystone does not provide a utility for this. Check the ``idp_metadata_path`` setting and adjust it if necessary: .. code-block:: ini idp_metadata_path=/etc/keystone/saml2_idp_metadata.xml To create metadata for your keystone IdP, run the ``keystone-manage`` command and redirect the output to a file. For example: .. code-block:: console # keystone-manage saml_idp_metadata > /etc/keystone/saml2_idp_metadata.xml Finally, restart the keystone WSGI service or the web server frontend: .. 
code-block:: console # systemctl restart apache2 Creating a Service Provider Resource ------------------------------------ Create a Service Provider resource to represent your Service Provider as an object in keystone: .. code-block:: console $ openstack service provider create keystonesp \ --service-provider-url https://sp.keystone.example.org/Shibboleth.sso/SAML2/ECP --auth-url https://sp.keystone.example.org/v3/OS-FEDERATION/identity_providers/keystoneidp/protocols/saml2/auth The ``--auth-url`` is the `federated auth endpoint`_ for a specific Identity Provider and protocol name, here named ``keystoneidp`` and ``saml2``. The ``--service-provider-url`` is the ``urn:oasis:names:tc:SAML:2.0:bindings:PAOS`` binding for the Assertion Consumer Service of the Service Provider. It can be obtained from the Service Provider metadata: .. code-block:: console $ curl -s https://sp.keystone.example.org/Shibboleth.sso/Metadata | grep urn:oasis:names:tc:SAML:2.0:bindings:PAOS .. _federated auth endpoint: https://docs.openstack.org/api-ref/identity/v3-ext/index.html#request-an-unscoped-os-federation-token Authenticating -------------- Use the CLI to authenticate with Keystone-to-Keystone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Use ``python-openstackclient`` to authenticate with the IdP and then get a scoped token from the SP. .. code-block:: console export OS_USERNAME=demo export OS_PASSWORD=nomoresecret export OS_AUTH_URL=https://idp.keystone.example.org/v3 export OS_IDENTITY_API_VERSION=3 export OS_PROJECT_NAME=federated_project export OS_PROJECT_DOMAIN_NAME=Default export OS_SERVICE_PROVIDER=keystonesp export OS_REMOTE_PROJECT_NAME=federated_project export OS_REMOTE_PROJECT_DOMAIN_NAME=Default openstack token issue Use Horizon to switch clouds ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ No additional configuration is necessary to enable horizon for Keystone to Keystone. Log into the horizon instance for the Identity Provider using your regular local keystone credentials. 
Once logged in, you will see a Service Provider dropdown menu which you can use to switch your dashboard view to another cloud. .. image:: ../../_static/horizon-login-idp.png :height: 175px :alt: Horizon dropdown menu for switching between keystone providers .. include:: openidc.inc .. include:: mellon.inc .. include:: shibboleth.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/federated_identity.rst0000664000175000017500000000025000000000000025323 0ustar00zuulzuul00000000000000================== Federated Identity ================== .. toctree:: :maxdepth: 2 introduction.rst configure_federation.rst mapping_combinations.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/introduction.rst0000664000175000017500000005017700000000000024225 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _federation_introduction: Introduction to Keystone Federation =================================== ---------------------------- What is keystone federation? ---------------------------- Identity federation is the ability to share identity information across multiple identity management systems. 
In keystone, this is implemented as an authentication method that allows users to authenticate directly with another identity source and then provides keystone with a set of user attributes. This is useful if your organization already has a primary identity source since it means users don't need a separate set of credentials for the cloud. It is also useful for connecting multiple clouds together, as we can use a keystone in another cloud as an identity source. Using :ref:`LDAP as an identity backend ` is another way for keystone to obtain identity information from an external source, but it requires keystone to handle passwords directly rather than offloading authentication to the external source. Keystone supports two configuration models for federated identity. The most common configuration is with :ref:`keystone as a Service Provider (SP) `, using an external Identity Provider, such as a Keycloak or Google, as the identity source and authentication method. The second type of configuration is ":ref:`Keystone to Keystone `", where two keystones are linked with one acting as the identity source. This document discusses identity federation involving a secondary identity management that acts as the source of truth concerning the users it contains, specifically covering the SAML2.0 and OpenID Connect protocols, although keystone can work with other protocols. A similar concept is :doc:`external authentication ` whereby keystone is still the source of truth about its users but authentication is handled externally. Yet another closely related topic is :doc:`tokenless authentication ` which uses some of the same constructs as described here but allows services to validate users without using keystone tokens. -------- Glossary -------- **Service Provider (SP)** A Service Provider is the service providing the resource an end-user is requesting. In our case, this is keystone, which provides keystone tokens that we use on other OpenStack services. 
We do NOT call the other OpenStack services "service providers". The specific service we care about in this context is the token service, so that is our Service Provider. **Identity Provider (IdP)** An Identity Provider is the service that accepts credentials, validates them, and generates a yay/nay response. It returns this response along with some other attributes about the user, such as their username, their display name, and whatever other details it stores and you've configured your Service Provider to accept. **Entity ID or Remote ID** An Entity ID or a Remote ID are both names for a unique identifier string for either a Service Provider or an Identity Provider. It usually takes the form of a URN, but the URN does not need to be a resolvable URL. Remote IDs are globally unique. Two Identity Providers cannot be associated with the same remote ID. Keystone uses the remote ID retrieved from the HTTPD environment variables to match the incoming request with a trusted Identity Provider and render the appropriate authorization mapping. **SAML2.0** `SAML2.0`_ is an XML-based federation protocol. It is commonly used in internal-facing organizations, such as a university or business in which IT services are provided to members of the organization. **OpenID Connect (OpenIDC)** `OpenID Connect`_ is a JSON-based federation protocol built on OAuth 2.0. It's used more often by public-facing services like Google. **Assertion** An assertion is a formatted statement from the Identity Provider that asserts that a user is authenticated and provides some attributes about the user. The Identity Provider always signs the assertion and typically encrypts it as well. **Single Sign-On (SSO)** `Single Sign-On`_ is a mechanism related to identity federation whereby a user may log in to their identity management system and be granted a token or ticket that allows them access to multiple Service Providers. .. 
_SAML2.0: http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html .. _OpenID Connect: https://openid.net/connect/ .. _Single Sign-On: https://en.wikipedia.org/wiki/Single_sign-on -------------------- Authentication Flows -------------------- Understanding the flow of information as a user moves through the authentication process is key to being able to debug later on. Normal keystone --------------- .. seqdiag:: :name: normal-keystone :alt: Diagram of keystone's normal auth flow, in which a user agent authenticates and authorizes themself with keystone and obtains a scoped token to pass to an OpenStack service. seqdiag { default_fontsize = 13; useragent [label = "User Agent"]; keystone [label = "Keystone"]; openstack [label = "OpenStack"]; useragent -> keystone [label = "GET /v3/auth/tokens"]; keystone -> keystone [label = "Authenticate"]; keystone -> keystone [label = "Authorize"]; useragent <- keystone [label = "Scoped token"]; useragent -> openstack [label = "GET /v2.1/servers"]; } In a normal keystone flow, the user requests a scoped token directly from keystone. Keystone accepts their credentials and checks them against its local storage or against its LDAP backend. Then it checks the scope that the user is requesting, ensuring they have the correct role assignments, and produces a scoped token. The user can use the scoped token to do something else in OpenStack, like request servers, but everything that happens after the token is produced is irrelevant to this discussion. SAML2.0 ------- SAML2.0 WebSSO ~~~~~~~~~~~~~~ .. seqdiag:: :name: saml2-websso :alt: Diagram of a standard WebSSO authentication flow. seqdiag { edge_length = 325; default_fontsize = 13; useragent [label = "User Agent"]; sp [label = "Service Provider"]; idp [label = "Identity Provider"]; useragent -> sp [label = "GET /secure"]; useragent <- sp [label = "HTTP 302 Location: https://idp/auth? 
SAMLRequest=req"]; useragent -> idp [label = "GET /auth?SAMLRequest=req"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 200 SAMLResponse in HTML form"]; useragent -> sp [label = "POST /assertionconsumerservice"]; sp -> sp [label = "Validate"]; useragent <- sp [label = "HTTP 302; Location: /secure"]; useragent -> sp [label = "GET /secure"]; } This diagram shows a standard `WebSSO`_ authentication flow, not one involving keystone. WebSSO is one of a few `SAML2.0 profiles`_. It is based on the idea that a web browser will be acting as an intermediary and so the flow involves concepts that a browser can understand and act on, like HTTP redirects and HTML forms. First, the user uses their web browser to request some secure resource from the Service Provider. The Service Provider detects that the user isn't authenticated yet, so it generates a SAML Request which it base64 encodes, and then issues an HTTP redirect to the Identity Provider. The browser follows the redirect and presents the SAML Request to the Identity Provider. The user is prompted to authenticate, probably by filling out a username and password in a login page. The Identity Provider responds with an HTTP success and generates a SAML Response with an HTML form. The browser automatically POSTs the form back to the Service Provider, which validates the SAML Response. The Service Provider finally issues another redirect back to the original resource the user had requested. .. _WebSSO: http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0-cd-02.html#5.1.Web%20Browser%20SSO%20Profile|outline .. _SAML2.0 profiles: http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0-cd-02.html#5.Major%20Profiles%20and%20Federation%20Use%20Cases|outline SAML2.0 ECP ~~~~~~~~~~~ .. seqdiag:: :name: saml2-ecp :alt: Diagram of a standard ECP authentication flow. 
seqdiag { default_fontsize = 13; useragent [label = "User Agent"]; sp [label = "Service Provider"]; idp [label = "Identity Provider"]; useragent -> sp [label = "GET /secure"]; useragent <- sp [label = "HTTP 200 SAML Request"]; useragent -> idp [label = "POST /auth SAML Request"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 200 SAMLResponse in SOAP"]; useragent -> sp [label = "POST /responseconsumer"]; sp -> sp [label = "Validate"]; useragent <- sp [label = "HTTP 200 /secure"]; } `ECP`_ is another SAML profile. Generally the flow is similar to the WebSSO flow, but it is designed for a client that natively understands SAML, for example the `keystoneauth`_ library (and therefore also the `python-openstackclient `__ CLI tool). ECP is slightly different from the browser-based flow and is not supported by all SAML2.0 IdPs, and so getting WebSSO working does not necessarily mean ECP is working correctly, or vice versa. ECP support must often be turned on explicitly in the Identity Provider. .. _ECP: http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0-cd-02.html#5.2.ECP%20Profile|outline .. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ WebSSO with keystone and horizon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. seqdiag:: :name: saml2-keystone-horizon :alt: Diagram of the SAML2.0 WebSSO auth flow specific to horizon, keystone, and the HTTPD module acting as service provider. 
seqdiag { default_fontsize = 13; useragent [label = "User Agent"]; horizon [label = "Horizon"]; httpd [label = "HTTPD", color = "lightgrey"]; keystone [label = "Keystone", color = "lightgrey"]; idp [label = "Identity Provider"]; useragent -> horizon [label = "POST /auth/login"]; useragent <- horizon [label = "HTTP 302 Location: /v3/auth/OS-FEDERATION /websso/saml2"]; useragent -> httpd [label = "GET /v3/auth/OS-FEDERATION/websso/saml2"]; useragent <- httpd [label = "HTTP 302 Location: https://idp/auth?SAMLRequest=req"]; useragent -> idp [label = "GET /auth"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 200 SAMLResponse in HTML form"]; useragent -> httpd [label = "POST /assertionconsumerservice"]; httpd -> httpd [label = "Validate"]; useragent <- httpd [label = "HTTP 302 Location: /v3/auth/OS-FEDERATION/websso/saml2"]; useragent -> keystone [label = "GET /v3/auth/OS-FEDERATION/websso/saml2"]; keystone -> keystone [label = "Issue token"]; useragent <- keystone [label = "HTTP 200 HTML form containing unscoped token"]; useragent -> horizon [label = "POST /auth/websso"]; useragent <- horizon [label = "successful login"]; } Keystone is not a web front-end, which means horizon needs to handle some parts of being a Service Provider to implement WebSSO. In the diagram above, horizon is added, and keystone and HTTPD are split out from each other to distinguish which parts each are responsible for, though typically both together are referred to as the Service Provider. In this model, the user requests to log in to horizon by selecting a federated authentication method from a dropdown menu. Horizon automatically generates a keystone URL based on the Identity Provider and protocol selected and redirects the browser to keystone. That location is equivalent to the /secure resource in the `SAML2.0 WebSSO`_ diagram. 
The browser follows the redirect, and the HTTPD module detects that the user isn't logged in yet and issues another redirect to the Identity Provider with a SAML Request. At this point, the flow is the same as in the normal WebSSO model. The user logs into the Identity Provider, a SAML Response is POSTed back to the Service Provider, where the HTTPD module validates the response and issues a redirect back to the location that horizon had originally requested, which is a special federation auth endpoint. At this point keystone is able to grant an unscoped token, which it hands off as another HTML form. The browser will POST that back to horizon, which triggers the normal login process, picking a project to scope to and getting a scoped token from keystone. Note that horizon is acting as a middleman, since it knows the endpoint of the secure resource it requests from keystone. Keystone to Keystone ~~~~~~~~~~~~~~~~~~~~ .. seqdiag:: :name: keystone-to-keystone :alt: Diagram of the IdP-initiated auth flow in a keystone-to-keystone model. seqdiag { edge_length = 240; default_fontsize = 13; useragent [label = "User Agent"]; sp [label = "Service Provider"]; idp [label = "Identity Provider"]; useragent -> idp [label = "POST /v3/auth/tokens"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 201 X-Subject-Token: token"]; useragent -> idp [label = "POST /v3/auth/OS-FEDERATION/saml2/ecp"]; useragent <- idp [label = "HTTP 201 SAMLResponse in SOAP envelope"]; useragent -> sp [label = "POST /PAOS-url"]; sp -> sp [label = "Validate"]; useragent <- sp [label = "HTTP 302"]; useragent -> sp [label = "GET /v3/OS-FED/.../auth"]; useragent <- sp [label = "HTTP 201 X-Subject-Token: unscoped token"]; useragent -> sp [label = "POST /v3/auth/tokens (request scoped token)"]; } When keystone is used as an Identity Provider in a Keystone to Keystone configuration, the auth flow is nonstandard. It is similar to an `IdP-initiated auth flow`_. 
In this case, the user goes directly to the Identity Provider first before requesting any resource from the Service Provider. The user will get a token from keystone, then use that to request a SAML Response via ECP. When it gets that response back, it POSTs that to the Service Provider, which will grant a token for it. Notice that the Service Provider has to accept data from the Identity Provider and therefore needs to have a way of trusting it. The Identity Provider, on the other hand, never has to accept data from the Service Provider. There is no back and forth, the user simply completes the auth process on one side and presents the result to the other side. .. _IdP-initiated auth flow: http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0-cd-02.html#5.1.4.IdP-Initiated%20SSO:%20%20POST%20Binding|outline OpenID Connect -------------- OpenID Connect Authentication Flow ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. seqdiag:: :name: openidc :alt: Diagram of a standard OpenID Connect authentication flow :align: left seqdiag { edge_length = 330; default_fontsize = 13; useragent [label = "User Agent"]; sp [label = "Service Provider"]; idp [label = "Identity Provider"]; useragent -> sp [label = "GET /secure"]; useragent <- sp [label = "HTTP 302 Location: https://idp/auth? client_id=XXX&redirect_uri=https://sp/secure"]; useragent -> idp [label = "GET /auth?client_id=XXX&redirect_uri=https://sp/secure"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 302 Location: https://sp/auth?code=XXX"]; useragent -> sp [label = "GET /auth?code=XXX"]; sp -> idp [label = "POST https://idp/token code=XXX&redirect_uri=https://sp/secure"]; sp <- idp [label = "HTTP 200 {\"access_code\": \"XXX\", \"id_token\": \"XXX\"}"]; useragent <- sp [label = "HTTP 302; Location: /secure"]; useragent -> sp [label = "GET /secure"]; } OpenID Connect is different from any SAML2.0 flow because the negotiation is not handled entirely through the client. 
The Service Provider must make a request directly to the Identity Provider, which means this flow would not be appropriate if the Service Provider and Identity Provider are in segregated networks. When the user requests a secure resource from the Service Provider, they are redirected to the Identity Provider to log in. The Identity Provider then redirects the user back to the Service Provider using a known redirect URI and providing an authorization code. The Service Provider must then make a back-channel request directly to the Identity Provider using the provided code, and exchange it for an ID token. OpenID Connect with keystone and horizon ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. seqdiag:: :name: oidc-keystone-horizon :alt: Diagram of the OpenID Connect WebSSO auth flow specific to horizon, keystone, and the HTTPD module acting as service provider. seqdiag { edge_length = 200 default_fontsize = 13; useragent [label = "User Agent"]; horizon [label = "Horizon"]; httpd [label = "HTTPD", color = "lightgrey"]; keystone [label = "Keystone", color = "lightgrey"]; idp [label = "Identity Provider"]; useragent -> horizon [label = "POST /auth/login"]; useragent <- horizon [label = "HTTP 302 Location: /v3/auth/OS-FEDERATION /websso/openid"]; useragent -> httpd [label = "GET /v3/auth/OS-FEDERATION/websso/openid"]; useragent <- httpd [label = "HTTP 302 Location: https://idp/auth? 
client_id=XXX& redirect_uri=https://sp/v3/auth/OS-FEDERATION/websso"]; useragent -> idp [label = "GET /auth?client_id=XXX& redirect_uri=https://sp/v3/auth/OS-FEDERATION/websso"]; idp -> idp [label = "Authenticate"]; useragent <- idp [label = "HTTP 302 Location: https://sp/v3/auth/OS-FEDERATION/websso"]; useragent -> httpd [label = "GET /v3/auth/OS-FEDERATION/websso"]; httpd -> idp [label = "POST https://idp/token code=XXX& redirect_uri=https://sp/v3/auth/OS-FEDERATION/websso"]; httpd <- idp [label = "HTTP 200 {\"access_code\": \"XXX\", \"id_token\": \"XXX\"}"]; useragent <- httpd [label = "HTTP 302 Location: /v3/auth/OS-FEDERATION/websso/mapped"]; useragent -> keystone [label = "GET /v3/auth/OS-FEDERATION/websso/mapped"]; keystone -> keystone [label = "Issue token"]; useragent <- keystone [label = "HTTP 200 HTML form containing unscoped token"]; useragent -> horizon [label = "POST /auth/websso"]; useragent <- horizon [label = "successful login"]; } From horizon and keystone's point of view, the authentication flow is the same for OpenID Connect as it is for SAML2.0. It is only the HTTPD OpenIDC module that must handle the flow in accordance with the spec. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/mapping_combinations.rst0000664000175000017500000007610100000000000025677 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
Mapping Combinations ==================== ----------- Description ----------- During the authentication process an identity provider (IdP) will present keystone with a set of user attributes about the user that is authenticating. For example, in the SAML2 flow this comes to keystone in the form of a SAML document. The attributes are typically processed by third-party software and are presented to keystone as environment variables. The original document from the IdP is generally not available to keystone. This is how the `Shibboleth` and `Mellon` implementations work. The mapping format described in this document maps these environment variables to a local keystone user. The mapping may also define group membership for that user and projects the user can access. An IdP has exactly one mapping specified per protocol. Mappings themselves can be used multiple times by different combinations of IdP and protocol. ----------- Definitions ----------- A mapping looks as follows: .. code-block:: none { "rules": [ { "local": [ { [] [] } ], "remote": [ { [] } ] } ] } * `mapping`: a JSON object containing a list of rules. * `rules`: a property in the mapping that contains the list of rules. * `rule`: a JSON object containing `local` and `remote` properties to define the rule. There is no explicit `rule` property. * `local`: a JSON object containing information on what local attributes will be mapped. The mapping engine processes this using the `context` (defined below) and the result is a representation of the user from keystone's perspective. * ``: the local user that will be mapped to the federated user. * ``: (optional) the local groups the federated user will be placed in. * ``: (optional) the local projects mapped to the federated user. * ``: (optional) the local domain mapped to the federated user, projects, and groups. Projects and groups can also override this default domain by defining a domain of their own. 
Moreover, if no domain is defined in this configuration, the attribute mapping schema will use the identity provider OpenStack domain. * `remote`: a JSON object containing information on what remote attributes will be mapped. * ``: a JSON object that tells the mapping engine what federated attribute to make available for substitution in the local object. There can be one or more of these objects in the `remote` list. * ``: a JSON object containing conditions that allow a rule. There can be zero or more of these objects in the `remote` list. * `direct mapping`: the mapping engine keeps track of each match and makes them available to the local rule for substitution. * `assertion`: data provided to keystone by the IdP to assert facts (name, groups, etc) about the authenticating user. This is an XML document when using the SAML2 protocol. * `mapping context`: the data, represented as key-value pairs, that is used by the mapping engine to turn the `local` object into a representation of the user from keystone's perspective. The mapping context contains the environment of the keystone process and any `direct mapping` values calculated when processing the `remote` list. -------------------------- How Mappings Are Processed -------------------------- A mapping is selected by IdP and protocol. Then keystone takes the mapping and processes each rule sequentially stopping after the first matched rule. A rule is matched when all of its conditions are met. First keystone evaluates each condition from the rule's remote property to see if the rule is a match. If it is a match, keystone saves the data captured by each of the matches from the rule's remote property in an ordered list. We call these matches `direct mappings` since they can be used in the next step. After the rule is found using the rule's conditions and a list of direct mappings is stored, keystone begins processing the rule's `local` property. 
Each object in the `local` property is collapsed into a single JSON object. For example: .. code-block:: none { "local": [ { "user": {...} }, { "projects": [...] }, ] } becomes: .. code-block:: none { "local": { "user": {...} "projects": [...] }, } when the same property exists in the local multiple times the first occurrence wins: .. code-block:: none { "local": [ { "user": {#first#} }, { "projects": [...] }, { "user": {#second#} }, ] } becomes: .. code-block:: none { "local": { "user": {#first#} "projects": [...] }, } We take this JSON object and then recursively process it in order to apply the direct mappings. This is simply looking for the pattern `{#}` and substituting it with values from the direct mappings list. The index of the direct mapping starts at zero. ------------- Mapping Rules ------------- Mapping Engine -------------- The mapping engine can be tested before creating a federated setup. It can be tested with the ``keystone-manage mapping_engine`` command: .. code-block:: console $ keystone-manage mapping_engine --rules --input .. NOTE:: Although the rules file is formatted as JSON, the input file of assertion data is formatted as individual lines of key: value pairs, see `keystone-manage mapping_engine --help` for details. Mapping Conditions ------------------ Mappings support 5 different types of conditions: ``empty``: The rule is matched to all claims containing the remote attribute type. This condition does not need to be specified. ``any_one_of``: The rule is matched only if any of the specified strings appear in the remote attribute type. Condition result is boolean, not the argument that is passed as input. ``not_any_of``: The rule is not matched if any of the specified strings appear in the remote attribute type. Condition result is boolean, not the argument that is passed as input. ``blacklist``: This rule removes all groups matched from the assertion. 
It is not intended to be used as a way to prevent users, or groups of users, from accessing the service provider. The output from filtering through a blacklist will be all groups from the assertion that were not listed in the blacklist. ``whitelist``: This rule explicitly states which groups should be carried over from the assertion. The result is the groups present in the assertion and in the whitelist. .. NOTE:: ``empty``, ``blacklist`` and ``whitelist`` are the only conditions that can be used in direct mapping ({0}, {1}, etc.) Multiple conditions can be combined to create a single rule. Mappings Examples ----------------- The following are all examples of mapping rule types. empty condition ~~~~~~~~~~~~~~~ .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0} {1}", "email": "{2}" }, "group": { "name": "{3}", "domain": { "id": "0cd5e9" } } } ], "remote": [ { "type": "FirstName" }, { "type": "LastName" }, { "type": "Email" }, { "type": "OIDC_GROUPS" } ] } ] } .. NOTE:: The numbers in braces {} are indices, they map in order. For example:: - Mapping to user with the name matching the value in remote attribute FirstName - Mapping to user with the name matching the value in remote attribute LastName - Mapping to user with the email matching value in remote attribute Email - Mapping to a group(s) with the name matching the value(s) in remote attribute OIDC_GROUPS .. NOTE:: If the user id and name are not specified in the mapping, the server tries to directly map ``REMOTE_USER`` environment variable. If this variable is also unavailable the server returns an HTTP 401 Unauthorized error. Groups can have multiple values. Each value must be separated by a `;` Example: OIDC_GROUPS=developers;testers other conditions ~~~~~~~~~~~~~~~~ In ```` shown below, please supply one of the following: ``any_one_of``, or ``not_any_of``. .. 
code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "": [ "HTTP_OIDC_EMAIL" ] } ] } ] } In ```` shown below, please supply one of the following: ``blacklist``, or ``whitelist``. .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "groups": "{1}", "domain": { "id": "0cd5e9" } } ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "": [ "me@example.com" ] } ] } ] } In the above example, a whitelist can be used to only map the user into a few of the groups in their ``HTTP_OIDC_GROUPIDS`` remote attribute: .. code-block:: json { "type": "HTTP_OIDC_GROUPIDS", "whitelist": [ "Developers", "OpsTeam" ] } A blacklist can map the user into all groups except those matched: .. code-block:: json { "type": "HTTP_OIDC_GROUPIDS", "blacklist": [ "Finance" ] } Regular expressions can be used in any condition for more flexible matches: .. code-block:: json { "type": "HTTP_OIDC_GROUPIDS", "whitelist": [ ".*Team$" ] } When mapping into groups, either ids or names can be provided in the local section: .. code-block:: json { "local": [ { "group": { "id":"0cd5e9" } } ] } .. code-block:: json { "local": [ { "group": { "name": "developer_group", "domain": { "id": "abc1234" } } } ] } .. code-block:: json { "local": [ { "group": { "name": "developer_group", "domain": { "name": "private_cloud" } } } ] } Users can be mapped to local users that already exist in keystone's identity backend by setting the ``type`` attribute of the user to ``local`` and providing the domain to which the local user belongs: .. code-block:: json { "local": [ { "user": { "name": "local_user", "type": "local", "domain": { "name": "local_domain" } } } ] } The user is then treated as existing in the local identity backend, and the server will attempt to fetch user details (id, name, roles, groups) from the identity backend. 
The local user and domain are not generated dynamically, so if they do not exist in the local identity backend, authentication attempts will result in a 401 Unauthorized error. If you omit the ``type`` attribute or set it to ``ephemeral`` or do not provide a domain, the user is deemed ephemeral and becomes a member of the identity provider's domain. It will not be looked up in the local keystone backend, so all of its attributes must come from the IdP and the mapping rules. .. NOTE:: Domain ``Federated`` is a service domain - it cannot be listed, displayed, added or deleted. There is no need to perform any operation on it prior to federation configuration. Output ------ If a mapping is valid you will receive the following output: .. code-block:: none { "group_ids": "[]", "user": { "domain": { "id": "Federated" or "" }, "type": "ephemeral" or "local", "name": "", "id": "" }, "group_names": [ { "domain": { "name": "" }, "name": { "name": "[]" } }, { "domain": { "name": "" }, "name": { "name": "[]" } } ] } If the mapped user is local, mapping engine will discard further group assigning and return set of roles configured for the user. Regular Expressions ------------------- Regular expressions can be used in a mapping by specifying the ``regex`` key, and setting it to ``true``. .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "name": "{1}", "domain": { "id": "abc1234" } } }, ], "remote": [ { "type": "UserName" }, { "type": "HTTP_OIDC_GROUPIDS", "any_one_of": [ ".*@yeah.com$" ] "regex": true }, { "type": "HTTP_OIDC_GROUPIDS", "whitelist": [ "Project.*$" ], "regex": true } ] } ] } This allows any user with a claim containing a key with any value in ``HTTP_OIDC_GROUPIDS`` to be mapped to group with id ``0cd5e9``. Additionally, for every value in the ``HTTP_OIDC_GROUPIDS`` claim matching the string ``Project.*``, the user will be assigned to the project with that name. 
Condition Combinations ---------------------- Combinations of mappings conditions can also be done. ``empty``, ``any_one_of``, and ``not_any_of`` can all be used in the same rule, but cannot be repeated within the same condition. ``any_one_of`` and ``not_any_of`` are mutually exclusive within a condition's scope. So are ``whitelist`` and ``blacklist``. .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "id": "0cd5e9" } }, ], "remote": [ { "type": "UserName" }, { "type": "cn=IBM_Canada_Lab", "not_any_of": [ ".*@naww.com$" ], "regex": true }, { "type": "cn=IBM_USA_Lab", "any_one_of": [ ".*@yeah.com$" ] "regex": true } ] } ] } As before group names and users can also be provided in the local section. This allows any user with the following claim information to be mapped to group with id 0cd5e9. .. code-block:: json {"UserName":"@yeah.com"} {"cn=IBM_USA_Lab":"@yeah.com"} {"cn=IBM_Canada_Lab":"@yeah.com"} The following claims will be mapped: - any claim containing the key UserName. - any claim containing key cn=IBM_Canada_Lab that doesn't have the value @naww.com. - any claim containing key cn=IBM_USA_Lab that has value @yeah.com. Multiple Rules -------------- Multiple rules can also be utilized in a mapping. .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" }, "group": { "name": "non-contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "not_any_of": [ "Contractor", "SubContractor" ] } ] }, { "local": [ { "user": { "name": "{0}" }, "group": { "name": "contractors", "domain": { "id": "abc1234" } } } ], "remote": [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] } ] } ] } The above assigns groups membership basing on ``orgPersonType`` values: - neither ``Contractor`` nor ``SubContractor`` will belong to the ``non-contractors`` group. 
- either ``Contractor`` or ``SubContractor`` will belong to the ``contractors`` group. Rules are additive, so permissions will only be granted for the rules that succeed. All the remote conditions of a rule must be valid. When using multiple rules you can specify more than one effective user identification, but only the first match (ordered from top to bottom) will be considered and the others ignored. Since rules are additive, one can specify one user identification and this will also work. The best practice for multiple rules is to create a rule for just the user and another rule for just the groups. Below is the rules example repeated, but with global username mapping. .. code-block:: json { "rules": [{ "local": [{ "user": { "id": "{0}" } }], "remote": [{ "type": "UserType" }] }, { "local": [{ "group": { "name": "non-contractors", "domain": { "id": "abc1234" } } }], "remote": [{ "type": "orgPersonType", "not_any_of": [ "Contractor", "SubContractor" ] }] }, { "local": [{ "group": { "name": "contractors", "domain": { "id": "abc1234" } } }], "remote": [{ "type": "orgPersonType", "any_one_of": [ "Contractor", "SubContractor" ] }] }] } Auto-Provisioning ----------------- The mapping engine has the ability to aid in the auto-provisioning of resources when a federated user authenticates for the first time. This can be achieved using a specific mapping syntax that the mapping engine can parse and ultimately make decisions on. For example, consider the following mapping: .. code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "projects": [ { "name": "Production", "roles": [ { "name": "reader" } ] }, { "name": "Staging", "roles": [ { "name": "member" } ] }, { "name": "Project for {0}", "roles": [ { "name": "admin" } ] } ] } ], "remote": [ { "type": "UserName" } ] } ] } The semantics of the ``remote`` section have not changed. The difference between this mapping and the other examples is the addition of a ``projects`` section within the ``local`` rules.
The ``projects`` list supplies a list of projects that the federated user will be given access to. The projects will be automatically created if they don't exist when the user authenticates and the mapping engine has applied values from the assertion and mapped them into the ``local`` rules. In the above example, an authenticated federated user will be granted the ``reader`` role on the ``Production`` project, ``member`` role on the ``Staging`` project, and they will have ``admin`` role on the ``Project for jsmith``. It is important to note that the following constraints apply when auto-provisioning: * Projects are the only resource that will be created dynamically. * Projects will be created within the domain associated with the Identity Provider or the domain mapped via the attribute mapping (`federation_attribute_mapping_schema_version >= 2.0`). * The ``projects`` section of the mapping must also contain a ``roles`` section. + Roles within the project must already exist in the deployment or domain. * Assignments are actually created for the user, which is unlike the ephemeral group memberships. Since the creation of roles typically requires policy changes across other services in the deployment, it is expected that roles are created ahead of time. Federated authentication should also be considered idempotent if the attributes from the SAML assertion have not changed. In the example from above, if the user's name is still ``jsmith``, then no new projects will be created as a result of authentication. Mappings can be created that mix ``groups`` and ``projects`` within the ``local`` section. The mapping shown in the example above does not contain a ``groups`` section in the ``local`` rules. This will result in the federated user having direct role assignments on the projects in the ``projects`` list. The following example contains ``local`` rules comprised of both ``projects`` and ``groups``, which allow for direct role assignments and group memberships. ..
code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "projects": [ { "name": "Marketing", "roles": [ { "name": "member" } ] }, { "name": "Development project for {0}", "roles": [ { "name": "admin" } ] } ] }, { "group": { "name": "Finance", "domain": { "id": "6fe767" } } } ], "remote": [ { "type": "UserName" } ] } ] } In the above example, a federated user will receive direct role assignments on the ``Marketing`` project, as well as a dedicated project specific to the federated user's name. In addition to that, they will also be placed in the ``Finance`` group and receive all role assignments that group has on projects and domains. keystone-to-keystone -------------------- keystone-to-keystone federation also utilizes mappings, but has some differences. An attribute file (e.g. ``/etc/shibboleth/attribute-map.xml`` in a Shibboleth implementation) is used to add attributes to the mapping `context`. Attributes look as follows: .. code-block:: xml The service provider must contain a mapping as shown below. ``openstack_user``, and ``openstack_user_domain`` match to the attribute names we have in the Identity Provider. It will map any user with the name ``user1`` or ``admin`` in the ``openstack_user`` attribute and ``openstack_domain`` attribute ``default`` to a group with id ``abc1234``. .. code-block:: json { "rules": [ { "local": [ { "group": { "id": "abc1234" } } ], "remote": [ { "type": "openstack_user", "any_one_of": [ "user1", "admin" ] }, { "type":"openstack_user_domain", "any_one_of": [ "Default" ] } ] } ] } A keystone user's groups can also be mapped to groups in the service provider. For example, with the following attributes declared in Shibboleth's attributes file: .. code-block:: xml Then the following mapping can be used to map the user's group membership from the keystone IdP to groups in the keystone SP: .. 
code-block:: json { "rules": [ { "local": [ { "user": { "name": "{0}" } }, { "groups": "{1}" } ], "remote": [ { "type": "openstack_user" }, { "type": "openstack_groups" } ] } ] } ``openstack_user``, and ``openstack_groups`` will be matched by service provider to the attribute names we have in the Identity Provider. It will take the ``openstack_user`` attribute and finds in the assertion then inserts it directly in the mapping. The identity provider will set the value of ``openstack_groups`` by group name and domain name to which the user belongs in the Idp. Suppose the user belongs to 'group1' in domain 'Default' in the IdP then it will map to a group with the same name and same domain's name in the SP. The possible attributes that can be used in a mapping are `openstack_user`, `openstack_user_domain`, `openstack_roles`, `openstack_project`, `openstack_project_domain` and `openstack_groups`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/mellon.inc0000664000175000017500000001200600000000000022720 0ustar00zuulzuul00000000000000.. -*- rst -*- .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _mellon: ----------------- Setting Up Mellon ----------------- See :ref:`keystone-as-sp` before proceeding with these Mellon-specific instructions. Configuring Apache HTTPD for mod_auth_mellon -------------------------------------------- .. 
note:: You are advised to carefully examine the `mod_auth_mellon documentation`_. .. _mod_auth_mellon documentation: https://github.com/Uninett/mod_auth_mellon/blob/master/doc/user_guide/mellon_user_guide.adoc#installing-configuring-mellon Follow the steps outlined at: Keystone install guide for `SUSE`_, `RedHat`_ or `Ubuntu`_. .. _`SUSE`: ../../install/keystone-install-obs.html#configure-the-apache-http-server .. _`RedHat`: ../../install/keystone-install-rdo.html#configure-the-apache-http-server .. _`Ubuntu`: ../../install/keystone-install-ubuntu.html#configure-the-apache-http-server Install the Module ~~~~~~~~~~~~~~~~~~ Install the Apache module package. For example, on Ubuntu: .. code-block:: console # apt-get install libapache2-mod-auth-mellon The package and module name will differ between distributions. Configure mod_auth_mellon ~~~~~~~~~~~~~~~~~~~~~~~~~ Unlike ``mod_shib``, all of ``mod_auth_mellon``'s configuration is done in Apache, not in a separate config file. Set up the shared settings in a single ```` directive near the top in your keystone VirtualHost file, before your protected endpoints: .. code-block:: apache MellonEnable "info" MellonSPPrivateKeyFile /etc/apache2/mellon/sp.keystone.example.org.key MellonSPCertFile /etc/apache2/mellon/sp.keystone.example.org.cert MellonSPMetadataFile /etc/apache2/mellon/sp-metadata.xml MellonIdPMetadataFile /etc/apache2/mellon/idp-metadata.xml MellonEndpointPath /v3/mellon MellonIdP "IDP" Configure Protected Endpoints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configure each protected path to use the ``Mellon`` AuthType: .. code-block:: apache Require valid-user AuthType Mellon MellonEnable auth Do the same for the WebSSO auth paths if using horizon as a single sign-on frontend: .. 
code-block:: apache Require valid-user AuthType Mellon MellonEnable auth Require valid-user AuthType Mellon MellonEnable auth Configure the Mellon Service Provider Metadata ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Mellon provides a script called ``mellon_create_metadata.sh``_ which generates the values for the config directives ``MellonSPPrivateKeyFile``, ``MellonSPCertFile``, and ``MellonSPMetadataFile``. Run the script: .. code-block:: console $ ./mellon_create_metadata.sh \ https://sp.keystone.example.org/mellon \ http://sp.keystone.example.org/v3/OS-FEDERATION/identity_providers/samltest/protocols/saml2/auth/mellon The first parameter is used as the entity ID, a URN of your choosing that must uniquely identify the Service Provider to the Identity Provider. The second parameter is the full URL for the endpoint path corresponding to the parameter ``MellonEndpointPath``. After generating the keypair and metadata, copy the files to the locations given by the ``MellonSPPrivateKeyFile`` and ``MellonSPCertFile`` settings in your Apache configuration. Upload the Service Provider's Metadata file which you just generated to your Identity Provider. This is the file used as the value of the `MellonSPMetadataFile` in the config. The IdP may provide a webpage where you can upload the file, or you may be required to submit the file using `wget` or `curl`. Please check your IdP documentation for details. Exchange Metadata ~~~~~~~~~~~~~~~~~ Fetch your Identity Provider's Metadata file and copy it to the path specified by the ``MellonIdPMetadataFile`` setting in your Apache configuration. .. code-block:: console $ wget -O /etc/apache2/mellon/idp-metadata.xml https://samltest.id/saml/idp Remember to reload Apache after finishing configuring Mellon: .. code-block:: console # systemctl reload apache2 .. 
_`mellon_create_metadata.sh`: https://github.com/UNINETT/mod_auth_mellon/blob/master/mellon_create_metadata.sh Continue configuring keystone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :ref:`Continue configuring keystone ` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/openidc.inc0000664000175000017500000002163100000000000023057 0ustar00zuulzuul00000000000000.. -*- rst -*- .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _federation_openidc: ------------------------- Setting Up OpenID Connect ------------------------- See :ref:`keystone-as-sp` before proceeding with these OpenIDC-specific instructions. These examples use Google as an OpenID Connect Identity Provider. The Service Provider must be added to the Identity Provider in the `Google API console`_. .. _Google API console: https://console.developers.google.com/ Configuring Apache HTTPD for mod_auth_openidc --------------------------------------------- .. note:: You are advised to carefully examine the `mod_auth_openidc documentation`_. .. _mod_auth_openidc documentation: https://github.com/zmartzone/mod_auth_openidc#how-to-use-it Install the Module ~~~~~~~~~~~~~~~~~~ Install the Apache module package. For example, on Ubuntu: .. code-block:: console # apt-get install libapache2-mod-auth-openidc The package and module name will differ between distributions. 
Configure mod_auth_openidc ~~~~~~~~~~~~~~~~~~~~~~~~~~ In the Apache configuration for the keystone VirtualHost, set the following OIDC options: .. code-block:: apache OIDCClaimPrefix "OIDC-" OIDCResponseType "id_token" OIDCScope "openid email profile" OIDCProviderMetadataURL https://accounts.google.com/.well-known/openid-configuration OIDCOAuthVerifyJwksUri https://www.googleapis.com/oauth2/v3/certs OIDCClientID OIDCClientSecret OIDCCryptoPassphrase OIDCRedirectURI https://sp.keystone.example.org/v3/redirect_uri ``OIDCScope`` is the list of attributes that the user will authorize the Identity Provider to send to the Service Provider. ``OIDCClientID`` and ``OIDCClientSecret`` must be generated and obtained from the Identity Provider. ``OIDCProviderMetadataURL`` is a URL from which the Service Provider will fetch the Identity Provider's metadata. ``OIDCOAuthVerifyJwksUri`` is a URL from which the Service Provider will download the public key from the Identity Provider to check if the user's access token is valid or not, this configuration must be used while using the AuthType ``auth-openidc``, when using the AuthType ``openid-connect`` and the OIDCProviderMetadataURL is configured, this property will not be necessary. ``OIDCRedirectURI`` is a vanity URL that must point to a protected path that does not have any content, such as an extension of the protected federated auth path. It should not match any Keystone API endpoints or mod_auth_openidc will handle requests to the endpoint instead of Keystone. This can lead to unusual errors and behaviors from Keystone. .. note:: If using a mod_wsgi version less than 4.3.0, then the `OIDCClaimPrefix` must be specified to have only alphanumerics or a dash ("-"). This is because `mod_wsgi blocks headers that do not fit this criteria`_. .. 
_mod_wsgi blocks headers that do not fit this criteria: http://modwsgi.readthedocs.org/en/latest/release-notes/version-4.3.0.html#bugs-fixed Configure Protected Endpoints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configure each protected path to use the ``openid-connect`` AuthType: .. code-block:: apache Require valid-user AuthType openid-connect Require valid-user AuthType openid-connect .. note:: To add support to Bearer Access Token authentication flow that is used by applications that do not adopt the browser flow, such the OpenStack CLI, you will need to change the AuthType from ``openid-connect`` to ``auth-openidc``. Do the same for the WebSSO auth paths if using horizon: .. code-block:: apache Require valid-user AuthType openid-connect Require valid-user AuthType openid-connect Remember to reload Apache after altering the VirtualHost: .. code-block:: console # systemctl reload apache2 .. note:: When creating :ref:`mapping rules `, in keystone, note that the 'remote' attributes will be prefixed, with ``HTTP_``, so for instance, if you set ``OIDCClaimPrefix`` to ``OIDC-``, then a typical remote value to check for is: ``HTTP_OIDC_ISS``. Configuring Multiple Identity Providers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To configure multiples Identity Providers in your environment you will need to set your OIDC options like the following options: .. code-block:: apache OIDCClaimPrefix "OIDC-" OIDCResponseType "id_token" OIDCScope "openid email profile" OIDCMetadataDir OIDCCryptoPassphrase OIDCRedirectURI https://sp.keystone.example.org/redirect_uri OIDCOAuthVerifyCertFiles # # # The ``OIDCOAuthVerifyCertFiles`` is a tuple separated with `space` containing the key-id (kid) of the Issuer's public key and a path to the Issuer certificate. 
The separator ``#`` is used to split the (``kid``) and the public certificate address The metadata folder configured in the option ``OIDCMetadataDir`` must have all your Identity Providers configurations, the name of the files will be the name (with path) of the Issuers like: .. code-block:: - | - accounts.google.com.client | - accounts.google.com.conf | - accounts.google.com.provider | - keycloak.example.org%2Fauth%2Frealms%2Fidp.client | - keycloak.example.org%2Fauth%2Frealms%2Fidp.conf | - keycloak.example.org%2Fauth%2Frealms%2Fidp.provider .. note:: The name of the file must be url-encoded if needed, as the Apache2 mod_auth_openidc will get the raw value from the query parameter ``iss`` from the http request and check if there is a metadata with this name, as the query parameter is url-encoded, so the metadata file name need to be encoded too. For example, if you have an Issuer with ``/`` in the URL, then you need to escape it to ``%2F`` by applying a URL escape in the file name. The content of these files must be a JSON like ``accounts.google.com.client``: .. code-block:: json { "client_id":"", "client_secret":"" } The ``.client`` file handles the SP credentials in the Issuer. ``accounts.google.com.conf``: This file will be a JSON that overrides some of OIDC options. The options that are able to be overridden are listed in the `OpenID Connect Apache2 plugin documentation`_. .. _`OpenID Connect Apache2 plugin documentation`: https://github.com/zmartzone/mod_auth_openidc/wiki/Multiple-Providers#opclient-configuration If you do not want to override the config values, you can leave this file as an empty JSON like ``{}``. ``accounts.google.com.provider``: This file will contain all specifications about the IdentityProvider. To simplify, you can just use the JSON returned in the ``.well-known`` endpoint: .. 
code-block:: json { "issuer": "https://accounts.google.com", "authorization_endpoint": "https://accounts.google.com/o/oauth2/v2/auth", "token_endpoint": "https://oauth2.googleapis.com/token", "userinfo_endpoint": "https://openidconnect.googleapis.com/v1/userinfo", "revocation_endpoint": "https://oauth2.googleapis.com/revoke", "jwks_uri": "https://www.googleapis.com/oauth2/v3/certs", "response_types_supported": [ "code", "token", "id_token", "code token", "code id_token", "token id_token", "code token id_token", "none" ], "subject_types_supported": [ "public" ], "id_token_signing_alg_values_supported": [ "RS256" ], "scopes_supported": [ "openid", "email", "profile" ], "token_endpoint_auth_methods_supported": [ "client_secret_post", "client_secret_basic" ], "claims_supported": [ "aud", "email", "email_verified", "exp", "family_name", "given_name", "iat", "iss", "locale", "name", "picture", "sub" ], "code_challenge_methods_supported": [ "plain", "S256" ] } Continue configuring keystone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :ref:`Continue configuring keystone ` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/federation/shibboleth.inc0000664000175000017500000002103200000000000023554 0ustar00zuulzuul00000000000000.. -*- rst -*- .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. 
_shibboleth: --------------------- Setting up Shibboleth --------------------- See :ref:`keystone-as-sp` before proceeding with these Shibboleth-specific instructions. .. note:: The examples below are for Ubuntu 16.04, for which only version 2 of the Shibboleth Service Provider is available. Version 3 is available for other distributions and the configuration should be identical to version 2. Configuring Apache HTTPD for mod_shib ------------------------------------- .. note:: You are advised to carefully examine the `mod_shib Apache configuration documentation`_. .. _mod_shib Apache configuration documentation: https://wiki.shibboleth.net/confluence/display/SHIB2/NativeSPApacheConfig Configure keystone under Apache, following the steps in the install guide for `SUSE`_, `RedHat`_ or `Ubuntu`_. .. _`SUSE`: ../../install/keystone-install-obs.html#configure-the-apache-http-server .. _`RedHat`: ../../install/keystone-install-rdo.html#configure-the-apache-http-server .. _`Ubuntu`: ../../install/keystone-install-ubuntu.html#configure-the-apache-http-server Install the Module ~~~~~~~~~~~~~~~~~~ Install the Apache module package. For example, on Ubuntu: .. code-block:: console # apt-get install libapache2-mod-shib2 The package and module name will differ between distributions. Configure Protected Endpoints ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ In the Apache configuration for the keystone VirtualHost, set an additional ```` which is not part of keystone's API: .. code-block:: apache SetHandler shib If you are using ``mod_proxy``, for example to proxy requests to the ``/identity`` path to keystone's UWSGI service, you must exempt this Shibboleth endpoint from it: .. code-block:: apache Proxypass Shibboleth.sso ! Configure each protected path to use the ``shibboleth`` AuthType: .. 
code-block:: apache Require valid-user AuthType shibboleth ShibRequestSetting requireSession 1 ShibExportAssertion off ShibRequireSession On ShibRequireAll On Do the same for the WebSSO auth paths if using horizon as a single sign-on frontend: .. code-block:: apache Require valid-user AuthType shibboleth ShibRequestSetting requireSession 1 ShibExportAssertion off ShibRequireSession On ShibRequireAll On Require valid-user AuthType shibboleth ShibRequestSetting requireSession 1 ShibExportAssertion off ShibRequireSession On ShibRequireAll On Remember to reload Apache after altering the VirtualHost: .. code-block:: console # systemctl reload apache2 Configuring mod_shib -------------------- .. note:: You are advised to examine `Shibboleth Service Provider Configuration documentation `_ Generate a keypair ~~~~~~~~~~~~~~~~~~ For all SAML Service Providers, a PKI key pair must be generated and exchanged with the Identity Provider. The ``mod_shib`` package on the Ubuntu distribution provides a utility to generate the key pair: .. code-block:: console # shib-keygen -y which will generate a key pair under ``/etc/shibboleth``. In other cases, the package might generate the key pair automatically upon installation. Configure metadata ~~~~~~~~~~~~~~~~~~ ``mod_shib`` also has its own configuration file at ``/etc/shibboleth/shibboleth2.xml`` that must be altered, as well as its own daemon. First, give the Service Provider an entity ID. This is a URN that you choose that must be globally unique to the Identity Provider: .. code-block:: xml Depending on your Identity Provider, you may also want to change the REMOTE_USER setting, more on that in a moment. Set the entity ID of the Identity Provider (this is the same as the value you provided for ``--remote-id`` in `Identity Provider`): .. code-block:: xml Additionally, if you want to enable ECP (required for Keystone-to-Keystone), the SSO tag for this entity must also have the ECP flag set: .. 
code-block:: xml Tell Shibboleth where to find the metadata of the Identity Provider. You could either tell it to fetch it from a URI or point it to a local file. For example, pointing to a local file: .. code-block:: xml or pointing to a remote location: .. code-block:: xml When you are finished configuring ``shibboleth2.xml``, restart the ``shibd`` daemon: .. code-block:: console # systemctl restart shibd Check the ``shibd`` logs in ``/var/log/shibboleth/shibd.log`` and ``/var/log/shibboleth/shibd_warn.log`` for errors or warnings. Configure allowed attributes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: For more information see the `attributes documentation `_ By default, ``mod_shib`` does not pass all attributes received from the Identity Provider to keystone. If your Identity Provider does not use attributes known to ``shibd``, you must configure them. For example, `samltest.id` uses a custom UID attribute. It is not discoverable in the Identity Provider metadata, but the attribute name and type is logged in the ``mod_shib`` logs when an authentication attempt is made. To allow the attribute, add it to ``/etc/shibboleth/attribute-map.xml``: .. code-block:: xml You may also want to use that attribute as a value for the ``REMOTE_USER`` variable, which will make the ``REMOTE_USER`` variable usable as a parameter to your mapping rules. To do so, add it to ``/etc/shibboleth/shibboleth2.xml``: .. code-block:: xml Similarly, if using keystone as your Identity Provider, several custom attributes will be needed in ``/etc/shibboleth/attribute-map.xml``: .. code-block:: xml And update the ``REMOTE_USER`` variable in ``/etc/shibboleth/shibboleth2.xml`` if desired: .. code-block:: xml Restart the ``shibd`` daemon after making these changes: .. code-block:: console # systemctl restart shibd Exchange Metadata ~~~~~~~~~~~~~~~~~ Once configured, the Service Provider metadata is available to download: .. 
code-block:: console # wget https://sp.keystone.example.org/Shibboleth.sso/Metadata Upload your Service Provider's metadata to your Identity Provider. This step depends on your Identity Provider choice and is not covered here. If keystone is your Identity Provider you do not need to upload this file. Continue configuring keystone ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :ref:`Continue configuring keystone ` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/fernet-token-faq.rst0000664000175000017500000004422300000000000022525 0ustar00zuulzuul00000000000000=================================== Fernet - Frequently Asked Questions =================================== The following questions have been asked periodically since the initial release of the fernet token format in Kilo. What is a fernet token? ~~~~~~~~~~~~~~~~~~~~~~~ A fernet token is a bearer token that represents user authentication. Fernet tokens contain a limited amount of identity and authorization data in a `MessagePacked `_ payload. The payload is then wrapped as a `Fernet `_ message for transport, where Fernet provides the required web safe characteristics for use in URLs and headers. The data inside a fernet token is protected using symmetric encryption keys, or fernet keys. What is a fernet key? ~~~~~~~~~~~~~~~~~~~~~ A fernet key is used to encrypt and decrypt fernet tokens. Each key is actually composed of two smaller keys: a 128-bit AES encryption key and a 128-bit SHA256 HMAC signing key. The keys are held in a key repository that keystone passes to a library that handles the encryption and decryption of tokens. What are the different types of keys? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A key repository is required by keystone in order to create fernet tokens. These keys are used to encrypt and decrypt the information that makes up the payload of the token. Each key in the repository can have one of three states. 
The state of the key determines how keystone uses a key with fernet tokens. The different types are as follows: Primary key: There is only ever one primary key in a key repository. The primary key is allowed to encrypt and decrypt tokens. This key is always named as the highest index in the repository. Secondary key: A secondary key was at one point a primary key, but has been demoted in place of another primary key. It is only allowed to decrypt tokens. Since it was the primary at some point in time, its existence in the key repository is justified. Keystone needs to be able to decrypt tokens that were created with old primary keys. Staged key: The staged key is a special key that shares some similarities with secondary keys. There can only ever be one staged key in a repository and it must exist. Just like secondary keys, staged keys have the ability to decrypt tokens. Unlike secondary keys, staged keys have never been a primary key. In fact, they are opposites since the staged key will always be the next primary key. This helps clarify the name because they are the next key staged to be the primary key. This key is always named as ``0`` in the key repository. So, how does a staged key help me and why do I care about it? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The fernet keys have a natural lifecycle. Each key starts as a staged key, is promoted to be the primary key, and then demoted to be a secondary key. New tokens can only be encrypted with a primary key. Secondary and staged keys are never used to encrypt token. The staged key is a special key given the order of events and the attributes of each type of key. The staged key is the only key in the repository that has not had a chance to encrypt any tokens yet, but it is still allowed to decrypt tokens. As an operator, this gives you the chance to perform a key rotation on one keystone node, and distribute the new key set over a span of time. 
This does not require the distribution to take place in an ultra short period of time. Tokens encrypted with a primary key can be decrypted, and validated, on other nodes where that key is still staged. Where do I put my key repository? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The key repository is specified using the ``key_repository`` option in the keystone configuration file. The keystone process should be able to read and write to this location but it should be kept secret otherwise. Currently, keystone only supports file-backed key repositories. .. code-block:: ini [fernet_tokens] key_repository = /etc/keystone/fernet-keys/ What is the recommended way to rotate and distribute keys? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The :command:`keystone-manage` command line utility includes a key rotation mechanism. This mechanism will initialize and rotate keys but does not make an effort to distribute keys across keystone nodes. The distribution of keys across a keystone deployment is best handled through configuration management tooling, however ensure that the new primary key is distributed first. Use :command:`keystone-manage fernet_rotate` to rotate the key repository. Do fernet tokens still expire? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yes, fernet tokens can expire just like any other keystone token formats. Why should I choose fernet tokens over UUID tokens? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Even though fernet tokens operate very similarly to UUID tokens, they do not require persistence or leverage the configured token persistence driver in any way. The keystone token database no longer suffers bloat as a side effect of authentication. Pruning expired tokens from the token database is no longer required when using fernet tokens. Because fernet tokens do not require persistence, they do not have to be replicated. As long as each keystone node shares the same key repository, fernet tokens can be created and validated instantly across nodes. 
Why should I choose fernet tokens over PKI or PKIZ tokens? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The arguments for using fernet over PKI and PKIZ remain the same as UUID, in addition to the fact that fernet tokens are much smaller than PKI and PKIZ tokens. PKI and PKIZ tokens still require persistent storage and can sometimes cause issues due to their size. This issue is mitigated when switching to fernet because fernet tokens are kept under a 250 byte limit. PKI and PKIZ tokens typically exceed 1600 bytes in length. The length of a PKI or PKIZ token is dependent on the size of the deployment. Bigger service catalogs will result in longer token lengths. This pattern does not exist with fernet tokens because the contents of the encrypted payload is kept to a minimum. Should I rotate and distribute keys from the same keystone node every rotation? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ No, but the relationship between rotation and distribution should be lock-step. Once you rotate keys on one keystone node, the key repository from that node should be distributed to the rest of the cluster. Once you confirm that each node has the same key repository state, you could rotate and distribute from any other node in the cluster. If the rotation and distribution are not lock-step, a single keystone node in the deployment will create tokens with a primary key that no other node has as a staged key. This will cause tokens generated from one keystone node to fail validation on other keystone nodes. How do I add new keystone nodes to a deployment? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The keys used to create fernet tokens should be treated like super secret configuration files, similar to an SSL secret key. Before a node is allowed to join an existing cluster, issuing and validating tokens, it should have the same key repository as the rest of the nodes in the cluster. How should I approach key distribution? 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Remember that key distribution is only required in multi-node keystone deployments. If you only have one keystone node serving requests in your deployment, key distribution is unnecessary. Key distribution is a problem best approached from the deployment's current configuration management system. Since not all deployments use the same configuration management systems, it makes sense to explore options around what is already available for managing keys, while keeping the secrecy of the keys in mind. Many configuration management tools can leverage something like ``rsync`` to manage key distribution. Key rotation is a single operation that promotes the current staged key to primary, creates a new staged key, and prunes old secondary keys. It is easiest to do this on a single node and verify the rotation took place properly before distributing the key repository to the rest of the cluster. The concept behind the staged key breaks the expectation that key rotation and key distribution have to be done in a single step. With the staged key, we have time to inspect the new key repository before syncing state with the rest of the cluster. Key distribution should be an operation that can run in succession until it succeeds. The following might help illustrate the isolation between key rotation and key distribution. #. Ensure all keystone nodes in the deployment have the same key repository. #. Pick a keystone node in the cluster to rotate from. #. Rotate keys. #. Was it successful? #. If no, investigate issues with the particular keystone node you rotated keys on. Fernet keys are small and the operation for rotation is trivial. There should not be much room for error in key rotation. It is possible that the user does not have the ability to write new keys to the key repository. Log output from ``keystone-manage fernet_rotate`` should give more information into specific failures. #. If yes, you should see a new staged key. 
The old staged key should be the new primary. Depending on the ``max_active_keys`` limit you might have secondary keys that were pruned. At this point, the node that you rotated on will be creating fernet tokens with a primary key that all other nodes should have as the staged key. This is why we checked the state of all key repositories in Step one. All other nodes in the cluster should be able to decrypt tokens created with the new primary key. At this point, we are ready to distribute the new key set. #. Distribute the new key repository. #. Was it successful? #. If yes, you should be able to confirm that all nodes in the cluster have the same key repository that was introduced in Step 3. All nodes in the cluster will be creating tokens with the primary key that was promoted in Step 3. No further action is required until the next schedule key rotation. #. If no, try distributing again. Remember that we already rotated the repository and performing another rotation at this point will result in tokens that cannot be validated across certain hosts. Specifically, the hosts that did not get the latest key set. You should be able to distribute keys until it is successful. If certain nodes have issues syncing, it could be permission or network issues and those should be resolved before subsequent rotations. How long should I keep my keys around? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The fernet tokens that keystone creates are only secure as the keys creating them. With staged keys the penalty of key rotation is low, allowing you to err on the side of security and rotate weekly, daily, or even hourly. Ultimately, this should be less time than it takes an attacker to break a ``AES256`` key and a ``SHA256 HMAC``. Is a fernet token still a bearer token? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Yes, and they follow exactly the same validation path as UUID tokens, with the exception of being written to, and read from, a back end. 
If someone compromises your fernet token, they have the power to do all the operations you are allowed to do. What if I need to revoke all my tokens? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To invalidate every token issued from keystone and start fresh, remove the current key repository, create a new key set, and redistribute it to all nodes in the cluster. This will render every token issued from keystone as invalid regardless if the token has actually expired. When a client goes to re-authenticate, the new token will have been created with a new fernet key. What can an attacker do if they compromise a fernet key in my deployment? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If any key used in the key repository is compromised, an attacker will be able to build their own tokens. If they know the ID of an administrator on a project, they could generate administrator tokens for the project. They will be able to generate their own tokens until the compromised key has been removed from the repository. I rotated keys and now tokens are invalidating early, what did I do? ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Using fernet tokens requires some awareness around token expiration and the key lifecycle. You do not want to rotate so often that secondary keys are removed that might still be needed to decrypt unexpired tokens. If this happens, you will not be able to decrypt the token because the key the was used to encrypt it is now gone. Only remove keys that you know are not being used to encrypt or decrypt tokens. For example, your token is valid for 24 hours and we want to rotate keys every six hours. We will need to make sure tokens that were created at 08:00 AM on Monday are still valid at 07:00 AM on Tuesday, assuming they were not prematurely revoked. To accomplish this, we will want to make sure we set ``max_active_keys=6`` in our keystone configuration file. 
This will allow us to hold all keys that might still be required to validate a previous token, but keeps the key repository limited to only the keys that are needed. The number of ``max_active_keys`` for a deployment can be determined by dividing the token lifetime, in hours, by the frequency of rotation in hours and adding two. Better illustrated as:: token_expiration = 24 rotation_frequency = 6 max_active_keys = (token_expiration / rotation_frequency) + 2 The reason for adding two additional keys to the count is to include the staged key and a buffer key. .. note:: If validating expired tokens is needed (for example when services are configured to use ServiceToken auth), the value of ``allow_expired_window`` option from the ``[token]`` config section should also be taken into account, so that the formula to calculate the max_active_keys is max_active_keys = ((token_expiration + allow_expired_window) / rotation_frequency) + 2 This can be shown based on the previous example. We initially setup the key repository at 6:00 AM on Monday, and the initial state looks like: .. code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 1 (primary key) All tokens created after 6:00 AM are encrypted with key ``1``. At 12:00 PM we will rotate keys again, resulting in, .. code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 1 (secondary key) -rw------- 1 keystone keystone 44 2 (primary key) We are still able to validate tokens created between 6:00 - 11:59 AM because the ``1`` key still exists as a secondary key. All tokens issued after 12:00 PM will be encrypted with key ``2``. At 6:00 PM we do our next rotation, resulting in: .. 
code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 1 (secondary key) -rw------- 1 keystone keystone 44 2 (secondary key) -rw------- 1 keystone keystone 44 3 (primary key) It is still possible to validate tokens issued from 6:00 AM - 5:59 PM because keys ``1`` and ``2`` exist as secondary keys. Every token issued until 11:59 PM will be encrypted with key ``3``, and at 12:00 AM we do our next rotation: .. code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 1 (secondary key) -rw------- 1 keystone keystone 44 2 (secondary key) -rw------- 1 keystone keystone 44 3 (secondary key) -rw------- 1 keystone keystone 44 4 (primary key) Just like before, we can still validate tokens issued from 6:00 AM the previous day until 5:59 AM today because keys ``1`` - ``4`` are present. At 6:00 AM, tokens issued from the previous day will start to expire and we do our next scheduled rotation: .. code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 1 (secondary key) -rw------- 1 keystone keystone 44 2 (secondary key) -rw------- 1 keystone keystone 44 3 (secondary key) -rw------- 1 keystone keystone 44 4 (secondary key) -rw------- 1 keystone keystone 44 5 (primary key) Tokens will naturally expire after 6:00 AM, but we will not be able to remove key ``1`` until the next rotation because it encrypted all tokens from 6:00 AM to 12:00 PM the day before. Once we do our next rotation, which is at 12:00 PM, the ``1`` key will be pruned from the repository: .. 
code-block:: console $ ls -la /etc/keystone/fernet-keys/ drwx------ 2 keystone keystone 4096 . drwxr-xr-x 3 keystone keystone 4096 .. -rw------- 1 keystone keystone 44 0 (staged key) -rw------- 1 keystone keystone 44 2 (secondary key) -rw------- 1 keystone keystone 44 3 (secondary key) -rw------- 1 keystone keystone 44 4 (secondary key) -rw------- 1 keystone keystone 44 5 (secondary key) -rw------- 1 keystone keystone 44 6 (primary key) If keystone were to receive a token that was created between 6:00 AM and 12:00 PM the day before, encrypted with the ``1`` key, it would not be valid because it was already expired. This makes it possible for us to remove the ``1`` key from the repository without negative validation side-effects. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4821143 keystone-26.0.0/doc/source/admin/figures/0000775000175000017500000000000000000000000020264 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/figures/keystone-federation.png0000664000175000017500000014773300000000000024770 0ustar00zuulzuul00000000000000PNG  IHDR2n IDATx^pɺaffffFff8̌sn[;J؉$kFg^]{xߙVOO? 
P(@ P@Wu5 (@ P>/ P(@ PA_.Q(@ PA(@ Pt(Ae(@ P(k(@ P:`ᠲK(@ P`5@ P(@ 0pP% P(@ 0(@ Pu8(@ Py P(@ P@ :Tv(@ P (@ P(C}*D P(@}^(@ PС](@ P> P(@ PA_.Q(@ PA(@ Pt(Ae(@ P(k(@ P:`ᠲK(@ P`5@ P(@ 0pP% P(@ 0(@ Pu8(@ Py P(@ P@ :Tv(@ P (@ P(C}*D P(@}^(@ PС](@ P> P(@ PA_.Q(@ PA(@ Pt(Ae(@ P(k(@ P:`ᠲK o߾5+ lذ:OZ .B ^k!qb+)@; 6 ݻw7mիZj ޽{5zȑԩ:RС]=FksU iG97 )oOmE 4oܣJ*aʕ\ `N0!jժeAZWˡ(`}}뛲D 8L2ظqd…sN޵klb;l QЀM# 0;XM/_D1?}R(@ P PepKA\r~3J(H"Èq0c 4m8&wcC(@ P>/ P,f͚aڴiU>g9sO|#SL'_94-c)pNR;HΦL;vDÆ 77oިm3ϟnL)_~Z*F]OkZ~ݾ}{?OԒ%KPzuu<;L8Qի;Ν;En.OnVR&Jn^&OP& 'Q?x*(🀭~!':+\Ȑ!-c͛ׯ_mQEOr`z.[%'O͛}S=_x%KݻwD~_JHXwaaNK2&~9ddV1"ߏ4i)<57dl0CA_^$3y C5=d(ɓ0tH8YK[зf;-%]6<==c!C w9Y"ARؕ!C5ٳ%J,q1oQ GQtG_ngϮz&}w+={Tke&|ܸq0aE9KA픰*(]q$I VXx5jd֡Goz Ge˖)ӵ!BP5:*?x[6+/]tÑ5kV9tGr>}jVܤoΜ94hP2(YFo>df_H4-ȑGUoѢZ/xdiҋ/|ԕ/_>;հ P`з7knle3-B͚5}֭[?(,0=KomovZ kذab $KGx|MQٲeN\2VZe  xw5[Z>}z:tC6On-#˳f2;Rr-^5oƌ}fi:uꨗfvX{зv;} rҥKéM6W7hoi]|&M|{GeoZ0pa`}%| P@ zE `]wdow[fzk^d1b0of--3N߂ȑ#՛l};EY鶙r9]vd{"E-Wv ׫WMܼ G*)@ 0kn` 8-[3]n#kStߒ%71s}koӧuCz|r]e]7=+i{HVviժcg\Aͣ "o۶ ŋ7rlpرb}?//^|ҾރA?UT|͖-[| r%7(e|y !۪ʮIA84!fLҝ(@'`wf7)`m[})F]4]"Nd;ȑ#‡pGR8 CL00ރA_-o5=,ꍲ?ӣqƘ1c_Kn޼ycV)/YQ=Yt$`/<ǹunzv!O>rC ar޼yf/aʞ=;dp)3o YK^,o`M۷&Jz_&caX^zɓ'7+S\/]d֯8qٳfmO ( 83 `/M'!ʲ!²l%K,fEIR!BPzْȬÚk`6mʕ+!uFT4g5.#/QF+WLR"m$;0y?}\ P@ ZA$`ˠ˲D^%;X:d9|KNs[C?t9 oFm߅}ӿUʖu ޗxzjJ 2eRo~ٳ P?0agA_ʑoBdٓ7 Cre̘iuc`Z P @R@"t 5}v|,EeZym۶j}]2c/;<3 0;㨳(@ P`(@ P(>S(@ ^A_CR(@ 83:L P({}1;H P( 83(@ Pu? (@ P3 0;㨳(@ P`(@ P(>S(@ ^A_CR(@ 83:L P({}1;H P( 83(@ Pu? (@ P3 0;㨳(@ P`(@ P(>S(@ ^A_CR(@ 83:L P({}1;H P( 83(@ Pu? =y{;=~|ϟ*(" " B!~pH7F"x(@ 0ۜP >gcP@1#FT17},Dd(@0keN >~J*8njXQ#r:8uP& ߀} ?x BwAlQ$K\ȳ(@ @A, KtV칆[ hN` ƌ 6bl/,%хИ\cs)!} J5yqP@s57fl0&c{)`aFZ  mጾƌ6bl/, .(9 36Z`ڈJ P@c+W-} K 0khT x0ѯ̠ϋXɠ1c)5}K 3]9 ZX3Z3Z`ڈ+cA Π1c{)5}K Ab,t4&z bАMw En]!g9\޽mj9L!92kv0;vQ@/ zI) A}dS 3xrM'bܼH)?gܽz#hr򹭫|9k5]pt}G!0"?ߦVA\>Mjv~ZM~ƀ VAPQdUc_|czn&~NONn?" 
P@ ~9+~ڿXOTEK8= cزcp}G !`e gO#mk&nP^[?W^G{3w[;7IlU[v?t+~I7>!f( >l^h 3 Gocx(|qX(Z&$?}t-}Yf=~^Lx.l^97ǗO-V\-Z7K`>"ca I뉰#"U(_9$ITv};j* 8s;{c/30Aޭh_yFqT~NFU|F1?WUHp'Aڥ3Tm=FG'^$ -n 'ARDo_ËgԹG-@"Ҿ7_ r\}OG2ø=+enX 떸cH%/F+ѽ[P 1 OΗo1Z̸9s3bĎo,ǿ^ϟ<@-ה-*k IDATi#Ĭ} Ԩ7,x]2sNL9 buxx/Çfs!PH4 \U7X?}DL0l:l8\L@2]65.2Nwo^A .F`*Wa7g~@ h\A_;!@o+7{(Gv#G5rqV}%U їK486^7E6UUp>|VRjP6|$t0UP9~aaH.+e,o2r(&Kw$67 !oQ5n]v}&D>.\zEq1t<JU=w3ܰDӺn{bòSKkN0"o6|UaQdMK1Pm^9 ]R7co۪k x1Ѳ28sl/5+nM^s޳eF`jG/F}G`p3&` fr[;; D^keyHb) p1$4=S#m<aa{צիZZ2nny/TЗzNתY]WGEcO͂U3v\e0 y;oLW,xߎgpY u_ G޼q'UrW ѺXzM3G!kEt6wJnl?Kv]SͫKR8 Zv2՚skW%ǔ5+QY7n݇}TxsRM}G!`N,`% 8~ A 4j_?wD1o%#'䋫~>R~5;5tť3GѼ揧~r}D~B^ACH,:)yl 2;.A?1Aq97/}],Z8&Hfq=/JJa-]b&v [ij5bf̹%˝V̢4-uSͰc1pPzOP?KnJ*剣Z\wzo$Iԣ6c@H2&,uؼAߡu0 h=_i#/AQ`Bxyi%Kp[bKlsL—/TؓgW쿇a™}>4gk}llر~*3mnS3/n\9q Y?ߴ]|]M,Xt 2xV/@.6`e۾Yq)Cʒc,ͱ _yBZqtzX$?R5*[n o~ߪjNtː5+EӆIǡ(WQfϟ V }_7q2wLKVW7*-dǣ0oe_f8Gj^5oM]ڂ^:tSO7asjO!A߻̒cy=W!7w5 y-Mr_}|YbSC |U~_)w iޖ+5lehk0{-V%*5DfջO`KA_N/kg2eTy2QsvQ[V́{7ia(S3b3 ObH}M`wqf/u*`j`FA\p dFҝ?~~Tx1j5j:GJ֝woR.yc6}+߭n_?߂齖M2> ݆{h z& ncĬm:$653oھqc0ݺW s~J<2"bhp?"Df[.]*H>޽y}[W \Ȑ;/ s Š٣,o8E|g=^|H&H~_A/Ae&A_ޤj_rur5շ&A߷lyQtk\\+rԘק^S~` O<{{,7/QqWl>2mϤAmc*KzMm=[o6*nFr czc.>2} H8, );׊-f|~RĊ `w!a{(;} );L_$OvEՌ~P׌M²Y#ᝧ<-o 5C:5o8vmXsF,CvYɐ gϺŒӖfe~Ƈqq}( &2e鳛ӶzNܽyv wi0囐eF`Y|e[MXoʍ|cM//:KMP>}-3yɸϝ{7/ŗ CP@ Zv Apno_FaGG^x!C#J*?eXGO3ſ;_f[T̄OcSna{KïFĨ1,{x ~/O5F\Dl,gT"c4T uA?@Y)HA߉]՟FX7SC|%}"g:j"ˁY>l(`riF߆LU,_צZ2r$L֡O`aO5Q9sk~\^*p\뗐qewڭBv`{UupͩYVAi׃!ɠٴL*{1$BZ~{0[_%R (a}  N 6`з91+ A?N%S6rF6,00Z A?_tN8;=Yl$`y3ۈRf1 SJA4,cӝ^A/P 6'f!}S'n⩃i7q!V 9n@Xs{SgʃRDgyAP`з/KM A?wVt jZ o_i['a ۲ӆAsj 7{"4/av D?l%o+YK;~.}Ȯ5qv4>KT?o{h8?Kݴ"Ef*v#ݐ%oI oBuPa}"@`wa([Cϙ3uhJL3Eqkv݇Acvg7/E{cȬ4:)+gMQn9}[ٲ\ PKAW4,}g0xn$Hs z 6±=4_<-#jxAh/:Mgз#K|`A ~̜?}9 nsׄ f*A0cg?ļq];/Ur?+7 [WxS#Ȑ7'nF┙PvW|~(7 {LFPaߺoӇww$'A (XbKʺprv˧Aڛ2Cn YAbU={VM X !G UlW7Nx!n,"^\ ZG!1y%WĂUH1~ Cڗǵ ,xM ӇշAGA>{ax]e]F(\>JVoػS5WƊ q&LJwoqQ|Ia99޼|R_PL1%AQuzXvd_ڬ;LiUY>Rt<{ nO71IXx?vyc"cbp.=ѢLio:򳶕60bZ_/l^:zFm[œ{!F?U ܸtB-)Rg{?{cl=N"[ gEĩ1Uzc{D9i^*Z4x~Is-:vPH)/9[ZNnd6R 
@9-[O/oj:[jf6+ Yw6$L5-7emkO6ReΧHygL`LmtN#xosrktL}K 1uu #B lWW,F=۴@/~L/˥֣z(^%Qޓg}}L P p(CAX7F][ FEZ.!פ͈m8 lG=ꗣUH+rK%2AA tY͊v(ޱ޼4.b(T ӆgm~ނ='nRK,g˧Ъ,d_FgӷSMK`2zxP~aQH2r3Ta7xI}=+D`w`c(?cO3E*i)>)G$i$M6Z; b%@?]jѺwn.9;Fb6ݫq>S`>S%ad_-4L&T6KAwCsL,CӸdlƘ&|$?]W5Ҡvuh+{huroLi^2>dwHo+Ȼ +OTCEؖ5z#P0*Oz=^0;P!!gL'N܄+!!AvjM^ҼzRWBg [8LyJU9>'ݹX f5=׎!Hh?t +yKeYf>OB4-Ęnp^41 UT紫Bu߅x{{ޘؿy!rVG>. FuE]%imkqS6T3rn\EVPB?,?tvYLGWr%!?A MAoN<)`}j+Tnb&A[ЗPFւѴJ9m p.2@F'7X}CP˨ eؿW*V w3.^1#-WD#cqDBPQog$w d`Ya(4_=l8'[ԤB/nůBB+o8ˠϾS.ѷ 3+m A?]߻q[3|i{(|/kE !BmQ2ݳ~v堹5fߞ/ᯞ=X# IDAT?T@3m)  h-o gkQc'Kпo?'K>=&lDD׺dNך `.pFٯ`з0˧  A?mo(a[>uqvh=LwnYc;"}how{ chneC6N:Ც P pe~ j&- h^PͼqYZu$u?m|YʒeBO}Ƃp"> P e1b7@<&tAܥЬ A LZ-L4ęÛQ()Q[3uC/\bз1˧ 0;kZS3蛍Iphbo+Z~[ 5J"[ G֮BZ펵s#wZj&`F_dз-˥~ 0R A?UNt8;=Yl$`)m$b)`; e`@ ~ kxt'|3N:6&o7jVD ~ }D X Y<(k0! #TϹFI/vv`з2 A?Y.l&pAf,`@ ~2kxt'z3N:6&o7jVD ~Ҥ ek f> hX$M^pN׮ss?{M{0CuPF8 Y,l&p:pY0(y!P@A_ãȦ;M}gzv`з5+A?1g)`[78o[aN 0 A?!GMwV[ :7&o7jVD ~DI_8Kl*p5U>Ƶ)3 S 0;Zh;n/~3k}(~'s&/g)@{0CuPF]7~"FX P?~|ǽ;72 FcY(@% 28x!X N3?CGݬyݨY/0kE DG0a_Kl"[|FEҩmR (kya!bh Nx)޿DQ$K\<{K MAnԬ-F,> A\3NW)@<߿C7: 2 F.`X9jLZa){OųG\f)1` (O;CBh1,~.۱=5k 0;%@ |gQlFZP_2c&9<(s`VOkf]C|,x}|[L P>/ D`SBCQu+v| {"appI?cO(@`wءa(?فg䒓.Bx6(`3%:N;6f>/ Hĕ'Qh a#w )k|_DJdI]a)@ 0kn` ^็X:)Dp! (@x9>}TW+ y Z Pqg 2 .R`!B E˧jo_r9(cvPiW5+\S,x(5T;kg 2Ux(k֬AŊU#VZe*nMW^![l~:ڵkqYxE Pna hOիȞ=;^~w['#ܙ3gQF~O(@ *(@ț7/}}60zco#e/_ժUSmذK}Nu2$ anӦMpuu֭[5/dA!z9E)(@`w1g)ݺu;vl<(P|y[E;B P~`O#`ԭ[WUm6+V>xZMtcƌq6ZA?GSJԩSȑ#~B90ػw/ ,~4{l4hЀ@(}^@ |Y3gΨud$`r5P@jiW֬Y E P0Ҡ\vXhiѣ66m3f ]t*±Q gСCѣG FTRș3)U.\y(@g`wQg) /zٲeUk-[U:H Gtz86lvMf(@ ]A䬐=E<}A C ag %Ky2(@ 0}? 8"TXVrVY.ЫW/ <1bĀƍMf(@ MAnԬ0n'OFպ-P\9_E~6\`ԩhٲjC+W.o1Kߺu nnn=z7(`}0 P@ߏ+ www6<(` ={PB9s~(ePдv^|~:ڶmkl&j߾= eΜYf#)@ JAV,0(_<֭[… cΝԡM7nެ>}zpn`lR (}-H vF=i?{;|s$!_޸`Gn*F P 6e%KPfMy=C… ܏?bҥ=eNAᆄ ~Ν; #F@Ν9ġLo07oތ%J8t{8 P`з*ˤwȑ'N@ݺu1o;^9ӧOz;wSNk(@g`wf?)`'Y]T)UۢEکzVCvB"E[&YJƃ30;(ݻw÷?Ο?Εw;ȃ. 
Pz`Ȳ_Ç;wnU۔)SТE ;j(w/FZԇeMz(@= 0qT' IS=|{l'OSͬ&н{w 6 cVZ<(@ MA_o#P+WƪU?~ݻ׎5* @ҥi&/^[lY(@`was(}b[M(VvR@ 6l ۹sg12t%dg(`+Vjժu֡lٲP;wDѢEUG:u\(p gϚ)IK.YwaȐ!<(e1cƠcǎ,6C ZN P(ϋ@Μ9qQԬY-gy2UA;w.2gά~ Al(@? 0'R7ƬY>}zN BǏj'O^z*(u} O; ;nnnpqqQ!?SLvP>gϞUa˗/5jZÃ\d(`ڵj3nܸ۫1cR&59ll4' /RJaƍ5Q J,^%j(E}-L; t GFfT3@ |B дiSSN-P~}nSYdQa?P@NS`X@ձl2ΝK,w^ԩS7Ξ=ۑ˶Q0 0bD>}!BmdɨC 8s _~Ř1cСCPpx}"6Xf *V*[jۧvBX`eq;v@"El!( N=<^~ @޽IC Xƍ9S,/ Pyt6 I O<8tV|(Qnʗ"^Aᇈ m7ovI:Z:thV)qoݺuСC5#6Ы^G&Nm۪3%Æ<(@? Ȍ˱h"ԬY,ogpVGGؽ{7 .3k,[p}l]@˚!Ci:z> Pl] ӧO҃۷o O֭ ٍG σ# 0;h-@ҥi& yP' Kȗ}5j3g]A(@0ERv#F vjRɃ{yc}yqЮ]/(`E}+b( 8 mۆbŊ9z> hB`޼y_jΝ;Ͽhl$([}-;Fsuu߾}Ø1cСCQVԩz%A۲hѢYtE P 7'(9/_/6lvA X_@{پ};ʔ)[H P Ъ@ڵ^=npx=B=0x`o3H WA_c˞Q@ [;%p F-'H*e(@ lٲ%KT5,YիWam, :(c 6lٲ˖-CժUu[v# ;[WBRN8cK(@`wfGM͛jѧO_ @:upB. Q`pn}^ }PbEZJ=e(?{,7n3f8fC* P@ Zv̙Znɓ'#yj]~᝙}@ Nl QdVܹsj)O oܹZВ@Zxb̙RV P@C ,6߿G9p ԭ[#(9ϟ?vǒ]xPEY$P~}3gά N5 PǏWa_'B^tǃ5ɲ(`'#GK." ҥSͬٳѨQ#U޽{f,p^}{\7oFRT-Zd|Wa):tq8qbu9rd7!(`}8 Et=z`vP(Rvyƚ5kl[KFAiՃ! +Wk׮C P;wzOw0`](@ ? G}?~<޷7k6n܈2eʨz/_*Uؽ ЗƓѩ3|r۷iO- 8СC!† H¹A{ Pe:sVM<-[}0yӵ:W\8tPS`:Tor yPZ4k ӦMw; Pf 6ewʕ+cժUjomc(lGn4iZjg/)@ 0[Qz}U;oDGUG`֬Yhܸ0qqgO)`M}kj, XI`ŊZ*mݺu([Jf1 m%MTL+bĈZj>J ~wK.߽{!C{DXpؽ{7*TիW;N WA߿b<6ȑ#7-ZdX<(nR7Ϟ=C>}пGo2G 8 A.7}*0~z吥}> P`O; ;nnnpqqQ!?SLvPZ @ǎ1fď_=zUSZ1c6m+Kxd) PyMP ̙ wڅB PKX-(EmbĉH,<<{k3g8>2(x6۸ou@oV?E/GxTD@o~nРAnMQnRD p]D@㭵Zn„ "#" "0f׳gOѣG޺C" C@BuR5i[wɑ9C4A"N8w1_ J"Ԛ~ "n悧M^7=" &ۻ믿ޭ:u=&$ ߄"7_O4&j|?+ϝ{ռ%"Є$T#@Gyqk*@S8qcv}٧)ʭBԆ~m"P :.g~ IDATkg /,7_"  K@BaFklfc9@ ϝs9nfַE@~6~׵kW_K. nkNht뭷{vmD@ڏ~j>/{w!N?[#o~諯rwO+h/ުmNwƍ6xcwwtWFD@&'馛V[m%D" mD@B[U͇СC ,7Ś""  H;p@7sx[LD@ڄ~4+쮻rݺuƺ @7txhmЪf <>_ugq;kSAD@2o}֯^{p`2' m@"guȲ#F"}lއ^{HSYD@j@@BPu#.QFyO\LD@\w__4v]c5*@HW .ӾN>dwGYf/t[fej."wy繥^=& lW*'cǎu[nu]zӝu~agwD@Z~kj~ۇ|~#6@3x7I&>2I@BL`:\m/6u2h&lEr-GT|UD  tHOw%\>$" MGA/"MWXD8 }="P&?~,խe^A@vmѣǏo$" & a@;x衇ΒPS˫"кg믿:֭j&mF@B\խW_}?o;+"@2`^xs=t*@$+%ڎ@Ϟ=ݘ1c\׮]ݽvWE@Zɱ=nWo v"$۠UŎ8ꨣI':ww]h:~Q]AD@{. 
< VBGDұmIkq;qfm֖Ti : &vD@~JOnСCᮺԏk7o݉'<Fw ɭL~zۍ9@nvzb-DGD H7aӧUfi""  @`w3?5@TrHCKǶ SO=vangVXmꮊz76`w} @oSqkO;(,5jTa!n;@c}#tAnذaU@FD$ }= "xO>/@c!LD@ڙ@#_v}vơ@SoRakM` 7=zprKo닀@S8sJ8#^~@o6R s"п7|p袋\s͕ӝuh|쳏1b[nO#>pv;S *4뮻ƍsmYS[V% ߪ-zl&roDFD@D>hw'Ν;'x-5.-"P&Ý~n(-=z[omFtu$p,.slAUF"_xwy'A"ȟ~u0]L\>vYg~pWBD@D?`ҧOș~uGQ" " SrA믺ꪢ$"# aV Zk-7a„|n$s=_7*DO?"%" @60`:tX?kT^6$ڪ'|ۍ92vsLmC%  Տ`wu֭[C_( /)O~a. tc{=z[nhhcmXVZ=shW駟~WwgN@;+BMRo7s5?@jC@B6\u L5T3}Yvd" " C#pC Oq2#`6dww}@: tF:"'79qGyeY|VeY&6" " yO=P'+"WolӍ3h:׫W"" "{FmT$ґ5&`Ynod!VK.Ϭq[" "7w}ׯzgUW]|"hAn-53 fn*Is̑Ft~#gWLO@} 8|=߁YIv' O@?iU9EO?_&" "и *9VkAnJ:FS;˲뮻~@ׂig$_w?i qcPztD@D5Ì,}7*0 G_@c(J8쳻E7F" "P-~-ȭu] ȣ-K[kߩS&(" "P)_ r+%ډ~;vֵB܅ZoSD@D.rޛ.Z[Ctolފ q!:2HZB H35&` qɅo15./" "Є,ec--mTs' ;r0&GLD@D@?@qz:D@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@D@D@Z~ 6$" " " " zD@D@D@D@D H`J" " " " " g@D@D@*$;︗^zp +?Vx+zP/} ;{wQѩS'kMO 筿ng.:y=s[.2nEJxo);u0hMB?o⺟@CxG}T(K/6l0w9>S5O/eر{P4_?xURn:7쳻[2, Sw:kCk={tcƌ)n*2{ꩧ=Ȕs.6GRgqF7s_cZoo+ȍHB?w亡@#ӧ9rdDz}3?wčL<.A8AB &I&tM4LS^7jY32OټV[mޏ2RV &DV *{9|{IB?tz!h>9s}o*sguV*1$O:2n{.Mm|y6l2C)ѣG޺gQ;7,ss9]tE~1 Z.> /,b\fr-IH6"|B\"{QhsAzRleFD :1B"FLLW^Y8 YBn&]fYb% D;cA5\E >E ,)&οo_0 em8p7n\?jx?N0!\㪫r'Nt_~+n L6{W2d)`܋:5h_=[1چ?{f?:fH0f,}嗟*YɜT,t᜗{V fl7|i}U~/K~SN|cFt5]GFy1(W[#25աmlB8Y SB#,^]V"6lBQXpugf-H3<h 1JO>r-'Ko_:b!qs= uI!*8jh8<0.?b0.Ùd5%6(R3Yb\>atNf8]wB&̌!xfǀ-^L@,N6B1 @ f 3Yf0rx^|ͅ>ee|O6Pryb?xCMG\C607<_fCupV,Y!n^xVyx) \O>/gJto "\)˵ pf>[s5 \z饅."õ&x~{؅OL_͘pefr~ixXw;ZÌ ƽxwMG\@|O]ta Ù3se^43F8cc^C`)JaĕVju:rJma9 t*xe;"c@,l++%N8ajaAI!^/Z3:p A8M׊8/0;+e,EyҞ9.$PꙎ7sdEA#2!S@bу[}% CTw8tpKOKNjqCC- PpLe.ն}!;_|Er?,+7 \ >T# fQ9a8L™ f>Es3ҟOciB>?6P&CsO78Zhp y1C✱OrxjcK¦l6#ȱfYXBu3}FP'Y}2 }F").( fxxMbcd! xx b36^ˆh ^!Α3i8ܦA9^xo<p=BC0ΕN~+ږXtttԈ|b\x;dvSϬ7cT, oԐ2ʬXF tnyYbk<=<< YRǁ r:<2ȓ@g:x#,G\PӯC5y߱pV.y@(ͮ03ηx5(%kJ}K8?? 
a h0eM믿>Z!fӎ3ȱYt5v 140OzB3 Hq#!4Pǃ<:.8UL{Ou~o)g+0̛;z.[X>a;JPJvT11bd"KߌlW3DvȣK]lqgβ>BVS'Յ<%$IQ0X1/W3}7 ,YywXFMԺͪHYkRmMpK,Ěy iR{/<>-f8pkwMwS.+(L|:sU qP *v1f(bLahqxH}K#3f0='a%(ԞG0vɷAUx tbaZ,&YfW g &!駟iB?^d7;ɹ"Й }Y`qzfP6#BCYyGZ39G;, '1krd1UiGB?Jyxwuײ=

p<LO3=LaL_.Z`mʃh'&$atӘ DTl"#w eH0y/%8׼܏iJq_x/ Έt ;f K+ 7[嵘ĬXFZ6F,c^ovcy1LH- x_yN4+%fg:%QK{t@gЃϟƬ3gP3{tgѐ&z x !BTq YI#A.{VR0:$ a)0 &7c&5̢I@Й0Ӟtso>6f4}Bπ,\P´Ln<0.6{K98qRZJk9Ur,0#p~ 򌂙H7N0e=xN O#(\a׊AS%~ zŴ15ɀҰ"ش8.|(eR UBG.D1d]XZY5b|~^f}Xb 3hR}^ۤ55Y~91 L1c6٤]@5zYb[Jy1kƀ: ,VCXi"o$/5=z=0a,6Y2¹<1FB5m;&L~;^IՌYfpq=8EOpٜ5ii✵<9 }>)N{ { wgX)''.NOSe }nB- vɍ˟&r>^&̾1 d\f6l1l9kC.aCp g")#)*%x60#"n߲.sҞpEP31 BY0h0S ?ߩ=;Y83̂^#BXp۬WUo7ŃfψQIx%,'q7.f,۷_ogZ<ބ03@zkof.'w~ؖsEN[2! fS$<)YHXҞ1.)ӞrI0lO OxK֜{vrı|B]9>AceJj8醱Ǜx{;"R2q^>3Ygx،ÜŶ\+%|4t-&#O=ZBZu1nke]XFU6F,c=ߤveᙅ$mw@ìI%D8>v  l(g!7# ''ោ_XE~KZH&פrq"ώK,Mڝ{$MpC= ͝rЧ H]|XFgpm`֤ό7S$$1[xBi*/4G%^K5E҄8 3=;弧DBlDMG9u8P[9>ЏF1eX,OŵKlO3<  ^,f1 `²02$YNVJ%;r>il{a,͒vͺHXzj2֢ͪX GŶGij$8'qs wdZٶovmtTB 3{3l2#jeFv/v3xGMvr>Eq|1@G:ޤ6iF-^0y|6us˗;?dQOO9_ B8Q,eq ǖ g dΉ5PBr3Ix)pƃmGq,N.3tqyYNL 6fqrU>ӊ7q<<_ 䅙"؋&^0Rk3mC"N%BOthM , Ӑg99OSa"<a[lW߸cRVRk3pb]OYX6F,c=bnM=Bx(2}lr,Ut6tt(na7fjw||HfVlr;5/ ZbC&,{]$Y^}CoSr$ 8p#5<\Q󀐝bNz,ލpcSY#rUVh1,^uıUSnZJZ@<Ʒnl^e X< xi~ YS!f1@f\^"BlLyfz >aoQzUzve KF&YҥҖjS]QD@D@D@D@N@BM@ HW(" " " " u' _&PD@D@D@D@D$TWЯ{" " " " "P}g+@ H׽ T> 3E@D@D@D@D$*T~" " " " "Pwuo@D@D@D@D@O@BLuE; 7 " " " " ' _}ԝ~ݛ@Я>S]QD@D@D@D@N@BM@ HW(" " " " u' _&PD@D@D@D@D$TWЯ{" " " " "P}g+@ H׽ ThL#Fp׿ܦnZk4feڠT?5j{|{8nro_ o|͗x~8 묳x3ߣ~nr?ݿotkfm3\[6͵$TPȗꫯө$CqgyK,o!˼sm[c5iBr'twy{OUW]NTI&^{7`wi_}WK.qG}ܕW^vek }~޽r2Ku}6|s74Ӹnww.c==裾|/[p `6yJ+Ux q= hBD@D@@)'ztnem $sK }~ :TamFNmRBv[mCdfmkfd0ĬSƳ+7xxnzn Z4N^ns>{O琉VYe,f1Ly%OE|衇1/&N_dinݺNHW㙹K/GxLx*F#PL '\b9"V(ZtB{キo#GYg-u7穘~D k._n4P|[o|=όk柅Gyķ1zN,1zK/s=yX!.~VRU8;S*Δ6t.yG^T:1czj.zud;(_ .YdwZP,^ԏoa>}_߇aĀ}ٓq:30 <O=_ʠsϭᢳ 뮻V[=LD05<ŬibyD9#zG{q^Q/yp2( vygqό.8f X>f[l1?d`;쟧rއFx~FP=Nce S^6[ExsW8bٌb q|0fG=EKK޽݅^迁؏?  
o@5,FF"rlGeĿZ}5̯9 IDAT5x`nfG0-DyUI+6qv&OSZ 3أ }yLRƢ^)fx&>B s*hr߇)q~5(f S:EFŦ$-_uU~zIF k0IM Ib/Ck$O!Ӧ1HqLD@D@Y53 X-,x=b#a8gҽKBa<^ŋ۩S'XGBcj}رcݖ[ng_lf tgbBoux^tm$㩷ǔr#D#: 6drQ 6X:b͟}cX+W%u*455J<6`;/N0^_*зe<\s5]`psaΞ?D*y:v~5(fbo;ӏ}/>SL)aT'|C```=qk&d2mJG|4:ZFLR洛6Κa C+D>Y=Z}}GXa8ƍWr1ߛ{Г 'T;^l |Y T\hYګ1%;̇3:H8@L" ˄>/  3H UX[+>/4ybyIG'6,@ٳ N2bYXm3u e@QCnN(9$CPN(*i1mĺlwU3Bjqp1 ;)&~&o;pa&O;f ;k"1>7JqM6b{Z;F(1l2o^*aa6 dW*\̴G2ub1 |ń3T'; Y['9+}:NKw`yQNU.uvBuB >B[gtKtH&/z>fSq++iEI3)>>F^'."\V8Nm&q?;,5lxv#o'C2ی 7xxf"Nnx|1oeX&;982p?Fqؒ>p3Yf*Z 1N:cg>fk(? b-Q|JǚCm ! (mJf$8C/@Dij5ńT/7^~xgѴ :ZM 8prOof)&mp75uIfxJ{u_gĢe^/H,&IqRVf1iٟX3#[e)&ma!I|x/Xg#C%σvBeR`"R6X<<$.fLY!Aa }2 e&Yp!)ac>EK[B':8O !C۶#/F PGUq !*̼,QIeމۂH%Ogyi-J8Ba,KxfMC? }lv=˽oľN;6ز殜݆-ׄ}Gpf U ۥ##d &, Z(i4^~U,% ޞbBB\veSZ ,Am'1qQao>b/byXڈFdaqSh7z~{(R[%{T qtPt4Lё㙁I10f::vy)P_bi7f0D9fRo2S@3X}B3|@Ɲ_x~@Vi\gc{bYǕ% }˭´ay }eilGDF4lÃO(sT@.8ǻ'B1N1ʶXOh "-N ox`1o88Ǭ4i18ѷX@E >_,@,b:Bbb5PJ>ڃ$8pl $D νl@PEm'{n1Tny"9;Yaû:R?Z fx{V8uxq`2#C۲ |1 `@Isj'+1<G#X(ffdf6f[){hB "$"Qga8?3w 88K|3Yf9k.4qugւY4B+?W1N Y6+eY>נIw3nK\n+t)KCcuYxnyxm3:U  .g8hgc:>diqdDncگ1 ׁ1ٶx5FglX5DMc% }{ス'Ebdj' ӎx_xq?8E&I0ѩzo+yuGM(BO}l0f F2BHƉE4b6)>@Qf'`/8&9c:nU2UZ֏O>ÁZr!H2#B4 xnxV`4sפ\dVړ v+Bw#b)9D8L N*y8xRD''X~6 +&-$*SSN2$0&1Ų0fؐ_&" "ZCהuZD{kZ_PStmOGR#2CǣGd3$,a)E txp/UJ^C? 
}cSM B Yӄ>#x@6WIʆT Cansl\̀)\_M^lڴ*" "PmBD'8Q͖Nmm ի~X^bHى,8aCbm?.qV,ݯ YD=/=WlB^hiB?EY3KL)Π&I$g)/C;flq;vlb8E}KWk DH d-첎,%F^yخ_|)vM7ԭZi;uaoc`lrܣgϞnit$ۣUK(ꫯv{e]V]L/b#-bSO=wu]7,e^zCc6裏JSO-y} lPFo&sz\u3V[|_ %fk7nL3 ?&TcEr '|:u|IQv7{{袋?q?DG;n,zV[ﴻksDO:F>M;E7]v_"k-Du]nEqgu谋wQOH(J Gou{뭷++&jB7A+7xxn{}HRK- =n^PK/0l6rǏw]tqMՇ_|-ғkI8M4M7tNlש-hD^z%G,"/|׿iΝ;שdwz!߹nРASO LTTat?Ce\wuq]CD@Z@O9ׯ_?ߏ/wrGyd"O?7rH7묳zon)f‹0s@(J{=/ ,gy晢!FV߳>ק{;d]Xx}=GGi ?6xvlW&9?,ءcC|fOY95㢚?ܝr)~n+T~ӛiƂC=/j8 7x`(f|nK6%iGDM dą#YK ,ŻFdž#zy8F?蠃Rcq8 vin}q~⥭=w}g~12qp cqw䪥OcESoH 0l HDG!_uO1wLw73Xʎ*'2PW9 ^x_-28KM|" 0><}^>8t|2,X3s00@{*ĈE,=?ǻ'P= V8(7K6@h {W} fy՗x~=|bF]n}WWOY;0׵kWwZOA@B?/p/ll>تxTjTnEX<^{/p}l RlaPAR3,YJDZ=zHRê.MO੧&^uNoi}U3c͢݋.ȟ~:: a}wlBa,i| h[oQ|Ķ20CK$BQw,R{|xh>D|P6 AQʠ% 2{<;L"w-*6(_p$Yf߲AIՈkBJp-;OxH1g=aG?['`Fꫯ3مq I>^BpW8X$sxt,N(sڭm LxE>kbίw~1j^~~Û Φ> J^&\X$n{f+̪% !T.cM.vx43ǃ ,~#~sCŋNG3\ `Ň+("aE悴mX|᱂ *lʛA6R1`C>Xs?ʎ#[ܟdGq;餓g`Ŭe9Tǵ6y8]%/y"kΎo)Vg2T, uLh;_G~{/6d5a+xcg 7ȕz7GBr'u$X(Dt15iI><2㘧"LE \&Zgul,.#N_//k"N,9ˢ\bYhȂps[b)cEMxKl/fiZב8ȏ@=UY[Ō,38iNrϱN5AP2S5YD}kfȆ$oE& }[Dxl:qӼ>H +e@$6 Я'ƈκHy2;#LW3)&NBX,&L%0Jf6AGNe]yճBl1 p4__+_.&mrB$1"?CO!DLyY؟MpIHYk@% M2Pc+N$-6 i0f( 36}ZIDAT} bfYx<>e&4 ,$f=LHjy9㜾Ȳ΄C>ӛI;2%}{l<Ŷr:z }믿rfC?)Zڹs "fL6Xy ?R!YwLB?~ P-o/Pc,y" 1#^{KH8' -Xѯ1Ma@dB {V|`ń>~aE8{HZgF=^Ϭ+JkMe-Rd 2h;_3u=?:RVf: }t!:1J 1\ԛѯЧ,"0q:xZ5\9JRIQگ_?BJ8]7>*3ˌ f9s?n+ Bls عiTj }hYhfK5-f%b& g6fv0f-+ICwY, c(PbB!)SRR4VbyȐG,f cZ@ z81F b1 HPJ >IwHKȠLF k,%3^K%.XFcYkX1̬-0ƃDV8\5e&%ÔvȅewP/ bBcQCkÜ- U;țpf1<|X_Dw*5t34v1B#_BC >/e w >CCg1IhL;LW [#'uBgNd5 = ;ӿx;aĆQ&Iز^>3{4>?a4cvfe]%O1bOq>q5ʽ+]w-Sz}AHV :@\|Cq} {x\wu~Rv饗z7 ; \lXXplq]p=: VoaOD@D@*$p7ܱw ;#It=Xǎ2{݂ .Xn?ؑsDO:ɄL3|OFQ.6Nk-ӔT&l뮒t+w> OUW]>l0i_Rؗo/7zYm?~ҥ%8jMH@B ME<ݍ7ٓO>Cys5Wa{#t ,O3b\C=ԝvin}q~Ic}g~q.qp cqw[w-_i鼖! 
2M@u ~Ep_%M\s9{ Ac[zݬy'hsOq,u|nsw~u] H7a" " I੧r.nݺy/6ﵰ_/\{bYgBpls/ӫWk{G܅^)% a`K^{mCYks 7PB?Q{[ofV%E@D@"q0Э^Ļs:h+{챇ql"}.ܓO)_'AoR*@[`'؛o@#x6W\cɺCf2}n-m: }u'GN:$~Oר$eT.4зp >>~r_}OCu Ԏ,%n }o߾ޛEylsO~&:MHIC" " M Н_~Ž>&mK-%3̃}4O9f;ft>C,%q>L~UЯK]ID@D@jF Ÿ[wޙlm\|Эzns|#l=a63ȗ_~?㎍BúfyP HH$@wыi{YdGN?ozfVYw kv~[hɄ{)oO >uJ } .ÊzF9٥ӄ>Yz$3_{z*D H7}" " "P;o2d[YtBc-t￿aσ rw[~?p 6xcǢc岠G R2HhiB?@EH7Z<" " "РMg.Yrڇ!3 ZlKږ~6*." " " "$[uU7% ߶M2 Vn]MD@D@D@Dm Hmӫ" " " " L@B[Wuh[m@+oUD@D@D@D@ږ~6*." " " "$[uU7% ߶M2T9רIENDB`././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/figures/keystone-federation.svg0000664000175000017500000001700000000000000024762 0ustar00zuulzuul00000000000000
Identity service
(keystone)
[Not supported by viewer]

Federation

[Not supported by viewer]

Service Provider (SP)

  • SAML v2
  • OpenID Connect
[Not supported by viewer]

Identity Provider (IdP)

  • Keystone 2 KeyStore
  • PySAML 2
  • SAML
[Not supported by viewer]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/getting-started.rst0000664000175000017500000000041000000000000022452 0ustar00zuulzuul00000000000000=============== Getting Started =============== Everything you need to get started administering a keystone deployment. .. toctree:: :maxdepth: 1 identity-concepts identity-sources bootstrap cli-manage-projects-users-and-roles manage-services ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/health-check-middleware.rst0000664000175000017500000000077200000000000024013 0ustar00zuulzuul00000000000000Health Check ============ Health check mechanism allows an operator to configure the endpoint URL that will provide information to a load balancer if the given API endpoint at the node should be available or not. It's enabled by default in Keystone using the functions from `oslo.middleware`. And the URL is ``/healthcheck``. For more information and configuration options for the middleware see `oslo.middleware `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/identity-concepts.rst0000664000175000017500000003467100000000000023032 0ustar00zuulzuul00000000000000================= Identity concepts ================= Authentication The process of confirming the identity of a user. To confirm an incoming request, OpenStack Identity validates a set of credentials users supply. Initially, these credentials are a user name and password, or a user name and API key. When OpenStack Identity validates user credentials, it issues an authentication token. Users provide the token in subsequent requests. Credentials Data that confirms the identity of the user. 
For example, user name and password, user name and API key, or an authentication token that the Identity service provides. Domain An Identity service API v3 entity. Domains are a collection of projects and users that define administrative boundaries for managing Identity entities. Domains can represent an individual, company, or operator-owned space. They expose administrative activities directly to system users. Users can be granted the administrator role for a domain. A domain administrator can create projects, users, and groups in a domain and assign roles to users and groups in a domain. Endpoint A network-accessible address, usually a URL, through which you can access a service. If you are using an extension for templates, you can create an endpoint template that represents the templates of all consumable services that are available across the regions. Group An Identity service API v3 entity. Groups are a collection of users owned by a domain. A group role, granted to a domain or project, applies to all users in the group. Adding or removing users to or from a group grants or revokes their role and authentication to the associated domain or project. OpenStackClient A command-line interface for several OpenStack services including the Identity API. For example, a user can run the :command:`openstack service create` and :command:`openstack endpoint create` commands to register services in their OpenStack installation. Project A container that groups or isolates resources or identity objects. Depending on the service operator, a project might map to a customer, account, organization, or tenant. Region An Identity service API v3 entity. Represents a general division in an OpenStack deployment. You can associate zero or more sub-regions with a region to make a tree-like structured hierarchy. Although a region does not have a geographical connotation, a deployment can use a geographical name for a region, such as ``us-east``. 
Role A personality with a defined set of user rights and privileges to perform a specific set of operations. The Identity service issues a token to a user that includes a list of roles. When a user calls a service, that service interprets the user role set, and determines to which operations or resources each role grants access. Service An OpenStack service, such as Compute (nova), Object Storage (swift), or Image service (glance), that provides one or more endpoints through which users can access resources and perform operations. Token An alpha-numeric text string that enables access to OpenStack APIs and resources. A token may be revoked at any time and is valid for a finite duration. While OpenStack Identity supports token-based authentication in this release, it intends to support additional protocols in the future. OpenStack Identity is an integration service that does not aspire to be a full-fledged identity store and management solution. User A digital representation of a person, system, or service that uses OpenStack cloud services. The Identity service validates that incoming requests are made by the user who claims to be making the call. Users have a login and can access resources by using assigned tokens. Users can be directly assigned to a particular project and behave as if they are contained in that project. User management ~~~~~~~~~~~~~~~ Identity user management examples: * Create a user named ``alice``: .. code-block:: console $ openstack user create --password-prompt --email alice@example.com alice * Create a project named ``acme``: .. code-block:: console $ openstack project create acme --domain default * Create a domain named ``emea``: .. code-block:: console $ openstack --os-identity-api-version=3 domain create emea * Create a role named ``compute-user``: .. code-block:: console $ openstack role create compute-user .. 
note:: Individual services assign meaning to roles, typically through limiting or granting access to users with the role to the operations that the service supports. Role access is typically configured in the service's ``policy.yaml`` file. For example, to limit Compute access to the ``compute-user`` role, edit the Compute service's ``policy.yaml`` file to require this role for Compute operations. The Identity service assigns a project and a role to a user. You might assign the ``compute-user`` role to the ``alice`` user in the ``acme`` project: .. code-block:: console $ openstack role add --project acme --user alice compute-user A user can have different roles in different projects. For example, Alice might also have the ``admin`` role in the ``Cyberdyne`` project. A user can also have multiple roles in the same project. The ``/etc/[SERVICE_CODENAME]/policy.yaml`` file controls the tasks that users can perform for a given service. For example, the ``/etc/nova/policy.yaml`` file specifies the access policy for the Compute service, the ``/etc/glance/policy.yaml`` file specifies the access policy for the Image service, and the ``/etc/keystone/policy.yaml`` file specifies the access policy for the Identity service. The default ``policy.yaml`` files in the Compute, Identity, and Image services recognize only the ``admin`` role. Any user with any role in a project can access all operations that do not require the ``admin`` role. To restrict users from performing operations in, for example, the Compute service, you must create a role in the Identity service and then modify the ``/etc/nova/policy.yaml`` file so that this role is required for Compute operations. For example, the following line in the ``/etc/cinder/policy.yaml`` file does not restrict which users can create volumes: .. code-block:: none "volume:create": "", If the user has any role in a project, he can create volumes in that project. 
To restrict the creation of volumes to users who have the ``compute-user`` role in a particular project, you add ``"role:compute-user"``: .. code-block:: none "volume:create": "role:compute-user", To restrict all Compute service requests to require this role, the resulting file looks like: .. code-block:: json { "admin_or_owner": "role:admin or project_id:%(project_id)s", "default": "rule:admin_or_owner", "compute:create": "role:compute-user", "compute:create:attach_network": "role:compute-user", "compute:create:attach_volume": "role:compute-user", "compute:get_all": "role:compute-user", "compute:unlock_override": "rule:admin_api", "admin_api": "role:admin", "compute_extension:accounts": "rule:admin_api", "compute_extension:admin_actions": "rule:admin_api", "compute_extension:admin_actions:pause": "rule:admin_or_owner", "compute_extension:admin_actions:unpause": "rule:admin_or_owner", "compute_extension:admin_actions:suspend": "rule:admin_or_owner", "compute_extension:admin_actions:resume": "rule:admin_or_owner", "compute_extension:admin_actions:lock": "rule:admin_or_owner", "compute_extension:admin_actions:unlock": "rule:admin_or_owner", "compute_extension:admin_actions:resetNetwork": "rule:admin_api", "compute_extension:admin_actions:injectNetworkInfo": "rule:admin_api", "compute_extension:admin_actions:createBackup": "rule:admin_or_owner", "compute_extension:admin_actions:migrateLive": "rule:admin_api", "compute_extension:admin_actions:migrate": "rule:admin_api", "compute_extension:aggregates": "rule:admin_api", "compute_extension:certificates": "role:compute-user", "compute_extension:cloudpipe": "rule:admin_api", "compute_extension:console_output": "role:compute-user", "compute_extension:consoles": "role:compute-user", "compute_extension:createserverext": "role:compute-user", "compute_extension:deferred_delete": "role:compute-user", "compute_extension:disk_config": "role:compute-user", "compute_extension:evacuate": "rule:admin_api", 
"compute_extension:extended_server_attributes": "rule:admin_api", "compute_extension:extended_status": "role:compute-user", "compute_extension:flavorextradata": "role:compute-user", "compute_extension:flavorextraspecs": "role:compute-user", "compute_extension:flavormanage": "rule:admin_api", "compute_extension:floating_ip_dns": "role:compute-user", "compute_extension:floating_ip_pools": "role:compute-user", "compute_extension:floating_ips": "role:compute-user", "compute_extension:hosts": "rule:admin_api", "compute_extension:keypairs": "role:compute-user", "compute_extension:multinic": "role:compute-user", "compute_extension:networks": "rule:admin_api", "compute_extension:quotas": "role:compute-user", "compute_extension:rescue": "role:compute-user", "compute_extension:security_groups": "role:compute-user", "compute_extension:server_action_list": "rule:admin_api", "compute_extension:server_diagnostics": "rule:admin_api", "compute_extension:simple_tenant_usage:show": "rule:admin_or_owner", "compute_extension:simple_tenant_usage:list": "rule:admin_api", "compute_extension:users": "rule:admin_api", "compute_extension:virtual_interfaces": "role:compute-user", "compute_extension:virtual_storage_arrays": "role:compute-user", "compute_extension:volumes": "role:compute-user", "compute_extension:volume_attachments:index": "role:compute-user", "compute_extension:volume_attachments:show": "role:compute-user", "compute_extension:volume_attachments:create": "role:compute-user", "compute_extension:volume_attachments:delete": "role:compute-user", "compute_extension:volumetypes": "role:compute-user", "volume:create": "role:compute-user", "volume:get_all": "role:compute-user", "volume:get_volume_metadata": "role:compute-user", "volume:get_snapshot": "role:compute-user", "volume:get_all_snapshots": "role:compute-user", "network:get_all_networks": "role:compute-user", "network:get_network": "role:compute-user", "network:delete_network": "role:compute-user", 
"network:disassociate_network": "role:compute-user", "network:get_vifs_by_instance": "role:compute-user", "network:allocate_for_instance": "role:compute-user", "network:deallocate_for_instance": "role:compute-user", "network:validate_networks": "role:compute-user", "network:get_instance_uuids_by_ip_filter": "role:compute-user", "network:get_floating_ip": "role:compute-user", "network:get_floating_ip_pools": "role:compute-user", "network:get_floating_ip_by_address": "role:compute-user", "network:get_floating_ips_by_project": "role:compute-user", "network:get_floating_ips_by_fixed_address": "role:compute-user", "network:allocate_floating_ip": "role:compute-user", "network:deallocate_floating_ip": "role:compute-user", "network:associate_floating_ip": "role:compute-user", "network:disassociate_floating_ip": "role:compute-user", "network:get_fixed_ip": "role:compute-user", "network:add_fixed_ip_to_instance": "role:compute-user", "network:remove_fixed_ip_from_instance": "role:compute-user", "network:add_network_to_project": "role:compute-user", "network:get_instance_nw_info": "role:compute-user", "network:get_dns_domains": "role:compute-user", "network:add_dns_entry": "role:compute-user", "network:modify_dns_entry": "role:compute-user", "network:delete_dns_entry": "role:compute-user", "network:get_dns_entries_by_address": "role:compute-user", "network:get_dns_entries_by_name": "role:compute-user", "network:create_private_dns_domain": "role:compute-user", "network:create_public_dns_domain": "role:compute-user", "network:delete_dns_domain": "role:compute-user" } Service management ~~~~~~~~~~~~~~~~~~ The Identity service provides identity, token, catalog, and policy services. It consists of: * keystone Web Server Gateway Interface (WSGI) service Can be run in a WSGI-capable web server such as Apache httpd to provide the Identity service. The service and administrative APIs are run as separate instances of the WSGI service. 
* Identity service functions Each has a pluggable back end that allow different ways to use the particular service. Most support standard back ends like LDAP or SQL. The Identity service also maintains a user that corresponds to each service, such as, a user named ``nova`` for the Compute service, and a special service project called ``service``. For information about how to create services and endpoints, see the :ref:`Administrator Guide `. Groups ~~~~~~ A group is a collection of users in a domain. Administrators can create groups and add users to them. A role can then be assigned to the group, rather than individual users. Groups were introduced with the Identity API v3. Identity API V3 provides the following group-related operations: * Create a group * Delete a group * Update a group (change its name or description) * Add a user to a group * Remove a user from a group * List group members * List groups for a user * Assign a role on a project to a group * Assign a role on a domain to a group * Query role assignments to groups .. note:: The Identity service server might not allow all operations. For example, if you use the Identity server with the LDAP Identity back end and group updates are disabled, a request to create, delete, or update a group fails. Here are a couple of examples: * Group A is granted Role A on Project A. If User A is a member of Group A, when User A gets a token scoped to Project A, the token also includes Role A. * Group B is granted Role B on Domain B. If User B is a member of Group B, when User B gets a token scoped to Domain B, the token also includes Role B. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/identity-sources.rst0000664000175000017500000000253200000000000022666 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Configuring Keystone ==================== Identity sources ================ One of the most impactful decisions you'll have to make when configuring keystone is deciding how you want keystone to source your identity data. Keystone supports several different choices that will substantially impact how you'll configure, deploy, and interact with keystone. You can also mix-and-match various sources of identity (see :ref:`Domain-specific Configuration ` for an example). For example, you can store OpenStack service users and their passwords in SQL, manage customers in LDAP, and authenticate employees via SAML federation. .. support_matrix:: identity-support-matrix.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/identity-support-matrix.ini0000664000175000017500000001015200000000000024165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file contains a specification of what feature capabilities each driver # is able to support. Feature capabilities include what API operations are # supported, what backend behaviors features can be used and what aspects of # the driver implementation can be configured. The capabilities can be # considered to be structured into nested groups, but in this file they have # been flattened for ease of representation. The section names represent the # group structure. At the top level there are the following groups defined: # # - operation: Public API operations. # - feature: Features of the driver. # # When considering which capabilities should be marked as required, consider # the following guiding principles. # # The 'status' field takes possible values: # # - required: Unconditionally required to be implemented. # - optional: Optional to support, but nice to have. # - choice(group): At least one of the options within the named group # must be implemented. # - conditional(cond): Required, if the referenced condition is met. # # The value against each 'driver.XXXX' entry refers to the level of the # implementation of the feature in that driver: # # - complete: Fully implemented, expected to work at all times. # - partial: Implemented, but with caveats about when it will work. # For example, some configurations or hardware or guest OS may not # support it. # - missing: Not implemented at all. # # In the case of the driver being marked as 'partial', then # 'notes' entry should be used to explain the caveats around the # implementation. # # The 'cli' field takes a list of client commands, separated by semicolon. # These CLi commands are related to that operation. # Example: # cli=openstack domain list;openstack domain show # # List of driver implementations for which we are going to track the status of # features. This list only covers drivers that are in tree. 
Out of tree # drivers should maintain their own equivalent document, and merge it with this # when their code merges into core. [driver.sql] title=SQL [driver.ldap] title=LDAP [driver.oauth1] title=OAuth v1.0a [driver.external] title=REMOTE_USER [driver.oidc] title=OpenID Connect [driver.samlv2] title=SAML v2 [operation.local_authentication] title=Local authentication status=optional notes=Authenticate with keystone by providing credentials directly to keystone. driver.sql=complete driver.ldap=complete driver.oauth1=complete driver.external=missing driver.oidc=missing driver.samlv2=missing [operation.external_authentication] title=External authentication status=optional notes=Authenticate with keystone by providing credentials to an external system that keystone trusts (as with federation). driver.sql=missing driver.ldap=missing driver.oauth1=missing driver.external=complete driver.oidc=complete driver.samlv2=complete [operation.identity_crud] title=Identity management status=optional notes=Create, update, enable/disable, and delete users via Keystone's HTTP API. driver.sql=complete driver.ldap=partial driver.oauth1=complete driver.external=missing driver.oidc=missing driver.samlv2=missing [operation.pci_controls] title=PCI-DSS controls status=optional notes=Configure keystone to enforce PCI-DSS compliant security controls. driver.sql=complete driver.ldap=partial driver.oauth1=missing driver.external=partial driver.oidc=missing driver.samlv2=missing [operation.auditing] title=Auditing status=optional notes=Audit authentication flows using PyCADF. driver.sql=complete driver.ldap=complete driver.oauth1=missing driver.external=missing driver.oidc=complete driver.samlv2=complete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/index.rst0000664000175000017500000000076200000000000020466 0ustar00zuulzuul00000000000000.. 
_identity_management: ==================== Administrator Guides ==================== OpenStack Identity, code-named keystone, is the default Identity management system for OpenStack. This section contains guides for keystone operators to help with administering a keystone deployment. .. toctree:: :maxdepth: 2 getting-started configuration operations tokens service-api-protection keystone-features authentication-mechanisms oauth2-usage-guide configure-https ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/integrate-with-ldap.inc0000664000175000017500000004323300000000000023171 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _integrate_with_ldap: Integrate Identity with LDAP ============================ The OpenStack Identity service supports integration with existing LDAP directories for authentication and authorization services. LDAP back ends require initialization before configuring the OpenStack Identity service to work with it. For more information, see `Setting up LDAP for use with Keystone `__. When the OpenStack Identity service is configured to use LDAP back ends, you can split authentication (using the *identity* feature) and authorization (using the *assignment* feature). OpenStack Identity only supports read-only LDAP integration. The *identity* feature enables administrators to manage users and groups by each domain or the OpenStack Identity service entirely. This is supported by the LDAP identity back end. The *assignment* feature enables administrators to manage project role authorization using the OpenStack Identity service SQL database. There is no assignment back end for LDAP. Identity LDAP server set up --------------------------- .. 
important:: If you are using SELinux (enabled by default on RHEL derivatives), then in order for the OpenStack Identity service to access LDAP servers, you must enable the ``authlogin_nsswitch_use_ldap`` boolean value for SELinux on the server running the OpenStack Identity service. To enable and make the option persistent across reboots, set the following boolean value as the root user: .. code-block:: console # setsebool -P authlogin_nsswitch_use_ldap on The Identity configuration is split into two separate back ends; identity (back end for users and groups), and assignments (back end for domains, projects, roles, role assignments). To configure Identity, set options in the ``/etc/keystone/keystone.conf`` file. See `Integrate Identity back end with LDAP`_ for Identity back end configuration examples. Modify these examples as needed. **To define the destination LDAP server** Define the destination LDAP server in the ``/etc/keystone/keystone.conf`` file: .. code-block:: ini [ldap] url = ldap://localhost user = dc=Manager,dc=example,dc=org password = samplepassword suffix = dc=example,dc=org Although it's not recommended (see note below), multiple LDAP servers can be supplied to ``url`` to provide high-availability support for a single LDAP backend. By default, these will be tried in order of apperance, but an additional option, ``randomize_urls`` can be set to true, to randomize the list in each process (when it starts). To specify multiple LDAP servers, simply change the ``url`` option in the ``[ldap]`` section to be a list, separated by commas: .. code-block:: ini url = "ldap://localhost,ldap://backup.localhost" randomize_urls = true .. NOTE:: Failover mechanisms in the LDAP backend can cause delays when switching over to the next working LDAP server. Randomizing the order in which the servers are tried only makes the failure behavior not dependent on which of the ordered servers fail. 
Individual processes can still be delayed or time out, so this doesn't fix the issue at hand, but only makes the failure mode more gradual. This behavior cannot be easily fixed inside the service, because keystone would have to monitor the status of each LDAP server, which is in fact a task for a load balancer. Because of this, it is recommended to use a load balancer in front of the LDAP servers, which can monitor the state of the cluster and instantly redirect connections to the working LDAP server. **Additional LDAP integration settings** Set these options in the ``/etc/keystone/keystone.conf`` file for a single LDAP server, or ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` files for multiple back ends. Example configurations appear below each setting summary: **Query option** .. hlist:: :columns: 1 * Use ``query_scope`` to control the scope level of data presented (search only the first level or search an entire sub-tree) through LDAP. * Use ``page_size`` to control the maximum results per page. A value of zero disables paging. * Use ``alias_dereferencing`` to control the LDAP dereferencing option for queries. .. code-block:: ini [ldap] query_scope = sub page_size = 0 alias_dereferencing = default chase_referrals = **Debug** Use ``debug_level`` to set the LDAP debugging level for LDAP calls. A value of zero means that debugging is not enabled. .. code-block:: ini [ldap] debug_level = 4095 This setting sets ``OPT_DEBUG_LEVEL`` in the underlying python library. This field is a bit mask (integer), and the possible flags are documented in the OpenLDAP manpages. Commonly used values include 255 and 4095, with 4095 being more verbose and 0 being disabled. We recommend consulting the documentation for your LDAP back end when using this option. .. WARNING:: Enabling ``debug_level`` will negatively impact performance. **Connection pooling** Various LDAP back ends use a common LDAP module to interact with LDAP data. 
By default, a new connection is established for each LDAP operation. This is expensive when TLS support is enabled, which is a likely configuration in an enterprise setup. Reusing connections from a connection pool drastically reduces overhead of initiating a new connection for every LDAP operation. Use ``use_pool`` to enable LDAP connection pooling. Configure the connection pool size, maximum retry, reconnect trials, timeout (-1 indicates indefinite wait) and lifetime in seconds. .. code-block:: ini [ldap] use_pool = true pool_size = 10 pool_retry_max = 3 pool_retry_delay = 0.1 pool_connection_timeout = -1 pool_connection_lifetime = 600 **Connection pooling for end user authentication** LDAP user authentication is performed via an LDAP bind operation. In large deployments, user authentication can use up all available connections in a connection pool. OpenStack Identity provides a separate connection pool specifically for user authentication. Use ``use_auth_pool`` to enable LDAP connection pooling for end user authentication. Configure the connection pool size and lifetime in seconds. Both ``use_pool`` and ``use_auth_pool`` must be enabled to pool connections for user authentication. .. code-block:: ini [ldap] use_auth_pool = false auth_pool_size = 100 auth_pool_connection_lifetime = 60 When you have finished the configuration, restart the OpenStack Identity service. .. warning:: During the service restart, authentication and authorization are unavailable. Integrate Identity back end with LDAP ------------------------------------- The Identity back end contains information for users, groups, and group member lists. Integrating the Identity back end with LDAP allows administrators to use users and groups in LDAP. .. important:: For OpenStack Identity service to access LDAP servers, you must define the destination LDAP server in the ``/etc/keystone/keystone.conf`` file. For more information, see `Identity LDAP server set up`_. 
**To integrate one Identity back end with LDAP** #. Enable the LDAP Identity driver in the ``/etc/keystone/keystone.conf`` file. This allows LDAP as an identity back end: .. code-block:: ini [identity] #driver = sql driver = ldap #. Create the organizational units (OU) in the LDAP directory, and define the corresponding location in the ``/etc/keystone/keystone.conf`` file: .. code-block:: ini [ldap] user_tree_dn = ou=Users,dc=example,dc=org user_objectclass = inetOrgPerson group_tree_dn = ou=Groups,dc=example,dc=org group_objectclass = groupOfNames .. note:: These schema attributes are extensible for compatibility with various schemas. For example, this entry maps to the person attribute in Active Directory: .. code-block:: ini user_objectclass = person Restart the OpenStack Identity service. .. warning:: During service restart, authentication and authorization are unavailable. **To integrate multiple Identity back ends with LDAP** #. Set the following options in the ``/etc/keystone/keystone.conf`` file: #. Enable the LDAP driver: .. code-block:: ini [identity] #driver = sql driver = ldap #. Enable domain-specific drivers: .. code-block:: ini [identity] domain_specific_drivers_enabled = True domain_config_dir = /etc/keystone/domains #. Restart the OpenStack Identity service. .. warning:: During service restart, authentication and authorization are unavailable. #. List the domains using the dashboard, or the OpenStackClient CLI. Refer to the `Command List `__ for a list of OpenStackClient commands. #. Create domains using OpenStack dashboard, or the OpenStackClient CLI. #. For each domain, create a domain-specific configuration file in the ``/etc/keystone/domains`` directory. Use the file naming convention ``keystone.DOMAIN_NAME.conf``, where DOMAIN\_NAME is the domain name assigned in the previous step. .. note:: The options set in the ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file will override options in the ``/etc/keystone/keystone.conf`` file. #. 
Define the destination LDAP server in the ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file. For example: .. code-block:: ini [ldap] url = ldap://localhost user = dc=Manager,dc=example,dc=org password = samplepassword suffix = dc=example,dc=org #. Create the organizational units (OU) in the LDAP directories, and define their corresponding locations in the ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` file. For example: .. code-block:: ini [ldap] user_tree_dn = ou=Users,dc=example,dc=org user_objectclass = inetOrgPerson group_tree_dn = ou=Groups,dc=example,dc=org group_objectclass = groupOfNames .. note:: These schema attributes are extensible for compatibility with various schemas. For example, this entry maps to the person attribute in Active Directory: .. code-block:: ini user_objectclass = person #. Restart the OpenStack Identity service. .. warning:: During service restart, authentication and authorization are unavailable. **Additional LDAP integration settings** Set these options in the ``/etc/keystone/keystone.conf`` file for a single LDAP server, or ``/etc/keystone/domains/keystone.DOMAIN_NAME.conf`` files for multiple back ends. Example configurations appear below each setting summary: Filters Use filters to control the scope of data presented through LDAP. .. code-block:: ini [ldap] user_filter = (memberof=cn=openstack-users,ou=workgroups,dc=example,dc=org) group_filter = Identity attribute mapping Mask account status values (include any additional attribute mappings) for compatibility with various directory services. Superfluous accounts are filtered with ``user_filter``. Setting attribute ignore to list of attributes stripped off on update. For example, you can mask Active Directory account status attributes in the ``/etc/keystone/keystone.conf`` file: .. 
code-block:: ini [ldap] user_id_attribute = cn user_name_attribute = sn user_mail_attribute = mail user_pass_attribute = userPassword user_enabled_attribute = userAccountControl user_enabled_mask = 2 user_enabled_invert = false user_enabled_default = 512 user_default_project_id_attribute = user_additional_attribute_mapping = group_id_attribute = cn group_name_attribute = ou group_member_attribute = member group_desc_attribute = description group_additional_attribute_mapping = It is possible to model more complex LDAP schemas. For example, in the user object, the objectClass posixAccount from `RFC2307 `_ is very common. If this is the underlying objectClass, then the ``uid`` field should probably be ``uidNumber`` and the ``username`` field should be either ``uid`` or ``cn``. The following illustrates the configuration: .. code-block:: ini [ldap] user_id_attribute = uidNumber user_name_attribute = cn Enabled emulation OpenStack Identity supports emulation for integrating with LDAP servers that do not provide an ``enabled`` attribute for users. This allows OpenStack Identity to advertise ``enabled`` attributes when the user entity in LDAP does not. The ``user_enabled_emulation`` option must be enabled and the ``user_enabled_emulation_dn`` option must be a valid LDAP group. Users in the group specified by ``user_enabled_emulation_dn`` will be marked as ``enabled``. For example, the following will mark any user who is a member of the ``enabled_users`` group as enabled: .. code-block:: ini [ldap] user_enabled_emulation = True user_enabled_emulation_dn = cn=enabled_users,cn=groups,dc=openstack,dc=org If the directory server has an enabled attribute, but it is not a boolean type, a mask can be used to convert it. This is useful when the enabled attribute is an integer value. The following configuration highlights the usage: .. 
code-block:: ini [ldap] user_enabled_attribute = userAccountControl user_enabled_mask = 2 user_enabled_default = 512 In this case, the attribute is an integer and the enabled attribute is listed in bit 1. If the mask configured ``user_enabled_mask`` is different from 0, it retrieves the attribute from ``user_enabled_attribute`` and performs an add operation with the ``user_enabled_mask``. If the sum of the operation matches the mask, then the account is disabled. The value of ``user_enabled_attribute`` is also saved before applying the add operation in ``enabled_nomask``. This is done in case the user needs to be enabled or disabled. Lastly, setting ``user_enabled_default`` is needed in order to create a default value on the integer attribute (512 = NORMAL ACCOUNT in Active Directory). When you have finished configuration, restart the OpenStack Identity service. .. warning:: During service restart, authentication and authorization are unavailable. Secure the OpenStack Identity service connection to an LDAP back end -------------------------------------------------------------------- We recommend securing all connections between OpenStack Identity and LDAP. The Identity service supports the use of TLS to encrypt LDAP traffic. Before configuring this, you must first verify where your certificate authority file is located. For more information, see the `OpenStack Security Guide SSL introduction `_. Once you verify the location of your certificate authority file: **To configure TLS encryption on LDAP traffic** #. Open the ``/etc/keystone/keystone.conf`` configuration file. #. Find the ``[ldap]`` section. #. In the ``[ldap]`` section, set the ``use_tls`` configuration key to ``True``. Doing so will enable TLS. #. Configure the Identity service to use your certificate authorities file. To do so, set the ``tls_cacertfile`` configuration key in the ``ldap`` section to the certificate authorities file's path. .. 
note:: You can also set the ``tls_cacertdir`` (also in the ``ldap`` section) to the directory where all certificate authorities files are kept. If both ``tls_cacertfile`` and ``tls_cacertdir`` are set, then the latter will be ignored. #. Specify what client certificate checks to perform on incoming TLS sessions from the LDAP server. To do so, set the ``tls_req_cert`` configuration key in the ``[ldap]`` section to ``demand``, ``allow``, or ``never``: .. hlist:: :columns: 1 * ``demand`` - The LDAP server always receives certificate requests. The session terminates if no certificate is provided, or if the certificate provided cannot be verified against the existing certificate authorities file. * ``allow`` - The LDAP server always receives certificate requests. The session will proceed as normal even if a certificate is not provided. If a certificate is provided but it cannot be verified against the existing certificate authorities file, the certificate will be ignored and the session will proceed as normal. * ``never`` - A certificate will never be requested. When you have finished configuration, restart the OpenStack Identity service. .. NOTE:: If you are unable to connect to LDAP via OpenStack Identity, or observe a *SERVER DOWN* error, set the ``TLS_CACERT`` in ``/etc/ldap/ldap.conf`` to the same value specified in the ``[ldap] tls_certificate`` section of ``keystone.conf``. On distributions that include openstack-config, you can configure TLS encryption on LDAP traffic by running the following commands instead. .. code-block:: console # openstack-config --set /etc/keystone/keystone.conf \ ldap use_tls True # openstack-config --set /etc/keystone/keystone.conf \ ldap tls_cacertfile ``CA_FILE`` # openstack-config --set /etc/keystone/keystone.conf \ ldap tls_req_cert ``CERT_BEHAVIOR`` Where: - ``CA_FILE`` is the absolute path to the certificate authorities file that should be used to encrypt LDAP traffic. 
- ``CERT_BEHAVIOR`` specifies what client certificate checks to perform on an incoming TLS session from the LDAP server (``demand``, ``allow``, or ``never``). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/jws-key-rotation.rst0000664000175000017500000000746200000000000022611 0ustar00zuulzuul00000000000000================ JWS key rotation ================ The JWS token provider issues tokens using asymmetric signing. This document attempts to describe how to manage key pairs in a deployment of keystone nodes that need to validate tokens issued by one another. The inherent benefit of using asymmetric keys is that each keystone server generates it's own key pair. The private key is used to sign tokens. Anyone with access to the public key has the ability to verify the token signature. This is a critical step in validating tokens across a cluster of keystone nodes. It is necessary for operators to sync public keys across all keystone nodes in the deployment. Each keystone server will need a corresponding public key for every node. This only applies to public keys. Private keys should never leave the server they are generated from. Initial setup ------------- Before a deployment of keystone servers can issue JWT tokens, each server must set ``keystone.conf [token] provider = jws``. Additionally, each API server must have its own asymmetric key pair either generated manually or using ``keystone-manage create_jws_keypair``. If you're generating the key pairs manually, they must be usable with the ``ES256`` JSON Web Algorithm (`JWA`_). It is worth noting that the ``keystone-manage create_jws_keypair`` command line utility will create an appropriate key pair, but it will not automatically deploy it to the key repository locations defined in ``keystone.conf [jwt_tokens]``. It is up to operators to move these files accordingly and resolve possible file name conflicts. 
After generating a key pair, the public key from each API server must be shared with every other API server in the deployment. Ensure the private key used to sign JWS tokens is readable by the process running keystone and available in the ``keystone.conf [jwt_tokens] jws_private_key_repository`` location. Keystone will automatically use a key named ``private.pem`` to sign tokens and ignore all other keys in the repository. To validate tokens, keystone will iterate all available public keys in ``keystone.conf [jwt_tokens] jws_public_key_repository``. At a minimum, this repository needs to have the corresponding public key to the ``private.pem`` key found in ``keystone.conf [jwt_tokens] jws_private_key_repository``. .. _`JWA`: https://tools.ietf.org/html/rfc7518 Continued operations -------------------- Depending on the security requirements for your deployment, you might need to rotate out an existing key pair. To do so without prematurely invalidating tokens, follow these steps: 1. Generate a new asymmetric key pair for a given keystone API server (see ``keystone-manage create_jws_keypair`` for more details) 2. Copy or sync the newly generated public key to the public key repositories of all other keystone API servers, the public key should be placed in ``keystone.conf [jwt_tokens] jws_public_key_repository`` 3. Copy the new private key to the private key repository on the API server you're performing the rotation on and make sure it's named ``private.pem``, at this point the server will start signing tokens with the new private key and all other keystone API servers will be able to validate those tokens since they already have a copy of the public key from step #2 4. At this point, you must wait until the last tokens signed with the old private key have expired before you can remove the old corresponding public keys from each keystone API server, note this should be a minimum of ``keystone.conf [token] expiration`` 5. 
Once you're confident all tokens signed with the old private key are expired, it is safe to remove the old corresponding public key from each API server in the deployment, which is important in case the original private key was compromised and prevents attackers from using it craft their own tokens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/keystone-features.rst0000664000175000017500000000041700000000000023031 0ustar00zuulzuul00000000000000========================== Advanced Keystone Features ========================== Guides to lesser-known features of keystone. .. toctree:: :maxdepth: 2 unified-limits resource-options credential-encryption health-check-middleware event_notifications ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/limit-list-size.inc0000664000175000017500000000260100000000000022351 0ustar00zuulzuul00000000000000.. -*- rst -*- .. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Limiting list return size ========================= Keystone provides a method of setting a limit to the number of entities returned in a collection, which is useful to prevent overly long response times for list queries that have not specified a sufficiently narrow filter. 
This limit can be set globally by setting ``list_limit`` in the default section of ``keystone.conf``, with no limit set by default. Individual driver sections may override this global value with a specific limit, for example: .. code-block:: ini [resource] list_limit = 100 If a response to ``list_{entity}`` call has been truncated, then the response status code will still be 200 (OK), but the ``truncated`` attribute in the collection will be set to ``true``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/logging.inc0000664000175000017500000000127700000000000020750 0ustar00zuulzuul00000000000000.. -*- rst -*- Logging ======= You configure logging externally to the rest of Identity. The name of the file specifying the logging configuration is set using the ``log_config_append`` option in the ``[DEFAULT]`` section of the ``/etc/keystone/keystone.conf`` file. To route logging through syslog, set ``use_syslog=true`` in the ``[DEFAULT]`` section. A sample logging configuration file is available with the project in ``etc/logging.conf.sample``. Like other OpenStack projects, Identity uses the `Python logging module`_, which provides extensive configuration options that let you define the output levels and formats. .. _`Python logging module`: https://docs.python.org/library/logging.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/manage-services.rst0000664000175000017500000002464000000000000022431 0ustar00zuulzuul00000000000000.. _manage_services: ============================================ Create and manage services and service users ============================================ Service Catalog =============== OpenStack services can be discovered when registered in keystone's service catalog. 
The service catalog can be managed as either a static file template or as a dynamic database table. File-based Service Catalog (``templated.Catalog``) -------------------------------------------------- The templated catalog is an in-memory backend initialized from a read-only ``template_file``. Choose this option only if you know that your service catalog will not change very much over time. .. NOTE:: Attempting to change your service catalog against this driver will result in ``HTTP 501 Not Implemented`` errors. This is the expected behavior. If you want to use these commands, you must instead use the SQL-based Service Catalog driver. ``keystone.conf`` example: .. code-block:: ini [catalog] driver = templated template_file = /opt/stack/keystone/etc/default_catalog.templates The value of ``template_file`` is expected to be an absolute path to your service catalog configuration. An example ``template_file`` is included in keystone, however you should create your own to reflect your deployment. SQL-based Service Catalog (``sql.Catalog``) ------------------------------------------- A dynamic database-backed driver fully supporting persistent configuration. ``keystone.conf`` example: .. code-block:: ini [catalog] driver = sql .. NOTE:: A `template_file` does not need to be defined for the sql based catalog. To build your service catalog using this driver, see the built-in help: .. code-block:: bash $ openstack --help $ openstack service create --help $ openstack endpoint create --help Create a service ~~~~~~~~~~~~~~~~ #. List the available services: .. 
code-block:: console $ openstack service list +----------------------------------+----------+------------+ | ID | Name | Type | +----------------------------------+----------+------------+ | 9816f1faaa7c4842b90fb4821cd09223 | cinder | volume | | 1250f64f31e34dcd9a93d35a075ddbe1 | cinderv2 | volumev2 | | da8cf9f8546b4a428c43d5e032fe4afc | ec2 | ec2 | | 5f105eeb55924b7290c8675ad7e294ae | glance | image | | dcaa566e912e4c0e900dc86804e3dde0 | keystone | identity | | 4a715cfbc3664e9ebf388534ff2be76a | nova | compute | | 1aed4a6cf7274297ba4026cf5d5e96c5 | novav21 | computev21 | | bed063c790634c979778551f66c8ede9 | neutron | network | | 6feb2e0b98874d88bee221974770e372 | s3 | s3 | +----------------------------------+----------+------------+ #. To create a service, run this command: .. code-block:: console $ openstack service create --name SERVICE_NAME --description SERVICE_DESCRIPTION SERVICE_TYPE The arguments are: - ``service_name``: the unique name of the new service. - ``service_type``: the service type, such as ``identity``, ``compute``, ``network``, ``image``, ``object-store`` or any other service identifier string. - ``service_description``: the description of the service. For example, to create a ``swift`` service of type ``object-store``, run this command: .. code-block:: console $ openstack service create --name swift --description "object store service" object-store +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | object store service | | enabled | True | | id | 84c23f4b942c44c38b9c42c5e517cd9a | | name | swift | | type | object-store | +-------------+----------------------------------+ #. To get details for a service, run this command: .. code-block:: console $ openstack service show SERVICE_TYPE|SERVICE_NAME|SERVICE_ID For example: .. 
code-block:: console $ openstack service show object-store +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | object store service | | enabled | True | | id | 84c23f4b942c44c38b9c42c5e517cd9a | | name | swift | | type | object-store | +-------------+----------------------------------+ Create an endpoint ~~~~~~~~~~~~~~~~~~ #. Once a service is created, register it at an endpoint: .. code-block:: console $ openstack endpoint create nova public http://example.com/compute/v2.1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | c219aa779e90403eb4a78cf0aa7d38b1 | | interface | public | | region | None | | region_id | None | | service_id | 0f5da035b8e94629bf35e7ec1703a8eb | | service_name | nova | | service_type | compute | | url | http://example.com/compute/v2.1 | +--------------+----------------------------------+ Delete a service ~~~~~~~~~~~~~~~~ To delete a specified service, specify its ID. .. code-block:: console $ openstack service delete SERVICE_TYPE|SERVICE_NAME|SERVICE_ID For example: .. code-block:: console $ openstack service delete object-store Service users ============= To authenticate users against the Identity service, you must create a service user for each OpenStack service. For example, create a service user for the Compute, Block Storage, and Networking services. To configure the OpenStack services with service users, create a project for all services and create users for each service. Assign the admin role to each service user and project pair. This role enables users to validate tokens and authenticate and authorize other user requests. Create service users -------------------- #. Create a project for the service users. Typically, this project is named ``service``, but choose any name you like: .. 
code-block:: console $ openstack project create service --domain default +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | None | | domain_id | e601210181f54843b51b3edff41d4980 | | enabled | True | | id | 3e9f3f5399624b2db548d7f871bd5322 | | is_domain | False | | name | service | | parent_id | e601210181f54843b51b3edff41d4980 | +-------------+----------------------------------+ #. Create service users for the relevant services for your deployment. For example: .. code-block:: console $ openstack user create nova --password Sekr3tPass +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | 95ec3e1d5dd747f5a512d261731d29c7 | | name | nova | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ #. Assign the admin role to the user-project pair. .. code-block:: console $ openstack role add --project service --user nova admin +-------+----------------------------------+ | Field | Value | +-------+----------------------------------+ | id | 233109e756c1465292f31e7662b429b1 | | name | admin | +-------+----------------------------------+ Configuring service tokens -------------------------- A lot of operations in OpenStack require communication between multiple services on behalf of the user. For example, the Image service storing the user's images in the Object Storage service. If the image is significantly large, the operation might fail due to the user's token having expired during upload. In the above scenarios, the Image service will attach both the user's token and its own token (called the service token), as per the diagram below. .. 
code-block:: console +----------------+ | User | +-------+--------+ | Access Image Data Request | X-AUTH-TOKEN: | +-------v---------+ | Glance | +-------+---------+ | Access Image Data Request | X-AUTH-TOKEN: | X-SERVICE-TOKEN: | +-------v---------+ | Swift | +-----------------+ When a service receives a call from another service, it validates that the token has the appropriate roles for a service user. This is configured in each individual service configuration, under the section ``[keystone_authtoken]``. If the service token is valid, the operation will be allowed even if the user's token has expired. The ``service_token_roles`` option is the list of roles that the service token must contain to be a valid service token. In the previous steps, we have assigned the `admin` role to service users, so set the option to that and set ``service_token_roles_required`` to ``true``. .. code-block:: ini [keystone_authtoken] service_token_roles = admin service_token_roles_required = true For more information regarding service tokens, please see the ``keystonemiddleware`` `release notes `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/manage-trusts.rst0000664000175000017500000000170400000000000022146 0ustar00zuulzuul00000000000000=============== Managing trusts =============== A trust is an OpenStack Identity extension that enables delegation and, optionally, impersonation through ``keystone``. See the :doc:`user guide on using trusts `. Removing Expired Trusts =========================================================== In the SQL trust stores expired and soft deleted trusts, that are not automatically removed. These trusts can be removed with:: $ keystone-manage trust_flush [options] OPTIONS (optional): --project-id : To purge trusts of given project-id. --trustor-user-id : To purge trusts of given trustor-id. --trustee-user-id : To purge trusts of given trustee-id. 
--date : To purge trusts older than date. If no date is supplied keystone-manage will use the system clock time at runtime. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/multi-factor-authentication.rst0000664000175000017500000000631600000000000025003 0ustar00zuulzuul00000000000000.. _multi_factor_authentication: =========================== Multi-Factor Authentication =========================== Configuring MFA =============== MFA is configured on a per user basis via the user options :ref:`multi_factor_auth_rules` and :ref:`multi_factor_auth_enabled`. Until these are set the user can authenticate with any one of the enabled auth methods. MFA rules --------- The MFA rules allow an admin to force a user to use specific forms of authentication or combinations of forms of authentication to get a token. The rules are specified as follows via the user option :ref:`multi_factor_auth_rules`:: [["password", "totp"], ["password", "custom-auth-method"]] They are a list of lists. The elements of the sub-lists must be strings and are intended to mirror the required authentication method names (e.g. ``password``, ``totp``, etc) as defined in the ``keystone.conf`` file in the ``[auth] methods`` option. Each list of methods specifies a rule. If the auth methods provided by a user match (or exceed) the auth methods in the list, that rule is used. The first rule found (rules will not be processed in a specific order) that matches will be used. If a user has the ruleset defined as ``[["password", "totp"]]`` the user must provide both password and totp auth methods (and both methods must succeed) to receive a token. However, if a user has a ruleset defined as ``[["password"], ["password", "totp"]]`` the user may use the ``password`` method on it's own but would be required to use both ``password`` and ``totp`` if ``totp`` is specified at all. 
Any auth methods that are not defined in ``keystone.conf`` in the ``[auth] methods`` option are ignored when the rules are processed. Empty rules are not allowed. If a rule is empty due to no-valid auth methods existing within it, the rule is discarded at authentication time. If there are no rules or no valid rules for the user, authentication occurs in the default manner: any single configured auth method is sufficient to receive a token. .. note:: The ``token`` auth method typically should not be specified in any MFA Rules. The ``token`` auth method will include all previous auth methods for the original auth request and will match the appropriate ruleset. This is intentional, as the ``token`` method is used for rescoping/changing active projects. Enabling MFA ------------ Before the MFA rules take effect on a user, MFA has to be enabled for that user via the user option :ref:`multi_factor_auth_enabled`. By default this is unset, and the rules will not take effect until configured. In the case a user should be exempt from MFA Rules, regardless if they are set, the User-Option may be set to ``False``. Using MFA ========= See :ref:`multi_factor_authentication_user_guide` in the user guide for some examples. Supported multi-factor authentication methods ============================================= TOTP is the only suggested second factor along with password for now, but there are plans to include more in future. TOTP ---- This is a simple 6 digit passcode generated by both the server and client from a known shared secret. This used in a multi-step fashion is the most common 2-factor method used these days. 
See: :ref:`auth_totp` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/oauth1.rst0000664000175000017500000000165300000000000020560 0ustar00zuulzuul00000000000000OAuth1 1.0a =========== The OAuth 1.0a feature provides the ability for Identity users to delegate roles to third party consumers via the OAuth 1.0a specification. To enable OAuth1: 1. Add the oauth1 driver to the ``[oauth1]`` section in ``keystone.conf``. For example: .. code-block:: ini [oauth1] driver = sql 2. Add the ``oauth1`` authentication method to the ``[auth]`` section in ``keystone.conf``: .. code-block:: ini [auth] methods = external,password,token,oauth1 3. If deploying under Apache httpd with ``mod_wsgi``, set the `WSGIPassAuthorization` to allow the OAuth Authorization headers to pass through `mod_wsgi`. For example, add the following to the keystone virtual host file: .. code-block:: ini WSGIPassAuthorization On See `API Specification for OAuth 1.0a `_ for the details of API definition. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/oauth2-usage-guide.rst0000664000175000017500000001233500000000000022755 0ustar00zuulzuul00000000000000====================================== OAuth2.0 Client Credentials Grant Flow ====================================== Overview ~~~~~~~~ OAuth2.0 Client Credentials Grant based on `RFC6749`_ is implemented as an extension of Keystone. This extension uses the `application credentials`_ as its back-end because they have some similar features. Users can use ``application_credentials_id`` and ``application_credentials_secret`` as client credentials to obtain the OAuth2.0 access token. The access token can then be used to access the protected resources of the OpenStack API using Keystonemiddleware that supports receiving access tokens in the Authorization header. 
See the `Identity API reference`_ for more information on generating OAuth2.0 access token. Guide ~~~~~ Enable Keystone identity server to support OAuth2.0 Client Credentials Grant by the following steps in this guide. In this example, ``keystone.host`` is the domain name used by the Keystone identity server. .. _application credentials: https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials .. _`Identity API reference`: https://docs.openstack.org/api-ref/identity/v3/index.html#os-oauth2-api .. warning:: It is strongly recommended that HTTPS be enabled in Keystone when using OAuth2.0 Client Credentials. See :doc:`./configure-https` for details. According to `RFC6749`_ , HTTPS **must** be enabled in the authorization server since requests include sensitive information, e.g., a client secret, in plain text. Note that you might have to enable both HTTP and HTTPS as some other OpenStack services or third-party applications don't use OAuth2.0 and need HTTP for the authentication with the Keystone identity server. Enable application credentials authentication --------------------------------------------- Due to the design of the current implementation, the application credentials must be enabled in Keystone as it is used for the management of OAuth2.0 client credentials. 1. Modify ``keystone.conf`` to support application credentials authentication. .. code-block:: console stack@oauth2-0-server:/$ vi /etc/keystone/keystone.conf [auth] methods = external,password,token,application_credential 2. Restart Keystone service so that the modified configuration information takes effect. .. code-block:: console stack@oauth2-0-server:/$ sudo systemctl restart devstack@keystone.service Try to access the Keystone APIs ------------------------------- At last, try to access the Keystone APIs to confirm that the server is working properly. 1. Create OAuth2.0 client credentials through the application credentials API. .. 
code-block:: console stack@oauth2-0-server:/$ openstack application credential create sample_001 +--------------+----------------------------------------------------------------------------------------+ | Field | Value | +--------------+----------------------------------------------------------------------------------------+ | description | None | | expires_at | None | | id | a7850381222a4e2cb595664dfd57d083 | | name | sample_001 | | project_id | 2b90a96668694041a640a2ef84be6de7 | | roles | admin reader member | | secret | GVm33KC6AqpDZj_ZzKhZClDqnCpNDMNh66Mvait8Dxw7Kc8kwVj7ImkwnRWvovs437f2aftbW46wEMtH0cyBQA | | system | None | | unrestricted | False | | user_id | 0b8426bb83d944bc8d0fe4c3b9a3f635 | +--------------+----------------------------------------------------------------------------------------+ 2. Obtain oauth2.0 access tokens through the "Basic" HTTP authentication with OAuth2.0 client credentials. .. code-block:: console stack@oauth2-0-server:/$ curl -sik -u "$a7850381222a4e2cb595664dfd57d083:GVm33KC6AqpDZj_ZzKhZClDqnCpNDMNh66Mvait8Dxw7Kc8kwVj7ImkwnRWvovs437f2aftbW46wEMtH0cyBQA" \ -X POST https://keystone.host/identity/v3/OS-OAUTH2/token -H "application/x-www-form-urlencoded" -d "grant_type=client_credentials" HTTP/1.1 200 OK Date: Tue, 01 Mar 2022 00:56:59 GMT Server: Apache/2.4.41 (Ubuntu) Content-Type: application/json Content-Length: 264 Vary: X-Auth-Token x-openstack-request-id: req-a8358f51-2e0f-45a7-bb1e-7d29c6a793f4 Connection: close {"access_token":"gAAAAABhi1cMynG89h8t6TJrxNiZuNzjcIUIxNctoVfuqTw7BpUedLKxjPymClVEnj9GhIT5u2mpjaJATlEAtaa3D6_t8jk_fV-mqo2IUlsmTPTnMwkcjh5FSHQVRdqvDxgY3nSqLA_Hfv-zPmjS5KWX3hmyDE5YWO1ztX6QNVQb4wTPyNL1-7I","expires_in":3600,"token_type":"Bearer"} .. 
_RFC6749: https://datatracker.ietf.org/doc/html/rfc6749././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/operations.rst0000664000175000017500000000034700000000000021541 0ustar00zuulzuul00000000000000=================== Keystone Operations =================== Guides for managing day-to-day operations of keystone and understanding your deployment. .. toctree:: :maxdepth: 1 upgrading case-insensitive manage-trusts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/performance.inc0000664000175000017500000000753500000000000021626 0ustar00zuulzuul00000000000000.. -*- rst -*- .. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Performance and scaling ======================= Before you begin tuning Keystone for performance and scalability, you should first know that Keystone is just a two tier horizontally-scalable web application, and the most effective methods for scaling it are going to be the same as for any other similarly designed web application: give it more processes, more memory, scale horizontally, and load balance the result. With that said, there are many opportunities for tuning the performance of Keystone, many of which are actually trade-offs between performance and security that you need to judge for yourself, and tune accordingly. 
Keystone configuration options that affect performance ------------------------------------------------------ These are all of the options in ``keystone.conf`` that have a direct impact on performance. See the help descriptions for these options for more specific details on how and why you might want to tune these options for yourself. * ``[DEFAULT] max_project_tree_depth``: Reduce this number to increase performance, increase this number to cater to more complicated hierarchical multitenancy use cases. * ``[DEFAULT] max_password_length``: Reduce this number to increase performance, increase this number to allow for more secure passwords. * ``[cache] enable``: Enable this option to increase performance, but you also need to configure other options in the ``[cache]`` section to actually utilize caching. * ``[token] provider``: All supported token providers have been primarily driven by performance considerations. UUID and Fernet both require online validation (cacheable HTTP calls back to keystone to validate tokens). Fernet has the highest scalability characteristics overall, but requires more work to validate, and therefore enabling caching (``[cache] enable``) is absolutely critical. * ``[fernet] max_active_keys``: If you're using Fernet tokens, decrease this option to improve performance, increase this option to support more advanced key rotation strategies. Keystonemiddleware configuration options that affect performance ---------------------------------------------------------------- This configuration actually lives in the Paste pipelines of services consuming token validation from keystone (i.e.: nova, cinder, swift, etc.). * ``cache``: When keystone's `auth_token` middleware is deployed with a swift cache, use this option to have `auth_token` middleware share a caching backend with swift. Otherwise, use the ``memcached_servers`` option instead. * ``memcached_servers``: Set this option to share a cache across ``keystonemiddleware.auth_token`` processes. 
* ``token_cache_time``: Increase this option to improve performance, decrease this option to respond to token revocation events more quickly (thereby increasing security). * ``revocation_cache_time``: Increase this option to improve performance, decrease this option to respond to token revocation events more quickly (thereby increasing security). * ``memcache_security_strategy``: Do not set this option to improve performance, but set it to improve security where you're sharing memcached with other processes. * ``include_service_catalog``: Disable this option to improve performance, if the protected service does not require a service catalog. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/resource-options.rst0000664000175000017500000001523700000000000022702 0ustar00zuulzuul00000000000000================ Resource Options ================ A resource option is an attribute that can be optionally set on an entity in keystone. These options are used to control specific features or behaviors within keystone. This allows flexibility on a per-resource basis as opposed to settings a configuration file value that controls a behavior for all resources in a deployment. This flexibility can be useful for deployments is setting different authentication requirements for users. For example, operators can use resource options to set the number of failed authentication attempts on a per-user basis as opposed to setting a global value that is applied to all users. The purpose of this document is to formally document the supported resource options used in keystone, their intended behavior, and how to use them. User Options ============ The following options are available on user resources. If left undefined, they are assumed to be false or disabled. 
These can be set either in the initial user creation (``POST /v3/users``) or by updating an existing user to include new options (``PATCH /v3/users/{user_id}``): .. code-block:: json { "user": { "options": { "ignore_lockout_failure_attempts": true } } } .. note:: User options of the ``Boolean`` type can be set to ``True``, ``False``, or ``None``; if the option is set to ``None``, it is removed from the user's data structure. .. _ignore_user_inactivity: ignore_user_inactivity ---------------------- Type: ``Boolean`` Opt into ignoring global inactivity lock settings defined in ``keystone.conf [security_compliance]`` on a per-user basis. Setting this option to ``True`` will make users not set as disabled even after the globally configured inactivity period is reached. .. code-block:: json { "user": { "options": { "ignore_user_inactivity": true } } } .. note:: Setting this option for users which are already disabled will not make them automatically enabled. Such users must be enabled manually after setting this option to True for them. See the `security compliance documentation `_ for more details. .. _ignore_change_password_upon_first_use: ignore_change_password_upon_first_use ------------------------------------- Type: ``Boolean`` Control if a user should be forced to change their password immediately after they log into keystone for the first time. This can be useful for deployments that auto-generate passwords but want to ensure a user picks a new password when they start using the deployment. .. code-block:: json { "user": { "options": { "ignore_change_password_upon_first_use": true } } } See the :ref:`security compliance documentation ` for more details. .. _ignore_password_expiry: ignore_password_expiry ---------------------- Type: ``Boolean`` Opt into ignoring global password expiration settings defined in ``keystone.conf [security_compliance]`` on a per-user basis. 
Setting this option to ``True`` will allow users to continue using passwords that may be expired according to global configuration values. .. code-block:: json { "user": { "options": { "ignore_password_expiry": true } } } See the :ref:`security compliance documentation ` for more details. .. _ignore_lockout_failure_attempts: ignore_lockout_failure_attempts ------------------------------- Type: ``Boolean`` If ``True``, opt into ignoring the number of times a user has authenticated and locking out the user as a result. .. code-block:: json { "user": { "options": { "ignore_lockout_failure_attempts": true } } } See the :ref:`security compliance documentation ` for more details. .. _lock_password: lock_password ------------- Type: ``Boolean`` If set to ``True``, this option disables the ability for users to change their password through self-service APIs. .. code-block:: json { "user": { "options": { "lock_password": true } } } See the :ref:`security compliance documentation ` for more details. .. _multi_factor_auth_enabled: multi_factor_auth_enabled ------------------------- Type: ``Boolean`` Specify if a user has multi-factor authentication enabled on their account. This will result in different behavior at authentication time and the user may be presented with different authentication requirements based on multi-factor configuration. .. code-block:: json { "user": { "options": { "multi_factor_auth_enabled": true } } } See :ref:`multi_factor_authentication` for further details. .. _multi_factor_auth_rules: multi_factor_auth_rules ----------------------- Type: ``List of Lists of Strings`` Define a list of strings that represent the methods required for a user to authenticate. .. code-block:: json { "user": { "options": { "multi_factor_auth_rules": [ ["password", "totp"], ["password", "u2f"] ] } } } See :ref:`multi_factor_authentication` for further details. Role Options ============ The following options are available on role resources. 
If left undefined, they are assumed to be false or disabled. immutable --------- Type: ``Boolean`` Specify whether a role is immutable. An immutable role may not be deleted or modified except to remove the ``immutable`` option. .. code-block:: json { "role": { "options": { "immutable": true } } } Project Options =============== The following options are available on project resources. If left undefined, they are assumed to be false or disabled. immutable --------- Type: ``Boolean`` Specify whether a project is immutable. An immutable project may not be deleted or modified except to remove the ``immutable`` option. .. code-block:: json { "project": { "options": { "immutable": true } } } Domain Options ============== The following options are available on domain resources. If left undefined, they are assumed to be false or disabled. immutable --------- Type: ``Boolean`` Specify whether a domain is immutable. An immutable domain may not be deleted or modified except to remove the ``immutable`` option. .. code-block:: json { "domain": { "options": { "immutable": true } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/security-compliance.inc0000664000175000017500000001701100000000000023272 0ustar00zuulzuul00000000000000.. -*- rst -*- .. _security_compliance: Security compliance and PCI-DSS =============================== As of the Newton release, the Identity service contains additional security compliance features, specifically to satisfy Payment Card Industry - Data Security Standard (PCI-DSS) v3.1 requirements. See `Security Hardening PCI-DSS`_ for more information on PCI-DSS. Security compliance features are disabled by default and most of the features only apply to the SQL backend for the identity driver. Other identity backends, such as LDAP, should implement their own security controls. 
Enable these features by changing the configuration settings under the ``[security_compliance]`` section in ``keystone.conf``. Setting an account lockout threshold ------------------------------------ The account lockout feature limits the number of incorrect password attempts. If a user fails to authenticate after the maximum number of attempts, the service disables the user. Users can be re-enabled by explicitly setting the enable user attribute with the update user `v3`_ API call. You set the maximum number of failed authentication attempts by setting the ``lockout_failure_attempts``: .. code-block:: ini [security_compliance] lockout_failure_attempts = 6 You set the number of minutes a user would be locked out by setting the ``lockout_duration`` in seconds: .. code-block:: ini [security_compliance] lockout_duration = 1800 If you do not set the ``lockout_duration``, users will be locked out indefinitely until the user is explicitly enabled via the API. You can ensure specific users are never locked out. This can be useful for service accounts or administrative users. You can do this by setting the user option for :ref:`ignore_lockout_failure_attempts`. Disabling inactive users ------------------------ PCI-DSS 8.1.4 requires that inactive user accounts be removed or disabled within 90 days. You can achieve this by setting the ``disable_user_account_days_inactive``: .. code-block:: ini [security_compliance] disable_user_account_days_inactive = 90 This above example means that users that have not authenticated (inactive) for the past 90 days are automatically disabled. Users can be re-enabled by explicitly setting the enable user attribute via the API. Force users to change password upon first use --------------------------------------------- PCI-DSS 8.2.6 requires users to change their password for first time use and upon an administrative password reset. 
Within the identity `user API`_, `create user` and `update user` are considered administrative password changes. Whereas, `change password for user` is a self-service password change. Once this feature is enabled, new users, and users that have had their password reset, will be required to change their password upon next authentication (first use), before being able to access any services. Prior to enabling this feature, you may want to exempt some users that you do not wish to be required to change their password. You can mark a user as exempt by setting the user options attribute :ref:`ignore_change_password_upon_first_use`. .. WARNING:: Failure to mark service users as exempt from this requirement will result in your service account passwords becoming expired after being reset. When ready, you can configure it so that users are forced to change their password upon first use by setting ``change_password_upon_first_use``: .. code-block:: ini [security_compliance] change_password_upon_first_use = True .. _`user API`: https://docs.openstack.org/api-ref/identity/v3/index.html#users Configuring password expiration ------------------------------- Passwords can be configured to expire within a certain number of days by setting the ``password_expires_days``: .. code-block:: ini [security_compliance] password_expires_days = 90 Once set, any new password changes have an expiration date based on the date/time of the password change plus the number of days defined here. Existing passwords will not be impacted. If you want existing passwords to have an expiration date, you would need to run a SQL script against the password table in the database to update the expires_at column. If there exists a user whose password you do not want to expire, keystone supports setting that via the user option :ref:`ignore_password_expiry`. 
Configuring password strength requirements ------------------------------------------ You can set password strength requirements, such as requiring numbers in passwords or setting a minimum password length, by adding a regular expression to the ``password_regex`` setting: .. code-block:: ini [security_compliance] password_regex = ^(?=.*\d)(?=.*[a-zA-Z]).{7,}$ The above example is a regular expression that requires a password to have: * One (1) letter * One (1) digit * Minimum length of seven (7) characters If you do set the ``password_regex``, you should provide text that describes your password strength requirements. You can do this by setting the ``password_regex_description``: .. code-block:: ini [security_compliance] password_regex_description = Passwords must contain at least 1 letter, 1 digit, and be a minimum length of 7 characters. It is imperative that the ``password_regex_description`` matches the actual regex. If the ``password_regex`` and the ``password_regex_description`` do not match, it will cause user experience to suffer since this description will be returned to users to explain why their requested password was insufficient. .. note:: You must ensure the ``password_regex_description`` accurately and completely describes the ``password_regex``. If the two options are out of sync, the help text could inaccurately describe the password requirements being applied to the password. This would lead to a poor user experience. Requiring a unique password history ----------------------------------- The password history requirements controls the number of passwords for a user that must be unique before an old password can be reused. You can enforce this by setting the ``unique_last_password_count``: .. code-block:: ini [security_compliance] unique_last_password_count= 5 The above example does not allow a user to create a new password that is the same as any of their last four previous passwords. 
Similarly, you can set the number of days that a password must be used before the user can change it by setting the ``minimum_password_age``: .. code-block:: ini [security_compliance] minimum_password_age = 1 In the above example, once a user changes their password, they would not be able to change it again for one day. This prevents users from changing their passwords immediately in order to wipe out their password history and reuse an old password. .. note:: When you set ``password_expires_days``, the value for the ``minimum_password_age`` should be less than the ``password_expires_days``. Otherwise, users would not be able to change their passwords before they expire. Prevent Self-Service Password Changes ------------------------------------- If there exists a user who should not be able to change her own password via the keystone password change API, keystone supports setting that via the user option :ref:`lock_password`. This is typically used in the case where passwords are managed externally to keystone. .. _Security Hardening PCI-DSS: https://specs.openstack.org/openstack/keystone-specs/specs/keystone/newton/pci-dss.html .. _v3: https://docs.openstack.org/api-ref/identity/v3/index.html#update-user ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/service-api-protection.rst0000664000175000017500000005715100000000000023756 0ustar00zuulzuul00000000000000============= Default Roles ============= ------ Primer ------ Like most OpenStack services, keystone protects its API using role-based access control (RBAC). Users can access different APIs depending on the roles they have on a project, domain, or system, which we refer to as scope. As of the Rocky release, keystone provides three roles called ``admin``, ``member``, and ``reader`` by default. Operators can grant these roles to any actor (e.g., group or user) on any scope (e.g., system, domain, or project). 
If you need a refresher on authorization scopes and token types, please refer to the `token guide`_. The following sections describe how each default role behaves with keystone's API across different scopes. Additionally, other service developers can use this document as a guide for implementing similar patterns in their services. Default roles and behaviors across scopes allow operators to delegate more functionality to their team, auditors, customers, and users without maintaining custom policies. In addition to ``admin``, ``member``, and ``reader`` role, from 2023.2 (Bobcat) release keystone will provide the ``service`` and ``manager`` roles by default as well. Operators can use the ``service`` role for service to service API calls instead of using ``admin`` role for the same. The service role will be separate from ``admin``, ``member``, ``reader`` and will not implicate any of these roles. Operators can give the ``manager`` role to users to within a domain to enable self-service management of users, groups, projects and role assignments within their domain. .. _`token guide`: https://docs.openstack.org/keystone/latest/admin/tokens-overview.html#authorization-scopes ----------------- Roles Definitions ----------------- The default roles provided by keystone via ``keystone-manage bootstrap`` (except for the ``service`` role) are related through role implications. The ``admin`` role implies the ``manager`` role, the ``manager`` implies the ``member`` role, and the ``member`` role implies the ``reader`` role. These implications mean users with the ``admin`` role automatically have the ``manager``, ``member`` and ``reader`` roles. Additionally, users with the ``manager`` role automatically have the ``member`` and ``reader`` roles. Users with the ``member`` role automatically have the ``reader`` role. Implying roles reduces role assignments and forms a natural hierarchy between the default roles. 
It also reduces the complexity of default policies by making check strings short. For example, a policy that requires ``reader`` can be expressed as: .. code-block:: yaml "identity:list_foo": "role:reader" Instead of: .. code-block:: yaml "identity:list_foo": "role:admin or role:manager or role:member or role:reader" Reader ====== .. warning:: While it's possible to use the ``reader`` role to perform audits, we highly recommend assessing the viability of using ``reader`` for auditing from the perspective of the compliance target you're pursuing. The ``reader`` role is the least-privileged role within the role hierarchy described here. As such, OpenStack development teams, by default, do not advocate exposing sensitive information to users with the ``reader`` role, regardless of the scope. We have noted the need for a formal, read-only, role that is useful for inspecting all applicable resources within a particular scope, but it shouldn't be implemented as the lowest level of authorization. This work will come in a subsequent release where we support an elevated read-only role, that implies ``reader``, but also exposes sensitive information, where applicable. This will allow operators to grant third-party auditors a permissive role for viewing sensitive information, specifically for compliance targets that require it. The ``reader`` role provides read-only access to resources within the system, a domain, or a project. Depending on the assignment scope, two users with the ``reader`` role can expect different API behaviors. For example, a user with the ``reader`` role on the system can list all projects within the deployment. A user with the ``reader`` role on a domain can only list projects within their domain. By analyzing the scope of a role assignment, we increase the re-usability of the ``reader`` role and provide greater functionality without introducing more roles. 
For example, to accomplish this without analyzing assignment scope, you would need ``system-reader``, ``domain-reader``, and ``project-reader`` roles in addition to custom policies for each service. It's imperative to note that ``reader`` is the least authoritative role in the hierarchy because assignments using ``admin`` or ``member`` ultimately include the ``reader`` role. We document this explicitly so that ``reader`` roles are not overloaded with read-only access to sensitive information. For example, a deployment pursuing a specific compliance target may want to leverage the ``reader`` role to perform the audit. If the audit requires the auditor to evaluate sensitive information, like license keys or administrative metadata, within a given scope, auditors shouldn't expect to perform these operations with the ``reader`` role. We justify this design decision because sensitive information should be explicitly protected, and not implicitly exposed. The ``reader`` role should be implemented and used from the perspective of least-privilege, which may or may not fulfill your auditing use case. Member ====== Within keystone, there isn't a distinct advantage to having the ``member`` role instead of the ``reader`` role. The ``member`` role is more applicable to other services. The ``member`` role works nicely for introducing granularity between ``admin`` and ``reader`` roles. Other services might write default policies that require the ``member`` role to create resources, but the ``admin`` role to delete them. For example, users with ``reader`` on a project could list instance, users with ``member`` on a project can list and create instances, and users with ``admin`` on a project can list, create, and delete instances. Service developers can use the ``member`` role to provide more flexibility between ``admin`` and ``reader`` on different scopes. Manager ======= The ``manager`` role takes a special place in keystone. 
It sits between the ``admin`` and ``member`` role, allowing limited identity management while being clearly differentiated from the ``admin`` role both in terms of purpose and privileges. The ``manager`` role is meant to be assigned in a domain scope and enables users to manage identity assets in a whole domain including users, projects, groups and role assignments. This enables identity self-service management capabilities for users within a domain without the need to assign the most privileged ``admin`` role to them. The keystone default policies include a special rule that specifies the list of roles a user with the ``manager`` role is be able to assign and revoke within the domain scope. This prevents such user from escalating their own privileges or those of others beyond ``manager`` and for this purpose the list excludes the ``admin`` role. The list can be adjusted by cloud administrators via policy definitions in case the role model differs. For example, if a new role is introduced for a specific cloud environment, the list can be adjusted to allow users with the ``manager`` role to also assign it. Other services might write default policies to enable the ``manager`` role to have more privileged managing rights or cross-project privileges in a domain. Admin ===== We reserve the ``admin`` role for the most privileged operations within a given scope. It is important to note that having ``admin`` on a project, domain, or the system carries separate authorization and are not transitive. For example, users with ``admin`` on the system should be able to manage every aspect of the deployment because they're operators. Users with ``admin`` on a project shouldn't be able to manage things outside the project because it would violate the tenancy of their role assignment (this doesn't apply consistently since services are addressing this individually at their own pace). Service ======= We reserve the ``service`` role for Service-to-service communication. 
The aim of a ``service`` role is to allow a service to communicate with another service and possibly be granted elevated privileges by the service receiving the request. Before the introduction of the ``service`` role, a service had to be granted the ``admin`` role in order to have elevated privileges, which gave a service powers way beyond what was necessary. With the ``service`` role in place, we can now allow all service-to-service APIs to default to the ``service`` role only. For example, a policy that requires ``service`` can be expressed as: .. code-block:: yaml "identity:create_foo": "role:service" There might be exception service-to-service APIs which project think are useful to be used by admin or non-admin user then they can take the exceptional decision to default them to user role and ``service`` role. For example, a policy that requires ``service`` and ``admin`` can be expressed as: .. code-block:: yaml "identity:create_foo": "role:service or role:admin" .. note:: Unlike the other default roles, the ``service`` role is *not* a member of a role hierarchy. It is a standalone role. .. note:: As of the Train release, keystone applies the following personas consistently across its API. --------------- System Personas --------------- This section describes authorization personas typically used for operators and deployers. You can find all users with system role assignments using the following query: .. 
code-block:: console $ openstack role assignment list --names --system all +--------+------------------------+------------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+------------------------+------------------------+---------+--------+--------+-----------+ | admin | | system-admins@Default | | | all | False | | admin | admin@Default | | | | all | False | | admin | operator@Default | | | | all | False | | reader | | system-support@Default | | | all | False | | admin | operator@Default | | | | all | False | | member | system-support@Default | | | | all | False | +--------+------------------------+------------------------+---------+--------+--------+-----------+ System Administrators ===================== *System administrators* are allowed to manage every resource in keystone. System administrators are typically operators and cloud administrators. They can control resources that ultimately affect the behavior of the deployment. For example, they can add or remove services and endpoints in the catalog, create new domains, add federated mappings, and clean up stale resources, like a user's application credentials or trusts. You can find *system administrators* in your deployment with the following assignments: .. 
code-block:: console $ openstack role assignment list --names --system all --role admin +-------+------------------+-----------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +-------+------------------+-----------------------+---------+--------+--------+-----------+ | admin | | system-admins@Default | | | all | False | | admin | admin@Default | | | | all | False | | admin | operator@Default | | | | all | False | +-------+------------------+-----------------------+---------+--------+--------+-----------+ System Members & System Readers =============================== In keystone, *system members* and *system readers* are very similar and have the same authorization. Users with these roles on the system can view all resources within keystone. They can list role assignments, users, projects, and group memberships, among other resources. The *system reader* persona is useful for members of a support team or auditors if the audit doesn't require access to sensitive information. You can find *system members* and *system readers* in your deployment with the following assignments: .. code-block:: console $ openstack role assignment list --names --system all --role member --role reader +--------+------------------------+------------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+------------------------+------------------------+---------+--------+--------+-----------+ | reader | | system-support@Default | | | all | False | | admin | operator@Default | | | | all | False | | member | system-support@Default | | | | all | False | +--------+------------------------+------------------------+---------+--------+--------+-----------+ .. warning:: Filtering system role assignments is currently broken and is being tracked as a `bug `_. 
--------------- Domain Personas --------------- This section describes authorization personas for people who manage their own domains, which contain projects, users, and groups. You can find all users with role assignments on a specific domain using the following query: .. code-block:: console $ openstack role assignment list --names --domain foobar +---------+-----------------+----------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +---------+-----------------+----------------------+---------+--------+--------+-----------+ | reader | support@Default | | | foobar | | False | | admin | jsmith@Default | | | foobar | | False | | admin | | foobar-admins@foobar | | foobar | | False | | manager | alice@foobar | | | foobar | | False | | member | jdoe@foobar | | | foobar | | False | +---------+-----------------+----------------------+---------+--------+--------+-----------+ Domain Administrators ===================== *Domain administrators* can manage most aspects of the domain or its contents. These users can create new projects and users within their domain. They can inspect the role assignments users have on projects within their domain. *Domain administrators* aren't allowed to access system-specific resources or resources outside their domain. Users that need control over project, group, and user creation are a great fit for *domain administrators*. You can find *domain administrators* in your deployment with the following role assignment: .. 
code-block:: console $ openstack role assignment list --names --domain foobar --role admin +-------+----------------+----------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +-------+----------------+----------------------+---------+--------+--------+-----------+ | admin | jsmith@Default | | | foobar | | False | | admin | | foobar-admins@foobar | | foobar | | False | +-------+----------------+----------------------+---------+--------+--------+-----------+ Domain Managers =============== *Domain managers* can only manage specific resources related to identity management within their domain. This includes creating new users, projects and groups as well as updating and deleting them. They can also assign and revoke roles between those or in relation to the domain. Furthermore, they can inspect role assignments within the domain. *Domain managers* cannot change any aspects of the domain itself. The role assignments they can apply within their domain is limited to a specific list of applicable roles and in the default configuration, this excludes the ``admin`` role to prevent privilege escalation. You can find *domain managers* in your deployment with the following role assignment: .. code-block:: console $ openstack role assignment list --names --domain foobar --role manager +---------+-----------------+----------------------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +---------+-----------------+----------------------+---------+--------+--------+-----------+ | manager | alice@foobar | | | foobar | | False | +---------+-----------------+----------------------+---------+--------+--------+-----------+ Domain Members & Domain Readers =============================== Domain members and domain readers have the same relationship as system members and system readers. They're allowed to view resources and information about their domain. 
They aren't allowed to access system-specific information or information about projects, groups, and users outside their domain. The domain member and domain reader use-cases are great for support teams, monitoring the details of an account, or auditing resources within a domain assuming the audit doesn't validate sensitive information. You can find domain members and domain readers with the following role assignments: .. code-block:: console $ openstack role assignment list --names --role member --domain foobar +--------+-------------+-------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+-------------+-------+---------+--------+--------+-----------+ | member | jdoe@foobar | | | foobar | | False | +--------+-------------+-------+---------+--------+--------+-----------+ $ openstack role assignment list --names --role reader --domain foobar +--------+-----------------+-------+---------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+-----------------+-------+---------+--------+--------+-----------+ | reader | support@Default | | | foobar | | False | +--------+-----------------+-------+---------+--------+--------+-----------+ ---------------- Project Personas ---------------- This section describes authorization personas for users operating within a project. These personas are commonly used by end users. You can find all users with role assignments on a specific project using the following query: .. 
code-block:: console $ openstack role assignment list --names --project production +--------+----------------+----------------------------+-------------------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+----------------+----------------------------+-------------------+--------+--------+-----------+ | admin | jsmith@Default | | production@foobar | | | False | | admin | | production-admins@foobar | production@foobar | | | False | | member | | foobar-operators@Default | production@foobar | | | False | | reader | alice@Default | | production@foobar | | | False | | reader | | production-support@Default | production@foobar | | | False | +--------+----------------+----------------------------+-------------------+--------+--------+-----------+ Project Administrators ====================== *Project administrators* can only view and modify data within the project they have authorization on. They're able to view information about their projects and set tags on their projects. They're not allowed to view system or domain resources, as that would violate the tenancy of their role assignment. Since the majority of the resources in keystone's API are system and domain-specific, *project administrators* don't have much authorization. You can find *project administrators* in your deployment with the following role assignment: .. 
code-block:: console $ openstack role assignment list --names --project production --role admin +-------+----------------+--------------------------+-------------------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +-------+----------------+--------------------------+-------------------+--------+--------+-----------+ | admin | jsmith@Default | | production@foobar | | | False | | admin | | production-admins@foobar | production@foobar | | | False | +-------+----------------+--------------------------+-------------------+--------+--------+-----------+ Project Members & Project Readers ================================= *Project members* and *project readers* can discover information about their projects. They can access important information like resource limits for their project, but they're not allowed to view information outside their project or view system-specific information. You can find *project members* and *project readers* in your deployment with the following role assignments: .. 
code-block:: console $ openstack role assignment list --names --project production --role member +--------+------+--------------------------+-------------------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+------+--------------------------+-------------------+--------+--------+-----------+ | member | | foobar-operators@Default | production@foobar | | | False | +--------+------+--------------------------+-------------------+--------+--------+-----------+ $ openstack role assignment list --names --project production --role reader +--------+---------------+----------------------------+-------------------+--------+--------+-----------+ | Role | User | Group | Project | Domain | System | Inherited | +--------+---------------+----------------------------+-------------------+--------+--------+-----------+ | reader | alice@Default | | production@foobar | | | False | | reader | | production-support@Default | production@foobar | | | False | +--------+---------------+----------------------------+-------------------+--------+--------+-----------+ ---------------- Writing Policies ---------------- If the granularity provided above doesn't meet your specific use-case, you can still override policies and maintain them manually. You can read more about how to do that in oslo.policy usage `documentation`_. .. _`documentation`: https://docs.openstack.org/oslo.policy/latest/admin/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/token-provider.rst0000664000175000017500000000325700000000000022331 0ustar00zuulzuul00000000000000============== Token provider ============== OpenStack Identity supports customizable token providers. This is specified in the ``[token]`` section of the configuration file. The token provider controls the token construction, validation, and revocation operations. 
You can register your own token provider by configuring the following property: .. note:: More commonly, you can use this option to change the token provider to one of the ones built in. Alternatively, you can use it to configure your own token provider. * ``provider`` - token provider driver. Defaults to ``fernet``. Implemented by :class:`keystone.token.providers.fernet.Provider`. This is the entry point for the token provider in the ``keystone.token.provider`` namespace. Below is the detailed list of the token formats supported by keystone.: Fernet ``fernet`` tokens do not need to be persisted at all, but require that you run ``keystone-manage fernet_setup`` (also see the ``keystone-manage fernet_rotate`` command). .. warning:: Fernet tokens are bearer tokens. They must be protected from unnecessary disclosure to prevent unauthorized access. JWS ``jws`` tokens do not need to be persisted at all, but require that you configure an asymmetric key pair to sign and validate tokens. The key pair can be generated using ``keystone-manage create_jws_keypair`` or it can be generated out-of-band manually so long as it is compatible with the JWT ``ES256`` Elliptic Curve Digital Signature Algorithm (ECDSA) using a P-256 curve and a SHA-256 hash algorithm. .. warning:: JWS tokens are bearer tokens. They must be protected from unnecessary disclosure to prevent unauthorized access. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/token-support-matrix.ini0000664000175000017500000001011400000000000023452 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # For information about the format of this file, refer to the documentation # for sphinx-feature-classification: # # https://docs.openstack.org/sphinx-feature-classification/latest/ [driver.fernet] title=Fernet tokens [driver.jws] title=JWS tokens [operation.create_unscoped_token] title=Create unscoped token status=mandatory notes=All token providers must be capable of issuing tokens without an explicit scope of authorization. cli=openstack --os-username= --os-user-domain-name= --os-password= token issue driver.fernet=complete driver.jws=complete [operation.create_system_token] title=Create system-scoped token status=mandatory notes=All token providers must be capable of issuing system-scoped tokens. cli=openstack --os-username= --os-user-domain-name= --os-system-scope all token issue driver.fernet=complete driver.jws=complete [operation.create_project_scoped_token] title=Create project-scoped token status=mandatory notes=All token providers must be capable of issuing project-scoped tokens. cli=openstack --os-username= --os-user-domain-name= --os-password= --os-project-name= --os-project-domain-name= token issue driver.fernet=complete driver.jws=complete [operation.create_domain_scoped_token] title=Create domain-scoped token status=optional notes=Domain-scoped tokens are not required for all use cases, and for some use cases, projects can be used instead. 
cli=openstack --os-username= --os-user-domain-name= --os-password= --os-domain-name= token issue driver.fernet=complete driver.jws=complete [operation.create_trust_scoped_token] title=Create trust-scoped token status=optional notes=Tokens scoped to a trust convey only the user impersonation and project-based authorization attributes included in the delegation. cli=openstack --os-username= --os-user-domain-name= --os-password= --os-trust-id= token issue driver.fernet=complete driver.jws=complete [operation.create_token_using_oauth] title=Create a token given an OAuth access token status=optional notes=OAuth access tokens can be exchanged for keystone tokens. cli= driver.fernet=complete driver.jws=complete [operation.revoke_token] title=Revoke a token status=optional notes=Tokens may be individually revoked, such as when a user logs out of Horizon. Under certain circumstances, it's acceptable for more than just a single token may be revoked as a result of this operation (such as when the revoked token was previously used to create additional tokens). cli=openstack token revoke driver.fernet=complete driver.jws=complete [feature.online_validation] title=Online validation status=mandatory notes=Keystone must be able to validate the tokens that it issues when presented with a token that it previously issued. cli= driver.fernet=complete driver.jws=complete [feature.offline_validation] title=Offline validation status=optional notes=Services using Keystone for authentication may want to validate tokens themselves, rather than calling back to keystone, in order to improve performance and scalability. cli= driver.fernet=missing driver.jws=missing [feature.non_persistent] title=Non-persistent status=optional notes=If a token format does not require persistence (such as to a SQL backend), then there is no scalability limit to the number of tokens that keystone can issue at once, and there is no need to perform clean up operations such as `keystone-manage token_flush`. 
cli= driver.fernet=complete driver.jws=complete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/tokens-overview.rst0000664000175000017500000001435200000000000022526 0ustar00zuulzuul00000000000000=============== Keystone tokens =============== Tokens are used to authenticate and authorize your interactions with OpenStack APIs. Tokens come in many scopes, representing various authorization and sources of identity. .. _authorization_scopes: Authorization scopes -------------------- Tokens are used to relay information about your role assignments. It's not uncommon for a user to have multiple role assignments, sometimes spanning projects, domains, or the entire system. These are referred to as authorization scopes, where a token has a single scope of operation (e.g., a project, domain, or the system). For example, a token scoped to a project can't be reused to do something else in a different project. Each level of authorization scope is useful for certain types of operations in certain OpenStack services, and are not interchangeable. Unscoped tokens ~~~~~~~~~~~~~~~ An unscoped token does not contain a service catalog, roles, or authorization scope (e.g., project, domain, or system attributes within the token). Their primary use case is simply to prove your identity to keystone at a later time (usually to generate scoped tokens), without repeatedly presenting your original credentials. The following conditions must be met to receive an unscoped token: * You must not specify an authorization scope in your authentication request (for example, on the command line with arguments such as ``--os-project-name`` or ``--os-domain-id``), * Your identity must not have a "default project" associated with it that you also have role assignments, and thus authorization, upon. Project-scoped tokens ~~~~~~~~~~~~~~~~~~~~~ Projects are containers for resources, like volumes or instances. 
Project-scoped tokens express your authorization to operate in a specific tenancy of the cloud and are useful for things like spinning up compute resources or carving off block storage. They contain a service catalog, a set of roles, and information about the project. Most end-users need role assignments on projects to consume resources in a deployment. Domain-scoped tokens ~~~~~~~~~~~~~~~~~~~~ Domains are namespaces for projects, users, and groups. A domain-scoped token expresses your authorization to operate on the contents of a domain or the domain itself. While some OpenStack services are still adopting the domain concept, domains are fully supported in keystone. This means users with authorization on a domain have the ability to manage things within the domain. For example, a domain administrator can create new users and projects within that domain. Domain-scoped tokens contain a service catalog, roles, and information about the domain. People who need to manage users and projects typically need domain-level access. System-scoped tokens ~~~~~~~~~~~~~~~~~~~~ Some OpenStack APIs fit nicely within the concept of projects (e.g., creating an instance) or domains (e.g., creating a new user), but there are also APIs that affect the entire deployment system (e.g. modifying endpoints, service management, or listing information about hypervisors). These operations are typically reserved for operators and require system-scoped tokens, which represents the role assignments a user has to operate on the deployment as a whole. The term *system* refers to the deployment system, which is a collection of hardware (e.g., compute nodes) and services (e.g., nova, cinder, neutron, barbican, keystone) that provide Infrastructure-as-a-Service. System-scoped tokens contain a service catalog, roles, and information about the *system*. System role assignments and system-scoped tokens are typically reserved for operators and cloud administrators. 
Token providers --------------- The token type issued by keystone is configurable through the ``/etc/keystone/keystone.conf`` file. Currently, there are two supported token providers, ``fernet`` and ``jws``. Fernet tokens ~~~~~~~~~~~~~ The fernet token format was introduced in the OpenStack Kilo release and now is the default token provider in Keystone. Unlike the other token types mentioned in this document, fernet tokens do not need to be persisted in a back end. ``AES256`` encryption is used to protect the information stored in the token and integrity is verified with a ``SHA256 HMAC`` signature. Only the Identity service should have access to the keys used to encrypt and decrypt fernet tokens. Like UUID tokens, fernet tokens must be passed back to the Identity service in order to validate them. For more information on the fernet token type, see the :doc:`fernet-token-faq`. A deployment might consider using the fernet provider as opposed to JWS tokens if they are concerned about public expose of the payload used to build tokens. JWS tokens ~~~~~~~~~~ The JSON Web Signature (JWS) token format is a type of JSON Web Token (JWT) and it was implemented in the Stein release. JWS tokens are signed, meaning the information used to build the token ID is not opaque to users and can it can be decoded by anyone. JWS tokens are ephemeral, or non-persistent, which means they won't bloat the database or require replication across nodes. Since the JWS token provider uses asymmetric keys, the tokens are signed with private keys and validated with public keys. The JWS token provider implementation only supports the ``ES256`` JSON Web Algorithm (JWA), which is an Elliptic Curve Digital Signature Algorithm (ECDSA) using the P-256 curve and a SHA-256 hash algorithm. A deployment might consider using JWS tokens as opposed to fernet tokens if there are security concerns about sharing symmetric encryption keys across hosts. 
Note that a major difference between the two providers is that JWS tokens are not opaque and can be decoded by anyone with the token ID. Fernet tokens are opaque in that the token ID is ciphertext. Despite the JWS token payload being readable by anyone, keystone reserves the right to make backwards incompatible changes to the token payload itself, which is not an API contract. We only recommend validating the token against keystone's authentication API to inspect its associated metadata. We strongly discourage relying on decoded payloads for information about tokens. More information about JWTs can be found in the `specification`_. .. _`specification`: https://tools.ietf.org/html/rfc7519 .. support_matrix:: token-support-matrix.ini ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/tokens.rst0000664000175000017500000000035600000000000020661 0ustar00zuulzuul00000000000000========================= All about keystone tokens ========================= Everything you need to know about keystone tokens. .. toctree:: :maxdepth: 2 tokens-overview fernet-token-faq jws-key-rotation token-provider ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/troubleshoot.inc0000664000175000017500000000156100000000000022047 0ustar00zuulzuul00000000000000.. -*- rst -*- Troubleshoot the Identity service ================================= To troubleshoot the Identity service, review the logs in the ``/var/log/keystone/keystone.log`` file. Use the ``/etc/keystone/logging.conf`` file to configure the location of log files. .. note:: The ``insecure_debug`` flag is unique to the Identity service. If you enable ``insecure_debug``, error messages from the API change to return security-sensitive information. For example, the error message on failed authentication includes information on why your authentication failed. 
The logs show the components that have come in to the WSGI request, and ideally show an error that explains why an authorization request failed. If you do not see the request in the logs, run keystone with the ``--debug`` parameter. Pass the ``--debug`` parameter before the command parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/unified-limits.rst0000664000175000017500000005124500000000000022303 0ustar00zuulzuul00000000000000============== Unified Limits ============== .. WARNING:: The unified limits API is currently labeled as experimental and can change in backwards incompatible ways. After we get feedback on the intricacies of the API and no longer expect to make API breaking changes, the API will be marked as stable. As of the Queens release, keystone has the ability to store and relay information known as a limit. Limits can be used by services to enforce quota on resources across OpenStack. This section describes the basic concepts of limits, how the information can be consumed by services, and how operators can manage resource quota across OpenStack using limits. What is a limit? ================ A limit is a threshold for resource management and helps control resource utilization. A process for managing limits allows for reallocation of resources to different users or projects as needs change. Some information needed to establish a limit may include: - project_id - domain_id - API service type (e.g. compute, network, object-storage) - a resource type (e.g. ram_mb, vcpus, security-groups) - a default limit - a project specific limit i.e resource limit - user_id (optional) - a region (optional depending on the service) .. NOTE:: The `default limit` of registered limit and the `resource limit` of project limit now are limited from `-1` to `2147483647` (integer). `-1` means no limit and `2147483647` is the max value for user to define limits. 
The length of unified limit's `resource type` now is limited from `1` to `255` (string). Since keystone is the source of truth for nearly everything in the above list, limits are a natural fit as a keystone resource. Two different limit resources exist in this design. The first is a registered limit and the second is a project limit. Registered limits ----------------- A registered limit accomplishes two important things in order to enforce quota across multi-tenant, distributed systems. First, it establishes resource types and associates them to services. Second, it sets a default resource limit for all projects. The first part maps specific resource types to the services that provide them. For example, a registered limit can map `vcpus`, to the compute service. The second part sets a default of 20 `vcpus` per project. This provides all the information needed for basic quota enforcement for any resource provided by a service. Domain limits ------------- A domain limit is a limit associated to a specific domain and it acts as an override for a registered limit. Similar to registered limits, domain limits require a resource type and a service. Additionally, a registered limit must exist before you can create a domain-specific override. For example, let's assume a registered limit exists for `vcpus` provided by the compute service. It wouldn't be possible to create a domain limit for `cores` on the compute service. Domain limits can only override limits that have already been registered. In a general sense, registered limits are likely established when a new service or cloud is deployed. Domain limits are used continuously to manage the flow of resource allocation. Domain limits may affect the limits of projects within the domain. This is particularly important to keep in mind when choosing an enforcement model, documented below. Project limits -------------- Project limits have the same properties as domain limits, but are specific to projects instead of domains. 
You must register a limit before creating a project-specific override. Just like with domain limits, the flow of resources between related projects may vary depending on the configured enforcement model. The support enforcement models below describe how limit validation and enforcement behave between related projects and domains. Together, registered limits, domain limits, and project limits give deployments the ability to restrict resources across the deployment by default, while being flexible enough to freely marshal resources across projects. Limits and usage ================ When we talk about a quota system, we’re really talking about two systems. A system for setting and maintaining limits, the theoretical maximum usage, and a system for enforcing that usage does not exceed limits. While they are coupled, they are distinct. Up to this point, we've established that keystone is the system for maintaining limit information. Keystone’s responsibility is to ensure that any changes to limits are consistent with related limits currently stored in keystone. Individual services maintain and enforce usage. Services check enforcement against the current limits at the time a user requests a resource. Usage reflects the actual resource allocation in units to a consumer. 
Given the above, the following is a possible and legal flow: - User Jane is in project Foo - Project Foo has a default CPU limit of 20 - User Jane allocated 18 CPUs in project Foo - Administrator Kelly sets project Foo CPU limit to 10 - User Jane can no longer allocate instance resources in project Foo, until she (or others in the project) have deleted at least 9 CPUs to get under the new limit The following would be another permutation: - User Jane is in project Foo - Project Foo has a default CPU limit of 20 - User Jane allocated 20 CPUs in project Foo - User Jane attempts to create another instance, which results in a failed resource request since the request would violate usage based on the current limit of CPUs - User Jane requests more resources - Administrator Kelly adjust the project limit for Foo to be 30 CPUs - User Jane resends her request for an instance, which succeeds since the usage for project Foo is under the project limit of 30 CPUs This behavior lets administrators set the policy of what the future should be when convenient, and prevent those projects from creating any more resources that would exceed the limits in question. Members of a project can fix this for themselves by bringing down the project usage to where there is now headroom. If they don’t, at some point the administrators can more aggressively delete things themselves. Enforcement models ================== Project resources in keystone can be organized in hierarchical structures, where projects can be nested. As a result, resource limits and usage should respect that hierarchy if present. It's possible to think of different cases where limits or usage assume different characteristics, regardless of the project structure. For example, if a project's usage for a particular resource hasn't been met, should the projects underneath that project assume those limits? Should they not assume those limits? These opinionated models are referred to as enforcement models. 
This section is dedicated to describing different enforcement models that are implemented. It is important to note that enforcement must be consistent across the entire deployment. Grouping certain characteristics into a model makes referring to behaviors consistent across services. Operators should be aware that switching between enforcement models may result in backwards incompatible changes. We recommend extremely careful planning and understanding of various enforcement models if you're planning on switching from one model to another in a deployment. Keystone exposes a ``GET /limits/model`` endpoint that returns the enforcement model selected by the deployment. This allows limit information to be discoverable and preserves interoperability between OpenStack deployments with different enforcement models. Flat ---- Flat enforcement ignores all aspects of a project hierarchy. Each project is considered a peer to all other projects. The limits associated to the parents, siblings, or children have no affect on a particular project. This model exercises the most isolation between projects because there are no assumptions between limits, regardless of the hierarchy. Validation of limits via the API will allow operations that might not be considered accepted in other models. For example, assume project `Charlie` is a child of project `Beta`, which is a child of project `Alpha`. All projects assume a default limit of 10 cores via a registered limit. The labels in the diagrams below use shorthand notation for `limit` and `usage` as `l` and `u`, respectively: .. blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (u=0)"]; Beta [label=" Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; } Each project may use up to 10 cores because of the registered limit and none of the projects have an override. Using flat enforcement, you're allowed to ``UPDATE LIMIT on Alpha to 20``: .. 
blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (l=20, u=0)", textcolor = "#00af00"]; Beta [label=" Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; } You're also allowed to ``UPDATE LIMIT on Charlie to 30``, even though `Charlie` is a sub-project of both `Beta` and `Alpha`. .. blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (l=20, u=0)"]; Beta [label=" Beta (u=0)"]; Charlie [label="Charlie (l=30, u=0)", textcolor = "#00af00"]; } This is allowed with flat enforcement because the hierarchy is not taken into consideration during limit validation. Child projects may have a higher limit than a parent project. Conversely, you can simulate hierarchical enforcement by adjusting limits through the project tree manually. For example, let's still assume 10 is the default limit imposed by an existing registered limit: .. blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (u=0)"]; Beta [label=" Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; } You may set a project-specific override to ``UPDATE LIMIT on Alpha to 30``: .. blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (l=30, u=0)", textcolor = "#00af00"]; Beta [label=" Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; } Next you can ``UPDATE LIMIT on Beta to 20``: .. blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (l=30, u=0)"]; Beta [label=" Beta (l=20, u=0)", textcolor = "#00af00"]; Charlie [label="Charlie (u=0)"]; } Theoretically, the entire project tree consisting of `Alpha`, `Beta`, and `Charlie` is limited to 60 cores. If you'd like to ensure only 30 cores are used within the entire hierarchy, you can ``UPDATE LIMIT on Alpha to 0``: .. 
blockdiag:: blockdiag { orientation = portrait; Alpha [label="Alpha (l=0, u=0)", textcolor = "#00af00"]; Beta [label=" Beta (l=20, u=0)"]; Charlie [label="Charlie (u=0)"]; } You should use this model if you: * Have project hierarchies greater than two levels * Want extremely strict control of project usage and don't want resource usage to bleed across projects or domains Advantages ~~~~~~~~~~ * Allows you to model specific and strict limits * Works with any project hierarchy or depth * Usage is only calculated for the project in question Disadvantages ~~~~~~~~~~~~~ * Resources aren't allowed to flow gracefully between projects in a hierarchy * Requires intervention and verification to move resources across projects * Project limit validation isn't performed with respect to other projects or domains Strict Two Level ---------------- The ``strict_two_level`` enforcement model assumes the project hierarchy does not exceed two levels. The top layer can consist of projects or domains. For example, project `Alpha` can have a sub-project called `Beta` within this model. Project `Beta` cannot have a sub-project. The hierarchy is restrained to two layers. `Alpha` can also be a domain that contains project `Beta`, but `Beta` cannot have a sub-project. Regardless of the top layer consisting of projects or domains, the hierarchical depth is limited to two layers. Resource utilization is allowed to flow between projects in the hierarchy, depending on the limits. This property allows for more flexibility than the ``flat`` enforcement model. The model is strict in that operators can set limits on parent projects or domains and the limits of the children may never exceed the parent. For example, assume domain `Alpha` contains two projects, `Beta` and `Charlie`. Projects `Beta` and `Charlie` are siblings so the hierarchy maintains a depth of two. A system administrator sets the limit of a resource on `Alpha` to 20. 
Both projects `Beta` and `Charlie` can consume resources until the total usage of `Alpha`, `Beta`, and `Charlie` reach 20. At that point, no more resources should be allocated to the tree. System administrators can also reserve portions of domain `Alpha`'s resource in sub-projects directly. Using the previous example, project `Beta` could have a limit of 12 resources, implicitly leaving 8 resources for `Charlie` to consume. The following diagrams illustrate the behaviors described above, using projects named `Alpha`, `Beta`, `Charlie`, and `Delta`. Assume the resource in question is cores and the default registered limit for cores is 10. Also assume we have the following project hierarchy where `Alpha` has a limit of 20 cores and its usage is currently 4: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; } Technically, both `Beta` and `Charlie` can use up to 8 cores each: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (u=8)", textcolor = "#00af00"]; Charlie [label="Charlie (u=8)", textcolor = "#00af00"]; } If `Alpha` attempts to claim two cores the usage check will fail because the service will fetch the hierarchy from keystone using ``oslo.limit`` and check the usage of each project in the hierarchy to see that the total usage of `Alpha`, `Beta`, and `Charlie` is equal to the limit of the tree, set by `Alpha.limit`: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=6)", textcolor = "#FF0000"]; Beta [label="Beta (u=8)"]; Charlie [label="Charlie (u=8)"]; } Despite the usage of the tree being equal to the limit, we can still add children to the tree: .. 
blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha -> Delta; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (u=8)"]; Charlie [label="Charlie (u=8)"]; Delta [label="Delta (u=0)", textcolor = "#00af00"]; } Even though the project can be created, the current usage of cores across the tree prevents `Delta` from claiming any cores: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha -> Delta; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (u=8)"]; Charlie [label="Charlie (u=8)"]; Delta [label="Delta (u=2)", textcolor = "#FF0000"]; } Creating a grandchild of project `Alpha` is forbidden because it violates the two-level hierarchical constraint: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Charlie -> Delta; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (u=8)"]; Charlie [label="Charlie (u=8)"]; Delta [label="Delta (u=0)", textcolor = "#FF0000"]; } This is a fundamental constraint of this design because it provides a very clear escalation path. When a request fails because the tree limit has been exceeded, a user has all the information they need to provide meaningful context in a support ticket (e.g., their project ID and the parent project ID). An administrator should be able to reshuffle usage accordingly. Providing this information in tree structures with more than a depth of two is much harder, but may be implemented with a separate model. Granting `Beta` the ability to claim more cores can be done by giving `Beta` a project-specific override for cores .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=4)"]; Beta [label="Beta (l=12, u=8)", textcolor = "#00af00"]; Charlie [label="Charlie (u=8)"]; } Note that regardless of this update, any subsequent requests to claim more cores in the tree will be rejected since the usage is equal to the limit of the `Alpha`. 
`Beta` can claim cores if they are released from `Alpha` or `Charlie`: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=2)", textcolor = "#00af00"]; Beta [label="Beta (l=12, u=8)"]; Charlie [label="Charlie (u=6)", textcolor = "#00af00"]; } .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=2)"]; Beta [label="Beta (l=12, u=12)", textcolor = "#00af00"]; Charlie [label="Charlie (u=6)"]; } While `Charlie` is still under its default allocation of 10 cores, it won't be able to claim any more cores because the total usage of the tree is equal to the limit of `Alpha`, thus preventing `Charlie` from reclaiming the cores it had: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=2)"]; Beta [label="Beta (l=12, u=12)"]; Charlie [label="Charlie (u=8)", textcolor = "#FF0000"]; } Creating or updating a project with a limit that exceeds the limit of `Alpha` is forbidden. Even though it is possible for the sum of all limits under `Alpha` to exceed the limit of `Alpha`, the total usage is capped at `Alpha.limit`. Allowing children to have explicit overrides greater than the limit of the parent would result in strange user experience and be misleading since the total usage of the tree would be capped at the limit of the parent: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha [label="Alpha (l=20, u=0)"]; Beta [label="Beta (l=30, u=0)", textcolor = "#FF0000"]; Charlie [label="Charlie (u=0)"]; } .. 
blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha -> Delta; Alpha [label="Alpha (l=20, u=0)"]; Beta [label="Beta (u=0)"]; Charlie [label="Charlie (u=0)"]; Delta [label="Delta (l=30, u=0)", textcolor = "#FF0000"]; } Finally, let's still assume the default registered limit for cores is 10, but we're going to create project `Alpha` with a limit of 6 cores. .. blockdiag:: blockdiag { orientation = portrait; Alpha; Alpha [label="Alpha (l=6, u=0)", textcolor = "#00af00"]; } When we create project `Beta`, which is a child of project `Alpha`, the limit API ensures that project `Beta` doesn't assume the default of 10, despite the registered limit of 10 cores. Instead, the child assumes the parent's limit since no single child limit should exceed the limit of the parent: .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha [label="Alpha (l=6, u=0)"]; Beta [label="Beta (l=6, u=0)", textcolor = "#00af00"]; } This behavior is consistent regardless of the number of children added under project `Alpha`. .. blockdiag:: blockdiag { orientation = portrait; Alpha -> Beta; Alpha -> Charlie; Alpha -> Delta; Alpha [label="Alpha (l=6, u=0)"]; Beta [label="Beta (l=6, u=0)"]; Charlie [label="Charlie (l=6, u=0)", textcolor = "#00af00"]; Delta [label="Delta (l=6, u=0)", textcolor = "#00af00"]; } Creating limit overrides while creating projects seems counter-productive given the whole purpose of a registered default, but it also seems unlikely to throttle a parent project by specifying its default to be less than a registered default. This behavior maintains consistency with the requirement that the sum of all child limits may exceed the parent limit, but the limit of any one child may not. 
You should use this model if you: * Want resources to flow between projects and domains within a hierarchy * Don't have a project depth greater than two levels * Are not concerned about usage calculation performance or don't have project trees that are wide Advantages ~~~~~~~~~~ * Allows resources to flow between projects and domains within a strict two-level hierarchy * Limits are validated when they are created and updated Disadvantages ~~~~~~~~~~~~~ * Project depth cannot exceed two levels * Performance may suffer in wide and flat project hierarchies during usage calculation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/upgrading.rst0000664000175000017500000002723400000000000021342 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== Upgrading Keystone ================== As of the Newton release, keystone supports two different approaches to upgrading across releases. The traditional approach requires a significant outage to be scheduled for the entire duration of the upgrade process. The more modern approach results in zero downtime, but is more complicated due to a longer upgrade procedure. .. NOTE:: The details of these steps are entirely dependent on the details of your specific deployment, such as your chosen application server and database management system. Use it only as a guide when implementing your own upgrade process. 
Before you begin ---------------- Plan your upgrade: * Read and ensure you understand the `release notes `_ for the next release. * Resolve any outstanding deprecation warnings in your logs. Some deprecation cycles are as short as a single release, so it's possible to break a deployment if you leave *any* outstanding warnings. It might be a good idea to re-read the release notes for the previous release (or two!). * Prepare your new configuration files, including ``keystone.conf``, ``logging.conf``, ``policy.yaml``, ``keystone-paste.ini``, and anything else in ``/etc/keystone/``, by customizing the corresponding files from the next release. Upgrading with downtime ----------------------- This is a high-level description of our upgrade strategy built around ``keystone-manage db_sync``. It assumes that you are willing to have downtime of your control plane during the upgrade process and presents minimal risk. With keystone unavailable, no other OpenStack services will be able to authenticate requests, effectively preventing the rest of the control plane from functioning normally. #. Stop all keystone processes. Otherwise, you'll risk multiple releases of keystone trying to write to the database at the same time, which may result in data being inconsistently written and read. #. Make a backup of your database. Keystone does not support downgrading the database, so restoring from a full backup is your only option for recovery in the event of an upgrade failure. #. Upgrade all keystone nodes to the next release. #. Update your configuration files (``/etc/keystone/``) with those corresponding from the latest release. #. Run ``keystone-manage db_sync`` from any single node to upgrade both the database schema and run any corresponding database migrations. #. (*New in Newton*) Run ``keystone-manage doctor`` to diagnose symptoms of common deployment issues and receive instructions for resolving them. #. Start all keystone processes. 
Upgrading with minimal downtime ------------------------------- If you run a multi-node keystone cluster that uses a replicated database, like a Galera cluster, it is possible to upgrade with minimal downtime. This method also optimizes recovery time from a failed upgrade. This section assumes familiarity with the base case (`Upgrading with downtime`_) outlined above. In these steps the nodes will be divided into ``first`` and ``other`` nodes. #. Backup your database. There is no way to rollback the upgrade of keystone and this is your worst-case fallback option. #. Disable keystone on all nodes but the ``first`` node. This can be done via a variety of mechanisms that will depend on the deployment. If you are unable to disable a service or place a service into maintenance mode in your load balancer, you can stop the keystone processes. #. Stop the database service on one of the ``other`` nodes in the cluster. This will isolate the old dataset on a single node in the cluster. In the event of a failed update this data can be used to rebuild the cluster without having to restore from backup. #. Update the configuration files on the ``first`` node. #. Upgrade keystone on the ``first`` node. keystone is now down for your cloud. #. Run ``keystone-manage db_sync`` on the ``first`` node. As soon as this finishes, keystone is now working again on a single node in the cluster. #. keystone is now upgraded on a single node. Your load balancers will be sending all traffic to this single node. This is your chance to ensure keystone is up and running, and not broken. If keystone is broken, see the `Rollback after a failed upgrade`_ section below. #. Once you have verified that keystone is up and running, begin the upgrade on the ``other`` nodes. This entails updating configuration files and upgrading the code. The ``db_sync`` does not need to be run again. #. 
On the node where you stopped the database service, be sure to restart it and ensure that it properly rejoins the cluster. Using this model, the outage window is minimized because the only time when your cluster is totally offline is between loading the newer version of keystone and running the ``db_sync`` command. Typically the outage with this method can be measured in tens of seconds especially if automation is used. Rollback after a failed upgrade ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the upgrade fails, only a single node has been affected. This makes the recovery simpler and quicker. If issues are not discovered until the entire cluster is upgraded, a full shutdown and restore from backup will be required. That will take much longer than just fixing a single node with an old copy of the database still available. This process will be dependent on your architecture and it is highly recommended that you've practiced this in a development environment before trying to use it for the first time. #. Isolate the bad node. Shutdown keystone and the database services on the upgraded "bad" node. #. Bootstrap the database cluster from the node holding the old data. This may require wiping the data first on any nodes who are not holding old data. #. Enable keystone on the old nodes in your load balancer or if the processes were stopped, restart them. #. Validate that keystone is working. #. Downgrade the code and config files on the bad node. This process should be doable in a matter of minutes and will minimize cloud downtime if it is required. Upgrading without downtime -------------------------- .. versionadded:: 10.0.0 (Newton) Upgrading without downtime is only supported in deployments upgrading *from* Newton or a newer release. 
If upgrading a Mitaka deployment to Newton, the commands described here will be available as described below, but the ``keystone-manage db_sync --expand`` command will incur downtime (similar to running ``keystone-manage db_sync``), as it runs legacy (downtime-incurring) migrations prior to running schema expansions. .. versionchanged:: 21.0.0 (Yoga) The migration tooling was changed from *SQLAlchemy-Migrate* to *Alembic*. As part of this change, the data migration phase of the database upgrades was dropped. This is a high-level description of our upgrade strategy built around additional options in ``keystone-manage db_sync``. Although it is much more complex than the upgrade process described above, it assumes that you are not willing to have downtime of your control plane during the upgrade process. With this upgrade process, end users will still be able to authenticate to receive tokens normally, and other OpenStack services will still be able to authenticate requests normally. #. Make a backup of your database. keystone does not support downgrading the database, so restoring from a full backup is your only option for recovery in the event of an upgrade failure. #. Stop the keystone processes on the first node (or really, any arbitrary node). This node will serve to orchestrate database upgrades. #. Upgrade your first node to the next release, but do not start any keystone processes. #. Update your configuration files on the first node (``/etc/keystone/``) with those corresponding to the latest release. #. Run ``keystone-manage doctor`` on the first node to diagnose symptoms of common deployment issues and receive instructions for resolving them. #. Run ``keystone-manage db_sync --expand`` on the first node to expand the database schema to a superset of what both the previous and next release can utilize, and create triggers to facilitate the live migration process. .. 
warning:: For MySQL, using the ``keystone-manage db_sync --expand`` command requires that you either grant your keystone user ``SUPER`` privileges, or run ``set global log_bin_trust_function_creators=1;`` in mysql beforehand. At this point, new columns and tables may exist in the database, but will *not* all be populated in such a way that the next release will be able to function normally. As the previous release continues to write to the old schema, database triggers will live migrate the data to the new schema so it can be read by the next release. .. note:: Prior to Yoga, data migrations were treated separately and required the use of the ``keystone-manage db_sync --migrate`` command after applying the expand migrations. This is no longer necessary and the ``keystone-manage db_sync --migrate`` command is now a no-op. #. Update your configuration files (``/etc/keystone/``) on all nodes (except the first node, which you've already done) with those corresponding to the latest release. #. Upgrade all keystone nodes to the next release, and restart them one at a time. During this step, you'll have a mix of releases operating side by side, both writing to the database. As the next release begins writing to the new schema, database triggers will also migrate the data to the old schema, keeping both data schemas in sync. #. Run ``keystone-manage db_sync --contract`` to remove the old schema and all data migration triggers. When this process completes, the database will no longer be able to support the previous release. Using ``db_sync check`` ~~~~~~~~~~~~~~~~~~~~~~~ .. versionadded:: 12.0.0 (Pike) .. versionchanged:: 21.0.0 (Yoga) Previously this command would return ``3`` if data migrations were required. Data migrations are now part of the expand schema migrations, therefore this step is no longer necessary. In order to check the current state of your rolling upgrades, you may run the command ``keystone-manage db_sync --check``. 
This will inform you of any outstanding actions you have left to take as well as any possible upgrades you can make from your current version. Here is a list of possible return codes. * A return code of ``0`` means you are currently up to date with the latest migration script version and all ``db_sync`` commands are complete. * A return code of ``1`` generally means something serious is wrong with your database and operator intervention will be required. * A return code of ``2`` means that an upgrade from your current database version is available, your database is not currently under version control, or the database is already under control. Your first step is to run ``keystone-manage db_sync --expand``. * A return code of ``4`` means that the expansion and data migration stages are complete, and the next step is to run ``keystone-manage db_sync --contract``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/admin/url-safe-naming.inc0000664000175000017500000000273100000000000022303 0ustar00zuulzuul00000000000000.. -*- rst -*- URL safe naming of projects and domains ======================================= In the future, keystone may offer the ability to identify a project in a hierarchy via a URL style of naming from the root of the hierarchy (for example specifying 'projectA/projectB/projectC' as the project name in an authentication request). In order to prepare for this, keystone supports the optional ability to ensure both projects and domains are named without including any of the reserved characters specified in section 2.2 of `rfc3986 `_. The safety of the names of projects and domains can be controlled via two configuration options: .. code-block:: ini [resource] project_name_url_safe = off domain_name_url_safe = off When set to ``off`` (which is the default), no checking is done on the URL safeness of names. 
When set to ``new``, an attempt to create a new project or domain with an unsafe name (or update the name of a project or domain to be unsafe) will cause a status code of 400 (Bad Request) to be returned. Setting the configuration option to ``strict`` will, in addition to preventing the creation and updating of entities with unsafe names, cause an authentication attempt which specifies a project or domain name that is unsafe to return a status code of 401 (Unauthorized). It is recommended that installations take the steps necessary to where they can run with both options set to ``strict`` as soon as is practical. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/api_curl_examples.rst0000664000175000017500000007175500000000000021775 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================= API Examples using Curl ======================= -------------------------- v3 API Examples Using Curl -------------------------- .. note:: Following are some API examples using curl. Note that these examples are not automatically generated. They can be outdated as things change and are subject to regular updates and changes. GET / ===== Discover API version information, links to documentation (PDF, HTML, WADL), and supported media types: .. WARNING:: The v2.0 portion of this response will be removed in the T release. 
It is only advertised here because the v2.0 API supports the ec2tokens API until the T release. All other functionality of the v2.0 has been removed as of the Queens release. Use v3 for all functionality as it is more complete and secure. .. code-block:: bash $ curl "http://localhost:5000" .. code-block:: javascript { "versions": { "values": [ { "id": "v3.10", "links": [ { "href": "http://127.0.0.1:5000/v3/", "rel": "self" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json" } ], "status": "stable", "updated": "2018-02-28T00:00:00Z" }, { "id": "v2.0", "links": [ { "href": "http://127.0.0.1:5000/v2.0/", "rel": "self" }, { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.identity-v2.0+json" } ], "status": "deprecated", "updated": "2016-08-04T00:00:00Z" } ] } } Tokens ====== Unscoped -------- Get an unscoped token: .. code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } } } }' \ "http://localhost:5000/v3/auth/tokens" ; echo Example response: .. code-block:: bash HTTP/1.1 201 Created X-Subject-Token: MIIFvgY... Vary: X-Auth-Token Content-Type: application/json Content-Length: 312 Date: Fri, 11 May 2018 03:15:01 GMT { "token": { "issued_at": "2018-05-11T03:15:01.000000Z", "audit_ids": [ "0PKh_BDKTWqqaFONE-Sxbg" ], "methods": [ "password" ], "expires_at": "2018-05-11T04:15:01.000000Z", "user": { "password_expires_at": null, "domain": { "id": "default", "name": "Default" }, "id": "9a7e43333cc44ef4b988f05fc3d3a49d", "name": "admin" } } } Project-scoped -------------- Get a project-scoped token: .. 
code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } }, "scope": { "project": { "name": "admin", "domain": { "id": "default" } } } } }' \ "http://localhost:5000/v3/auth/tokens" ; echo Example response: .. code-block:: bash HTTP/1.1 201 Created X-Subject-Token: MIIFfQ... Vary: X-Auth-Token Content-Type: application/json Content-Length: 3518 Date: Fri, 11 May 2018 03:38:39 GMT { "token": { "is_domain": false, "methods": [ "password" ], "roles": [ { "id": "b57680c826b44b5ca6122d0f792c3184", "name": "Member" }, { "id": "3a7bd258345f47479a26aea11a6cc2bb", "name": "admin" } ], "expires_at": "2018-05-11T04:38:39.000000Z", "project": { "domain": { "id": "default", "name": "Default" }, "id": "3a705b9f56bb439381b43c4fe59dccce", "name": "admin" }, "catalog": [ { "endpoints": [ { "url": "http://localhost/identity", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "30a91932e4e94a8ca4dc145bb1bb6b4b" }, { "url": "http://localhost/identity", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "id": "94d4768735104c9091f0468e7d31c189" } ], "type": "identity", "id": "09af9253500b41ef976a07322b2fa388", "name": "keystone" }, { "endpoints": [ { "url": "http://localhost/volume/v2/3a705b9f56bb439381b43c4fe59dccce", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "1c4ffe935e7643d99b55938cb12bc38d" } ], "type": "volumev2", "id": "413a44234e1a4c3781d4a3c7a7e4c895", "name": "cinderv2" }, { "endpoints": [ { "url": "http://localhost/image", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "33237fdd1a744d0fb40f9127f21ddad4" } ], "type": "image", "id": "4d473252145546d2aa589605f1e177c7", "name": "glance" }, { "endpoints": [ { "url": "http://localhost/placement", "interface": "public", "region": "RegionOne", "region_id": 
"RegionOne", "id": "1a421e2f97684d3f86ab4d2cc9c86362" } ], "type": "placement", "id": "5dcecbdd4a1d44d0855c560301b27bb5", "name": "placement" }, { "endpoints": [ { "url": "http://localhost/compute/v2.1", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "8e7ea663cc41477c9629cc710bbb1c7d" } ], "type": "compute", "id": "87d49efa8fb64006bdb123d223ddcae2", "name": "nova" }, { "endpoints": [ { "url": "http://localhost/volume/v1/3a705b9f56bb439381b43c4fe59dccce", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "97a2c0ac7e304316a1eb58a3757e6ef8" } ], "type": "volume", "id": "9408080f1970482aa0e38bc2d4ea34b7", "name": "cinder" }, { "endpoints": [ { "url": "http://localhost:8080/v1/AUTH_3a705b9f56bb439381b43c4fe59dccce", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "d0d823615b0747a9aeca8b83fba105f0" }, { "url": "http://localhost:8080", "interface": "admin", "region": "RegionOne", "region_id": "RegionOne", "id": "e4cb86d9232349f091e0a02390deeb79" } ], "type": "object-store", "id": "957ba1fe8b0443f0afe64bfd0858ba5e", "name": "swift" }, { "endpoints": [ { "url": "http://localhost:9696/", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "aa4a0e61cdc54372967ee9e2298f1d53" } ], "type": "network", "id": "960fbc66bfcb4fa7900023f647fdc3a5", "name": "neutron" }, { "endpoints": [ { "url": "http://localhost/volume/v3/3a705b9f56bb439381b43c4fe59dccce", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "0c38045a91c34d798e0d2008fee7521d" } ], "type": "volumev3", "id": "98adb083914f423d9cb74ad5527e37cb", "name": "cinderv3" }, { "endpoints": [ { "url": "http://localhost/compute/v2/3a705b9f56bb439381b43c4fe59dccce", "interface": "public", "region": "RegionOne", "region_id": "RegionOne", "id": "562e12b9ee9549e8b857218ccf2ae321" } ], "type": "compute_legacy", "id": "a31e688016614430b28cddddf12d7b88", "name": "nova_legacy" } ], "user": { 
"password_expires_at": null, "domain": { "id": "default", "name": "Default" }, "id": "9a7e43333cc44ef4b988f05fc3d3a49d", "name": "admin" }, "audit_ids": [ "TbdrnW4MQDq_GPAVN9-JOQ" ], "issued_at": "2018-05-11T03:38:39.000000Z" } } Domain-Scoped ------------- Get a domain-scoped token (Note that you're going to need a role-assignment on the domain first!): .. code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["password"], "password": { "user": { "name": "admin", "domain": { "id": "default" }, "password": "adminpwd" } } }, "scope": { "domain": { "id": "default" } } } }' \ "http://localhost:5000/v3/auth/tokens" ; echo Example response: .. code-block:: bash HTTP/1.1 201 Created X-Subject-Token: MIIFNg... Vary: X-Auth-Token Content-Type: application/json Content-Length: 2590 Date: Fri, 11 May 2018 03:37:09 GMT { "token": { "domain": { "id": "default", "name": "Default" }, "methods": [ "password" ], "roles": [ { "id": "b57680c826b44b5ca6122d0f792c3184", "name": "Member" }, { "id": "3a7bd258345f47479a26aea11a6cc2bb", "name": "admin" } ], "expires_at": "2018-05-11T04:37:09.000000Z", "catalog": [ { "endpoints": [ { "region_id": "RegionOne", "url": "http://localhost/identity", "region": "RegionOne", "interface": "public", "id": "30a91932e4e94a8ca4dc145bb1bb6b4b" }, { "region_id": "RegionOne", "url": "http://localhost/identity", "region": "RegionOne", "interface": "admin", "id": "94d4768735104c9091f0468e7d31c189" } ], "type": "identity", "id": "09af9253500b41ef976a07322b2fa388", "name": "keystone" }, { "endpoints": [], "type": "volumev2", "id": "413a44234e1a4c3781d4a3c7a7e4c895", "name": "cinderv2" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://localhost/image", "region": "RegionOne", "interface": "public", "id": "33237fdd1a744d0fb40f9127f21ddad4" } ], "type": "image", "id": "4d473252145546d2aa589605f1e177c7", "name": "glance" }, { "endpoints": [ { "region_id": "RegionOne", "url": 
"http://localhost/placement", "region": "RegionOne", "interface": "public", "id": "1a421e2f97684d3f86ab4d2cc9c86362" } ], "type": "placement", "id": "5dcecbdd4a1d44d0855c560301b27bb5", "name": "placement" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://localhost/compute/v2.1", "region": "RegionOne", "interface": "public", "id": "8e7ea663cc41477c9629cc710bbb1c7d" } ], "type": "compute", "id": "87d49efa8fb64006bdb123d223ddcae2", "name": "nova" }, { "endpoints": [], "type": "volume", "id": "9408080f1970482aa0e38bc2d4ea34b7", "name": "cinder" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://localhost:8080", "region": "RegionOne", "interface": "admin", "id": "e4cb86d9232349f091e0a02390deeb79" } ], "type": "object-store", "id": "957ba1fe8b0443f0afe64bfd0858ba5e", "name": "swift" }, { "endpoints": [ { "region_id": "RegionOne", "url": "http://localhost:9696/", "region": "RegionOne", "interface": "public", "id": "aa4a0e61cdc54372967ee9e2298f1d53" } ], "type": "network", "id": "960fbc66bfcb4fa7900023f647fdc3a5", "name": "neutron" }, { "endpoints": [], "type": "volumev3", "id": "98adb083914f423d9cb74ad5527e37cb", "name": "cinderv3" }, { "endpoints": [], "type": "compute_legacy", "id": "a31e688016614430b28cddddf12d7b88", "name": "nova_legacy" } ], "user": { "password_expires_at": null, "domain": { "id": "default", "name": "Default" }, "id": "9a7e43333cc44ef4b988f05fc3d3a49d", "name": "admin" }, "audit_ids": [ "Sfc8_kywQx-tWNkEVqA1Iw" ], "issued_at": "2018-05-11T03:37:09.000000Z" } } Getting a token from a token ---------------------------- Get a token from a token: .. code-block:: bash curl -i \ -H "Content-Type: application/json" \ -d ' { "auth": { "identity": { "methods": ["token"], "token": { "id": "'$OS_TOKEN'" } } } }' \ "http://localhost:5000/v3/auth/tokens" ; echo Example response: .. code-block:: bash HTTP/1.1 201 Created X-Subject-Token: MIIFxw... 
Vary: X-Auth-Token Content-Type: application/json Content-Length: 347 Date: Fri, 11 May 2018 03:41:29 GMT { "token": { "issued_at": "2018-05-11T03:41:29.000000Z", "audit_ids": [ "zS_C_KROTFeZm-VlG1LjbA", "RAjE82q8Rz-Cd50ogCpx3Q" ], "methods": [ "token", "password" ], "expires_at": "2018-05-11T04:40:00.000000Z", "user": { "password_expires_at": null, "domain": { "id": "default", "name": "Default" }, "id": "9a7e43333cc44ef4b988f05fc3d3a49d", "name": "admin" } } } .. note:: If a scope was included in the request body then this would get a token with the new scope. DELETE /v3/auth/tokens ---------------------- Revoke a token: .. code-block:: bash curl -i -X DELETE \ -H "X-Auth-Token: $OS_TOKEN" \ -H "X-Subject-Token: $OS_TOKEN" \ "http://localhost:5000/v3/auth/tokens" If there's no error then the response is empty. Domains ======= GET /v3/domains --------------- List domains: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/domains" | python -mjson.tool Example response: .. code-block:: javascript { "domains": [ { "description": "Owns users and tenants (i.e. projects) available on Identity API v2.", "enabled": true, "id": "default", "links": { "self": "http://identity-server:5000/v3/domains/default" }, "name": "Default" } ], "links": { "next": null, "previous": null, "self": "http://identity-server:5000/v3/domains" } } POST /v3/domains ---------------- Create a domain: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "domain": { "name": "newdomain"}}' \ "http://localhost:5000/v3/domains" | python -mjson.tool Example response: .. code-block:: javascript { "domain": { "enabled": true, "id": "3a5140aecd974bf08041328b53a62458", "links": { "self": "http://identity-server:5000/v3/domains/3a5140aecd974bf08041328b53a62458" }, "name": "newdomain" } } Projects ======== GET /v3/projects ---------------- List projects: .. 
code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/projects" | python -mjson.tool Example response: .. code-block:: javascript { "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/projects" }, "projects": [ { "description": null, "domain_id": "default", "enabled": true, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "links": { "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c" }, "name": "demo" } ] } PATCH /v3/projects/{id} ----------------------- Disable a project: .. code-block:: bash curl -s -X PATCH \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d ' { "project": { "enabled": false } }'\ "http://localhost:5000/v3/projects/$PROJECT_ID" | python -mjson.tool Example response: .. code-block:: javascript { "project": { "description": null, "domain_id": "default", "enabled": false, "extra": {}, "id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "links": { "self": "http://localhost:5000/v3/projects/3d4c2c82bd5948f0bcab0cf3a7c9b48c" }, "name": "demo" } } GET /v3/services ================ List the services: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/services" | python -mjson.tool Example response: .. code-block:: javascript { "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/services" }, "services": [ { "description": "Keystone Identity Service", "enabled": true, "id": "bd7397d2c0e14fb69bae8ff76e112a90", "links": { "self": "http://localhost:5000/v3/services/bd7397d2c0e14fb69bae8ff76e112a90" }, "name": "keystone", "type": "identity" } ] } GET /v3/endpoints ================= List the endpoints: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/endpoints" | python -mjson.tool Example response: .. 
code-block:: javascript { "endpoints": [ { "enabled": true, "id": "29beb2f1567642eb810b042b6719ea88", "interface": "admin", "links": { "self": "http://localhost:5000/v3/endpoints/29beb2f1567642eb810b042b6719ea88" }, "region": "RegionOne", "service_id": "bd7397d2c0e14fb69bae8ff76e112a90", "url": "http://localhost:5000/v3" } ], "links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/endpoints" } } Users ===== GET /v3/users ------------- List users: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/users" | python -mjson.tool POST /v3/users -------------- Create a user: .. code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{"user": {"name": "newuser", "password": "changeme"}}' \ "http://localhost:5000/v3/users" | python -mjson.tool Example response: .. code-block:: javascript { "user": { "domain_id": "default", "enabled": true, "id": "ec8fc20605354edd91873f2d66bf4fc4", "links": { "self": "http://identity-server:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4" }, "name": "newuser" } } GET /v3/users/{user_id} ----------------------- Show details for a user: .. code-block:: bash USER_ID=ec8fc20605354edd91873f2d66bf4fc4 curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/users/$USER_ID" | python -mjson.tool Example response: .. code-block:: javascript { "user": { "domain_id": "default", "enabled": true, "id": "ec8fc20605354edd91873f2d66bf4fc4", "links": { "self": "http://localhost:5000/v3/users/ec8fc20605354edd91873f2d66bf4fc4" }, "name": "newuser" } } POST /v3/users/{user_id}/password --------------------------------- Change password (using the default policy, this can be done as the user): .. 
code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 ORIG_PASS=userpwd NEW_PASS=newuserpwd curl \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "user": {"password": "'$NEW_PASS'", "original_password": "'$ORIG_PASS'"} }' \ "http://localhost:5000/v3/users/$USER_ID/password" .. note:: This command doesn't print anything if the request was successful. PATCH /v3/users/{user_id} ------------------------- Reset password (using the default policy, this requires admin): .. code-block:: bash USER_ID=b7793000f8d84c79af4e215e9da78654 NEW_PASS=newuserpwd curl -s -X PATCH \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d '{ "user": {"password": "'$NEW_PASS'"} }' \ "http://localhost:5000/v3/users/$USER_ID" | python -mjson.tool Example response: .. code-block:: javascript { "user": { "default_project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "domain_id": "default", "email": "demo@example.com", "enabled": true, "extra": { "email": "demo@example.com" }, "id": "269348fdd9374b8885da1418e0730af1", "links": { "self": "http://localhost:5000/v3/users/269348fdd9374b8885da1418e0730af1" }, "name": "demo" } } PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} =============================================================== Create group role assignment on project: .. code-block:: bash curl -s -X PUT \ -H "X-Auth-Token: $OS_TOKEN" \ "http://localhost:5000/v3/projects/$PROJECT_ID/groups/$GROUP_ID/roles/$ROLE_ID" | python -mjson.tool There's no data in the response if the operation is successful. POST /v3/OS-TRUST/trusts ======================== Create a trust: .. 
code-block:: bash curl -s \ -H "X-Auth-Token: $OS_TOKEN" \ -H "Content-Type: application/json" \ -d ' { "trust": { "expires_at": "2014-12-30T23:59:59.999999Z", "impersonation": false, "project_id": "'$PROJECT_ID'", "roles": [ { "name": "admin" } ], "trustee_user_id": "'$DEMO_USER_ID'", "trustor_user_id": "'$ADMIN_USER_ID'" }}'\ "http://localhost:5000/v3/OS-TRUST/trusts" | python -mjson.tool Example response: .. code-block:: javascript { "trust": { "expires_at": "2014-12-30T23:59:59.999999Z", "id": "394998fa61f14736b1f0c1f322882949", "impersonation": false, "links": { "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949" }, "project_id": "3d4c2c82bd5948f0bcab0cf3a7c9b48c", "remaining_uses": null, "roles": [ { "id": "c703057be878458588961ce9a0ce686b", "links": { "self": "http://localhost:5000/v3/roles/c703057be878458588961ce9a0ce686b" }, "name": "admin" } ], "roles_links": { "next": null, "previous": null, "self": "http://localhost:5000/v3/OS-TRUST/trusts/394998fa61f14736b1f0c1f322882949/roles" }, "trustee_user_id": "269348fdd9374b8885da1418e0730af1", "trustor_user_id": "3ec3164f750146be97f21559ee4d9c51" } } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4821143 keystone-26.0.0/doc/source/cli/0000775000175000017500000000000000000000000016277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/cli/commands.rst0000664000175000017500000000266600000000000020644 0ustar00zuulzuul00000000000000General keystone-manage options: -------------------------------- * ``--help`` : display verbose help output. Invoking ``keystone-manage`` by itself will give you some usage information. Available commands: * ``bootstrap``: Perform the basic bootstrap process. * ``create_jws_keypair``: Create an ECDSA key pair for JWS token signing. 
* ``credential_migrate``: Encrypt credentials using a new primary key. * ``credential_rotate``: Rotate Fernet keys for credential encryption. * ``credential_setup``: Setup a Fernet key repository for credential encryption. * ``db_sync``: Sync the database. * ``db_version``: Print the current migration version of the database. * ``doctor``: Diagnose common problems with keystone deployments. * ``domain_config_upload``: Upload domain configuration file. * ``fernet_rotate``: Rotate keys in the Fernet key repository. * ``fernet_setup``: Setup a Fernet key repository for token encryption. * ``mapping_populate``: Prepare domain-specific LDAP backend. * ``mapping_purge``: Purge the identity mapping table. * ``mapping_engine``: Test your federation mapping rules. * ``receipt_rotate``: Rotate auth receipts encryption keys. * ``receipt_setup``: Setup a key repository for auth receipts. * ``saml_idp_metadata``: Generate identity provider metadata. * ``token_rotate``: Rotate token keys in the key repository. * ``token_setup``: Setup a token key repository for token encryption. * ``trust_flush``: Purge expired trusts. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/cli/index.rst0000664000175000017500000000140700000000000020142 0ustar00zuulzuul00000000000000.. Copyright 2017 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= CLI Documentation ================= .. 
toctree:: :maxdepth: 1 keystone-manage keystone-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/cli/keystone-manage.rst0000664000175000017500000001147100000000000022124 0ustar00zuulzuul00000000000000keystone-manage ~~~~~~~~~~~~~~~ --------------------------- Keystone Management Utility --------------------------- :Author: openstack@lists.openstack.org :Date: 2017-02-23 :Copyright: OpenStack Foundation :Version: 11.0.0 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== keystone-manage [options] DESCRIPTION =========== ``keystone-manage`` is the command line tool which interacts with the Keystone service to initialize and update data within Keystone. Generally, ``keystone-manage`` is only used for operations that cannot be accomplished with the HTTP API, such as data import/export and database migrations. USAGE ===== ``keystone-manage [options] action [additional args]`` .. include:: commands.rst OPTIONS ======= -h, --help show this help message and exit --config-dir DIR Path to a config directory to pull \*.conf files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None. --debug, -d If set to true, the logging level will be set to DEBUG instead of the default INFO level. --log-config-append PATH, --log_config PATH The name of a logging configuration file. This file is appended to any existing logging configuration files. For details about logging configuration files, see the Python logging module documentation. 
Note that when logging configuration files are used then all logging configuration is set in the configuration file and other logging configuration options are ignored (for example, logging_context_format_string). --log-date-format DATE_FORMAT Defines the format string for %(asctime)s in log records. Default: None . This option is ignored if log_config_append is set. --log-dir LOG_DIR, --logdir LOG_DIR (Optional) The base directory used for relative log_file paths. This option is ignored if log_config_append is set. --log-file PATH, --logfile PATH (Optional) Name of log file to send logging output to. If no default is set, logging will go to stderr as defined by use_stderr. This option is ignored if log_config_append is set. --nodebug The inverse of --debug --nostandard-threads The inverse of --standard-threads --nouse-syslog The inverse of --use-syslog --noverbose The inverse of --verbose --nowatch-log-file The inverse of --watch-log-file --pydev-debug-host PYDEV_DEBUG_HOST Host to connect to for remote debugger. --pydev-debug-port PYDEV_DEBUG_PORT Port to connect to for remote debugger. --standard-threads Do not monkey-patch threading system modules. --syslog-log-facility SYSLOG_LOG_FACILITY Syslog facility to receive log lines. This option is ignored if log_config_append is set. --use-syslog Use syslog for logging. Existing syslog format is DEPRECATED and will be changed later to honor RFC5424. This option is ignored if log_config_append is set. --verbose, -v If set to false, the logging level will be set to WARNING instead of the default INFO level. --version show program's version number and exit --watch-log-file Uses logging handler designed to watch file system. When log file is moved or removed this handler will open a new log file with specified path instantaneously. It makes sense only if log_file option is specified and Linux platform is used. This option is ignored if log_config_append is set. 
FILES ===== None SEE ALSO ======== * `OpenStack Keystone `__ SOURCE ====== * Keystone is sourced in Gerrit git `Keystone `__ * Keystone bugs are managed at Launchpad `Keystone `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/cli/keystone-status.rst0000664000175000017500000000555500000000000022225 0ustar00zuulzuul00000000000000keystone-status ~~~~~~~~~~~~~~~ ----------------------- Keystone Status Utility ----------------------- :Author: openstack@lists.openstack.org :Date: 2018-10-15 :Copyright: OpenStack Foundation :Version: 15.0.0 :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== .. code-block:: console keystone-status [options] DESCRIPTION =========== ``keystone-status`` is a command line tool that helps operators upgrade their deployment. USAGE ===== .. code-block:: console keystone-status [options] action [additional args] Categories are: * ``upgrade`` Detailed descriptions are below. You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: keystone-status upgrade These sections describe the available categories and arguments for :command:`keystone-status`. Categories and commands ----------------------- ``keystone-status upgrade check`` Performs a release-specific readiness check before restarting services with new code, or upgrading. This command expects to have complete configuration and access to the database. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. 
* - 255 - An unexpected error occurred. **History of Checks** **15.0.0 (Stein)** * Placeholder to be filled in with checks as they are added in Stein. OPTIONS ======= .. code-block:: console -h, --help show this help message and exit --config-dir DIR Path to a config directory to pull \*.conf files from. This file set is sorted, so as to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None. FILES ===== None SEE ALSO ======== * `OpenStack Keystone `__ SOURCE ====== * Keystone is sourced on `opendev.org `__ * Keystone bugs are managed at Launchpad `Keystone `__ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/code_documentation.rst0000664000175000017500000000012200000000000022120 0ustar00zuulzuul00000000000000Code Documentation ================== .. toctree:: :maxdepth: 1 api/modules././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/conf.py0000664000175000017500000002231600000000000017033 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# # keystone documentation build configuration file, created by # sphinx-quickstart on Mon Jan 9 12:02:59 2012. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinx.ext.todo', 'oslo_config.sphinxconfiggen', 'oslo_config.sphinxext', 'oslo_policy.sphinxpolicygen', 'openstackdocstheme', 'oslo_policy.sphinxext', 'sphinxcontrib.apidoc', 'sphinxcontrib.seqdiag', 'sphinx_feature_classification.support_matrix', 'sphinxcontrib.blockdiag', ] blockdiag_html_image_format = 'SVG' blockdiag_fontpath = 'DejaVuSans.ttf' # sphinxcontrib.apidoc options apidoc_module_dir = '../../keystone' apidoc_output_dir = 'api' apidoc_excluded_paths = [ 'tests/*', 'tests', 'test', # TODO(gmann): with new release of SQLAlchemy(1.4.27) TypeDecorator used # in common/sql/core.py file started failing. Remove this once the issue of # TypeDecorator is fixed. 'common/sql/core.py', 'common/sql/migrations/*', 'common/sql/migrations', ] apidoc_separate_modules = True # sphinxcontrib.seqdiag options seqdiag_antialias = True seqdiag_html_image_format = 'SVG' config_generator_config_file = '../../config-generator/keystone.conf' sample_config_basename = '_static/keystone' policy_generator_config_file = ( '../../config-generator/keystone-policy-generator.conf' ) sample_policy_basename = '_static/keystone' todo_include_todos = True # The suffix of source filenames. 
source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2012-Present, OpenInfra Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['old'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['keystone.'] # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ( 'cli/keystone-manage', 'keystone-manage', 'Keystone Management Utility', ['OpenStack'], 1, ) ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'keystonedoc' # -- Options for LaTeX output ------------------------------------------------- # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': 10, } # Grouping the document tree into LaTeX files. List of tuples (source # start file, target name, title, author, documentclass # [howto/manual]). # NOTE(gyee): Specify toctree_only=True for a better document structure of # the generated PDF file. latex_documents = [ ( 'index', 'doc-keystone.tex', 'Keystone Documentation', 'OpenStack', 'manual', True, ) ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', 'keystone', 'Keystone Documentation', 'OpenStack', 'keystone', 'One line description of project.', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. 
# texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # Example configuration for intersphinx: refer to the Python standard library. # intersphinx_mapping = {'http://docs.python.org/': None} # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_repo_name = 'openstack/keystone' openstackdocs_bug_project = 'keystone' openstackdocs_bug_tag = 'documentation' openstackdocs_projects = ['python-openstackclient'] openstackdocs_pdf_link = True ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4821143 keystone-26.0.0/doc/source/configuration/0000775000175000017500000000000000000000000020377 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/config-options.rst0000664000175000017500000000141500000000000024070 0ustar00zuulzuul00000000000000========================= API Configuration options ========================= Configuration ~~~~~~~~~~~~~ The Identity service is configured in the ``/etc/keystone/keystone.conf`` file. The following tables provide a comprehensive list of the Identity service options. .. only:: html For a sample configuration file, refer to :doc:`samples/keystone-conf`. .. show-options:: :config-file: config-generator/keystone.conf Domain-specific Identity drivers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Identity service supports domain-specific Identity drivers installed on an SQL or LDAP back end, and supports domain-specific Identity configuration options, which are stored in domain-specific configuration files. See :ref:`domain_specific_configuration` for more information. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/index.rst0000664000175000017500000000111600000000000022237 0ustar00zuulzuul00000000000000.. _keystone_configuration_options: ============================== Keystone Configuration Options ============================== This section provides a list of all possible options and sample files for keystone configuration. .. toctree:: :maxdepth: 2 config-options.rst policy.rst .. # NOTE(gyee): Sample files are only available in HTML document. # Inline sample files with literalinclude hit LaTeX processing error # like TeX capacity exceeded and direct links are discouraged in PDF doc. .. only:: html .. toctree:: :maxdepth: 2 samples/index.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/policy.rst0000664000175000017500000000124000000000000022425 0ustar00zuulzuul00000000000000==================== Policy configuration ==================== .. warning:: JSON formatted policy file is deprecated since Keystone 19.0.0 (Wallaby). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/latest/cli/oslopolicy-convert-json-to-yaml.html Configuration ~~~~~~~~~~~~~ The following is an overview of all available policies in Keystone. .. only:: html For a sample configuration file, refer to :doc:`samples/policy-yaml`. .. 
show-policy:: :config-file: ../../config-generator/keystone-policy-generator.conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4821143 keystone-26.0.0/doc/source/configuration/samples/0000775000175000017500000000000000000000000022043 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/samples/index.rst0000664000175000017500000000050500000000000023704 0ustar00zuulzuul00000000000000========================== Sample configuration files ========================== Configuration files can alter how keystone behaves at runtime and by default are located in ``/etc/keystone/``. Links to sample configuration files can be found below: .. toctree:: keystone-conf.rst logging-conf.rst policy-yaml.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/samples/keystone-conf.rst0000664000175000017500000000042300000000000025360 0ustar00zuulzuul00000000000000============= keystone.conf ============= Use the ``keystone.conf`` file to configure most Identity service options. This sample configuration can also be viewed in `raw format <../../_static/keystone.conf.sample>`_. .. literalinclude:: ../../_static/keystone.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/samples/logging-conf.rst0000664000175000017500000000056200000000000025151 0ustar00zuulzuul00000000000000============ logging.conf ============ You can specify a special logging configuration file in the ``keystone.conf`` configuration file. For example, ``/etc/keystone/logging.conf``. For details, see the `Python logging module documentation `__. .. 
literalinclude:: ../../../../etc/logging.conf.sample ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/configuration/samples/policy-yaml.rst0000664000175000017500000000031200000000000025030 0ustar00zuulzuul00000000000000=========== policy.yaml =========== Use the ``policy.yaml`` file to define additional access controls that apply to the Identity service: .. literalinclude:: ../../_static/keystone.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4861143 keystone-26.0.0/doc/source/contributor/0000775000175000017500000000000000000000000020102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/api_change_tutorial.rst0000664000175000017500000002021600000000000024636 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==================== Making an API Change ==================== This document will guide you through the process of proposing and submitting an API change to keystone. Prerequisites ------------- In order to follow this tutorial, it is assumed that you have read our :doc:`index` and :doc:`../getting-started/architecture` documents. 
Proposing a change ------------------ You need to create a RFE bug report, submit a specification against the `keystone-specs`_ repository and bring it up to discussion with the `keystone core team`_ for agreement. Please refer to the :ref:`guide for proposing features ` to learn more about the process. .. _`keystone-specs`: https://opendev.org/openstack/keystone-specs/ .. _`keystone core team`: https://review.opendev.org/#/admin/groups/9,members Create ~~~~~~ #. `Create a RFE bug report`_ in launchpad; #. git clone https://opendev.org/openstack/keystone-specs; #. cp `specs/template.rst` `specs/backlog/.rst`; #. Write the spec based on the template. Ensure the bug link points to the one created in step 1; #. Also update the documentation at `api/v3/identity-api-v3.rst` to reflect the proposed API changes; #. Push to gerrit for review; #. Propose agenda items to the `keystone meeting`_, and make sure someone who understands the subject can attend the meeting to answer questions. .. _`Create a RFE bug report`: https://bugs.launchpad.net/keystone/+filebug .. _`template`: https://opendev.org/openstack/keystone-specs/src/branch/master/specs/template.rst .. _`keystone meeting`: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Agreement ~~~~~~~~~ The `keystone core team`_ will evaluate the specification and vote on accepting it or not. If accepted, the proposal will be targeted to a release; otherwise, the specification will be abandoned. As soon as there is an agreement on the specification, the change may start rolling out. Implementing a change --------------------- In this section, let's assume that a specification proposing the addition of a `description` field to the roles API was accepted. In the next subsections, you will find a detailed explanation on the needed code changes to the keystone code to implement such change. 
Architectural Recapitulation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ As you saw in the :doc:`../getting-started/architecture` document, there are three logical levels of code at which a request passes: the API routing and request handling layer, the resource manager, and the driver. For the role backend, the API resource can be found under the `keystone/api` directory in the `roles.py` file, and the manager and driver can be found in the `keystone/assignment` directory in the `core.py` and `role_backends/sql.py` files, respectively (currently only the SQL driver is supported). Changing the SQL Model and Driver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: The below guidance is out-of-date and refers to the legacy ``migrate_repo`` migration repository, which was removed in 21.0.0 (Yoga). Nowadays, for a change like this, you would create an additive or "expand" migration in the ``expand_repo`` repository along with null migrations in the ``contract_repo`` and ``data_migration_repo`` repositories. For more information, refer to :doc:`/contributor/database-migrations`. .. todo:: Update this section to reflect the new migration model. First, you need to change the role model to include the description attribute. Go to `keystone/assignment/role_backends/sql.py` and update it like:: class RoleTable(sql.ModelBase, sql.ModelDictMixin): attributes = ['id', 'name', 'domain_id', 'description'] description = sql.Column(sql.String(255), nullable=True) ... Now, when keystone runs, the table will be created with the new attribute. However, what about existing deployments which already have the role table created? You need to migrate their database schema! The directory `keystone/common/sql/migrate_repo/versions` owns all the migrations since keystone day 1. Create a new file there with the next migration number. 
For example, if the latest migration number there is `101`, create yours as `102_add_role_description.py`, which will look like:: def upgrade(migrate_engine): meta = sql.MetaData() meta.bind = migrate_engine role_table = sql.Table('role', meta, autoload=True) description = sql.Column('description', sql.String(255), nullable=True) role_table.create_column(description) Do not forget to add tests for your migration at `keystone/tests/unit/test_sql_upgrade.py`, you may take other tests as example and learn how to develop yours. In this case, you would need to upgrade to `102` check the migration has added the `description` column to the role table. Changing the role driver itself in `keystone/assignment/role_backends/sql.py` will not be necessary, because the driver handles the role entities as Python dictionaries, thus the new attribute will be handled automatically. Changing the Manager ~~~~~~~~~~~~~~~~~~~~ Managers handle the business logic. Keystone provides the basic CRUD for role entities, that means that the role manager simply calls the driver with the arguments received from the API resource, and then returns the driver's result back to API resource. Additionally, it handles the cache management. Thus, there is no manager change needed to make it able to operate role entities with the new `description` attribute. However, you should add tests for the role CRUD operations with the new attribute to `keystone/tests/unit/assignment/test_core.py`. When trying to determine whether a change goes in the driver or in the manager, the test is whether the code is business logic and/or needs to be executed for each driver. Both common and business logics go in the manager, while backend specific logic goes in the drivers. Changing the API Interface ~~~~~~~~~~~~~~~~~~~~~~~~~~ Business logic should not go in the API resource. The API resource should be viewed as a binding between the business logic and the HTTP protocol. 
Thus, it is in charge of calling the appropriate manager call and wrapping responses into HTTP format. API resource use JSON schemas do determine whether a provided role is a valid representation or not. Role create and role update schemas are available at `keystone/assignment/schema.py`. You will need to update their properties to include a `description` attribute:: _role_properties = { 'name': parameter_types.name, 'description': parameter_types.description } Besides doing the entity validation using such schemas, API resource pass and accept all the attributes to and from the manager. Thus, there is no further change needed at the API resource level. You should add tests for API unit test to `keystone/tests/unit/test_v3_role.py` and document about the new parameter in the `api-ref`_. .. _api-ref: https://docs.openstack.org/api-ref/identity/ Furthermore, as role entities are passed in the request body to keystone calls, the role routes do not need to be changed; i.e the routes still are:: POST /v3/roles GET /v3/roles/{id} HEAD /v3/roles/{id} PATCH /v3/roles/{id} DELETE /v3/roles/{id} Conclusion ---------- At this point, keystone role entities contain a `description` attribute. In order to make it happen, you have learned how the keystone architecture is, what is the responsibility of each layer, how database migrations occur and the way entities are represented into tables. The pattern of the change made in this tutorial applies to other keystone subsystems as well, such as `resource` and `identity`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/architecture.rst0000664000175000017500000000153600000000000023323 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================== Learning Architecture Internals =============================== .. toctree:: :maxdepth: 1 caching-layer.rst filtering-responsibilities.rst list-truncation.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/auth-plugins.rst0000664000175000017500000000760500000000000023264 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _auth_plugins: Authentication Plugins ====================== .. NOTE:: This feature is only supported by keystone for the Identity API v3 clients. Keystone supports authentication plugins and they are specified in the ``[auth]`` section of the configuration file. However, an authentication plugin may also have its own section in the configuration file. It is up to the plugin to register its own configuration options. 
* ``methods`` - comma-delimited list of authentication plugin names * ```` - specify the class which handles to authentication method, in the same manner as one would specify a backend driver. Keystone provides three authentication methods by default. ``password`` handles password authentication and ``token`` handles token authentication. ``external`` is used in conjunction with authentication performed by a container web server that sets the ``REMOTE_USER`` environment variable. For more details, refer to :doc:`External Authentication <../admin/external-authentication>`. How to Implement an Authentication Plugin ----------------------------------------- All authentication plugins must extend the :class:`keystone.auth.plugins.base.AuthMethodHandler` class and implement the ``authenticate()`` method. The ``authenticate()`` method expects the following parameters. * ``context`` - keystone's request context * ``auth_payload`` - the content of the authentication for a given method * ``auth_context`` - user authentication context, a dictionary shared by all plugins. It contains ``method_names`` and ``bind`` by default. ``method_names`` is a list and ``bind`` is a dictionary. If successful, the ``authenticate()`` method must provide a valid ``user_id`` in ``auth_context`` and return ``None``. ``method_name`` is used to convey any additional authentication methods in case authentication is for re-scoping. For example, if the authentication is for re-scoping, a plugin must append the previous method names into ``method_names``. If authentication requires multiple steps, the ``authenticate()`` method must return the payload in the form of a dictionary for the next authentication step. If authentication is unsuccessful, the ``authenticate()`` method must raise a :class:`keystone.exception.Unauthorized` exception. Simply add the new plugin name to the ``methods`` list along with your plugin class configuration in the ``[auth]`` sections of the configuration file to deploy it. 
If the plugin requires additional configurations, it may register its own section in the configuration file. Plugins are invoked in the order in which they are specified in the ``methods`` attribute of the ``authentication`` request body. If multiple plugins are invoked, all plugins must succeed in order to for the entire authentication to be successful. Furthermore, all the plugins invoked must agree on the ``user_id`` in the ``auth_context``. The ``REMOTE_USER`` environment variable is only set from a containing webserver. However, to ensure that a user must go through other authentication mechanisms, even if this variable is set, remove ``external`` from the list of plugins specified in ``methods``. This effectively disables external authentication. For more details, refer to :doc:`External Authentication <../admin/external-authentication>`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/caching-layer.rst0000664000175000017500000000711400000000000023345 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= Caching Layer ============= The caching layer is designed to be applied to any ``manager`` object within Keystone via the use of ``keystone.common.cache`` module. This leverages `oslo.cache`_ caching system to provide a flexible caching backend. .. 
_oslo.cache: https://opendev.org/openstack/oslo.cache The caching can be setup for all or some subsystems. It is recommended that each of the managers have an independent toggle within the config file to enable caching. The easiest method to utilize the toggle within the configuration file is to define a ``caching`` boolean option within that manager's configuration section (e.g. ``identity``). Enable the global cache ``enabled`` option as well as the specific manager's caching enable toggle in order to cache that subsystem. The `oslo.cache`_ is simple and easy to adopt by any system. See the `usage guide`_ of it. There are various cache :ref:`backends ` supported by it. Example use of `oslo.cache`_ in keystone (in this example, ``token`` is the manager): .. code-block:: python from keystone.common import cache TOKENS_REGION = cache.create_region(name='tokens') MEMOIZE_TOKENS = cache.get_memoization_decorator( group='token', region=TOKENS_REGION) @MEMOIZE_TOKENS def _validate_token(self, token_id): ... return token .. _usage guide: https://docs.openstack.org/oslo.cache/latest/user/usage.html With the above example, each call to the ``cacheable_function`` would check to see if the arguments passed to it matched a currently valid cached item. If the return value was cached, the caching layer would return the cached value; if the return value was not cached, the caching layer would call the function, pass the value to the ``MEMOIZE_TOKEN`` decorator, which would then determine if caching was globally enabled and enabled for the ``token`` manager. If either caching toggle is disabled, the value is returned but not cached. It is recommended that each of the managers have an independent configurable time-to-live (TTL). The option ``cache_time`` is to be set for every manager under its section in keystone.conf file. 
If the ``cache_time`` is set to ``None``, the expiration time will be set to the global default ``expiration_time`` option in the ``[cache]`` configuration section. These options are passed to and handled by oslo.cache. :ref:`Cache invalidation ` can be done if specific cache entries are changed. Example of invalidating a cache (in this example, ``token`` is the manager): .. code-block:: python def invalidate_individual_token_cache(self, token_id): ... self._validate_token.invalidate(self, token_id) For cache invalidation, there is an ``invalidate`` method (attribute) on the decorated function. To invalidate the cache, pass the same arguments to the ``invalidate`` method as you would the normal function. This means you need to pass ``self`` as the first argument. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000553400000000000023352 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Keystone. Communication ~~~~~~~~~~~~~~ For communicating with Keystone Team, you can `reach out`_ to us through mailing lists and IRC channels. .. _reach out: https://docs.openstack.org/keystone/latest/getting-started/community.html Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~~ For any help contact `keystone maintainers`_ , the core team of keystone. .. 
_keystone maintainers: https://review.opendev.org/#/admin/groups/9,members New Feature Planning ~~~~~~~~~~~~~~~~~~~~ If you are planning to propose a feature in keystone , check out `Proposing Features`_ .. _Proposing Features: https://docs.openstack.org/keystone/latest/contributor/proposing-features.html Task Tracking ~~~~~~~~~~~~~~ We track our tasks in `keystone bug tracker`_. You can also track the tasks of other keystone repositories also. * `keystonemiddleware `__ * `keystoneauth `__ * `python-keystoneclient `__ If you're looking for some smaller, easier work item to pick up and get started on, search for the `low-hanging-fruit`_ tag in `bugs launchpad`_. .. _keystone bug tracker: https://bugs.launchpad.net/keystone/+bugs?field.status=New .. _low-hanging-fruit: https://docs.openstack.org/keystone/train/contributor/how-can-i-help.html#the-meaning-of-low-hanging-fruit .. _bugs launchpad: https://bugs.launchpad.net/keystone/+bugs?field.tag=low-hanging-fruit Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so in `keystone bug tracker`_ by following the `bug triage `_ procedure. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ After submitting a Patch, anyone can cooperate by `reviewing`_ the patch on `gerrit`_. Finally, the patch will be `merged`_ by the `keystone maintainers`_. .. _gerrit: https://review.opendev.org/ .. _reviewing: https://docs.opendev.org/opendev/infra-manual/latest/developers.html#peer-review .. _merged: https://docs.opendev.org/opendev/infra-manual/latest/developers.html#merging Project Team Lead Duties ------------------------ All common PTL duties are enumerated here in the `PTL guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/database-migrations.rst0000664000175000017500000001165500000000000024562 0ustar00zuulzuul00000000000000.. 
Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Database Migrations =================== .. versionchanged:: 21.0.0 (Yoga) The database migration framework was changed from SQLAlchemy-Migrate to Alembic in the Yoga release. Previously there were three SQLAlchemy-Migrate repos, corresponding to different type of migration operation: the *expand* repo, the *data migration* repo, and the *contract* repo. There are now only two Alembic branches, the *expand* branch and the *contract* branch, and data migration operations have been folded into the former .. versionchanged:: 24.0.0 (Bobcat) Added support for auto-generation of migrations using the ``keystone.common.sql.migrations.manage`` script. Starting with Newton, keystone supports upgrading both with and without downtime. In order to support this, there are two separate branches (all under ``keystone/common/sql/migrations``): the *expand* and the *contract* branch. *expand* For additive schema modifications and triggers to ensure data is kept in sync between the old and new schema until the point when there are no keystone instances running old code. May also contain data migrations to ensure new tables/columns are fully populated with data from the old schema. *contract* Run after all old code versions have been upgraded to running the new code, so remove any old schema columns/tables that are not used by the new version of the code. Drop any triggers added in the expand phase. 
A migration script must belong to one branch. If a migration has both additive and destruction operations, it must be split into two migrations scripts, one in each branch. In order to support rolling upgrades, where two releases of keystone briefly operate side-by-side using the same database without downtime, each phase of the migration must adhere to following constraints: Expand phase: Only additive schema changes, such as new columns, tables, indices, and triggers, and data insertion are allowed. Data modification or removal is not allowed. Triggers must be created to keep data in sync between the previous release and the next release. Data written by the previous release must be readable by both the previous release and the next release. Data written by the next release must be readable by both the next release and the previous release. In cases it is not possible for triggers to maintain data integrity across multiple schemas, writing data should be forbidden using triggers. Contract phase: Only destructive schema changes, such as dropping or altering columns, tables, indices, and triggers, or data modification or removal are allowed. Triggers created during the expand phase must be dropped. Writing your own migrations --------------------------- Because Keystone uses the expand-contract pattern for database migrations, it is not possible to use the standard ``alembic`` CLI tool. Instead, Keystone provides its own tool which provides a similar UX to the ``alembic`` tool but which auto-configures alembic (the library) for this pattern. To create a new *expand* branch migration: .. code-block:: bash $ tox -e venv -- python -m keystone.common.sql.migrations.manage \ revision --expand -m "My expand migration" To create a new *contract* branch migration: .. code-block:: bash $ tox -e venv -- python -m keystone.common.sql.migrations.manage \ revision --contract -m "My contract migration" To auto-generate an *expand* and/or *contract* branch migration: .. 
code-block:: bash $ tox -e venv -- python -m keystone.common.sql.migrations.manage \ revision --autogenerate -m "My auto-generated migration" .. important:: Because of discrepancies between the migrations and models which are yet to be ironed out, a number of columns are intentionally ignored. You can view these by inspecting the ``env.py`` file in ``keystone/common/sql/migrations``. To view the help page: .. code-block:: bash python -m keystone.common.sql.migrations.manage --help For information on how this tool works, refer to `this blog post`_. For more information on writing migration scripts in general refer to the `Alembic`_ documentation. .. _this blog post: https://that.guru/blog/zero-downtime-upgrades-with-alembic-and-sqlalchemy/ .. _Alembic: https://alembic.sqlalchemy.org/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/developing-drivers.rst0000664000175000017500000001417400000000000024453 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _developing_drivers: =========================== Developing Keystone Drivers =========================== A driver, also known as a backend, is an important architectural component of Keystone. It is an abstraction around the data access needed by a particular subsystem. This pluggable implementation is not only how Keystone implements its own data access, but how you can implement your own! 
Each major subsystem (that has data access needs) implements the data access by using drivers. Some examples of Keystone's drivers: - :class:`keystone.identity.backends.ldap.Identity` - :class:`keystone.token.providers.fernet.core.Provider` - :class:`keystone.contrib.federation.backends.sql.Federation` In/Out of Tree -------------- It's best to start developing your custom driver outside of the Keystone development process. This means developing it in your own public or private git repository and not worrying about getting it upstream (for now). This is better for you because it gives you more freedom and you are not bound to the strict OpenStack development rules or schedule. You can iterate faster and take whatever shortcuts you need to get your product out of the door. This is also good for Keystone because it will limit the amount of drivers that must be maintained by the team. If the team had to maintain a driver for each NoSQL DB that deployers want to use in production there would be less time to make Keystone itself better. Not to mention that the team would have to start gaining expertise in potentially dozens of new technologies. As you'll see below there is no penalty for open sourcing your driver, on GitHub for example, or even keeping your implementation private. We use `Setuptools entry points`_ to load your driver from anywhere in the Python path. .. _Setuptools entry points: https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins How To Make a Driver -------------------- The TLDR; steps (and too long didn't write yet): 1. Determine which subsystem you would like write a driver for 2. Subclass the most current version of the driver interface 3. Implement each of the abstract methods for that driver a. We are currently not documenting the exact input/outputs of the driver methods. 
The best approach right now is to use an existing driver as an example of what data your driver will receive and what data your driver will be required to return. b. There is a plan in place to document these APIs in more detail. 4. Register your new driver as an entry point 5. Configure your new driver in ``keystone.conf`` 6. Sit back and enjoy! Identity Driver Configuration ----------------------------- As described in the :ref:`domain_specific_configuration` there are 2 ways of configuring domain specific drivers: using files and using database. Configuration with files is straight forward but is having a major disadvantage of requiring restart of Keystone for the refresh of configuration or even for Keystone to start using chosen driver after adding a new domain. Configuring drivers using database is a flexible alternative that allows dynamic reconfiguration and even changes using the API (requires admin privileges by default). There are 2 independent parts for this to work properly: Defining configuration options ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Driver class (as pointed by EntryPoints) may have a static method `register_opts` accepting `conf` argument. This method, if present, is being invoked during loading the driver and registered options are then available when the driver is being instantiated. .. code-block:: python class CustomDriver(base.IdentityDriverBase): @classmethod def register_opts(cls, conf): grp = cfg.OptGroup("foo") opts = [cfg.StrOpt("opt1")] conf.register_group(grp) conf.register_opts(opts, group=grp) def __init__(self, conf=None): # conf contains options registered above and domain specific values # being set. pass ... Allowing domain configuration per API ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ A safety measure of the Keystone domain configuration API is that options allowed for the change need to be explicitly whitelisted. This is done in the `domain_config` section of the main Keystone configuration file. .. 
code-block:: cfg [domain_config] additional_whitelisted_options=:[opt1,opt2,opt3] additional_sensitive_options=:[password] The `` is the name of the configuration group as defined by the driver. Sensitive options are not included in the GET api call and are stored in a separate database table. Driver Interface Changes ------------------------ We no longer support driver versioning. Thus, if a driver interface changes, you will need to upgrade your custom driver to meet the new driver contract. Removing Methods ~~~~~~~~~~~~~~~~ Newer driver interfaces may remove methods that are currently required. Methods are removed when they are no longer required or invoked by Keystone. There is no reason why methods removed from the Keystone interface need to be removed from custom drivers. Adding Methods ~~~~~~~~~~~~~~ The most common API changes will be adding methods to support new features. The new method must be implemented by custom driver implementations. Updating Methods ~~~~~~~~~~~~~~~~ We will do our best not to update existing methods in ways that will break custom driver implementations. However, if that is not possible, again you will need to upgrade your custom driver implementation to meet the new driver contract. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/doctor-checks.rst0000664000175000017500000001136700000000000023374 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. ============================ Developing ``doctor`` checks ============================ As noted in the section above, keystone's management CLI provides various tools for administrating OpenStack Identity. One of those tools is called ``keystone-manage doctor`` and it is responsible for performing health checks about the deployment. If ``keystone-manage doctor`` detects a symptom, it will provide the operator with suggestions to improve the overall health of the deployment. This section is dedicated to documenting how to write symptoms for ``doctor``. The ``doctor`` tool consists of a list of symptoms. Each symptom is something that we can check against, and provide a warning for if we detect a misconfiguration. The ``doctor`` module is located in :py:mod:`keystone.cmd.doctor`. The current checks are based heavily on inspecting configuration values. As a result, many of the submodules within the ``doctor`` module are named after the configuration section for the symptoms they check. For example, if we want to ensure the ``keystone.conf [DEFAULT] max_token_size`` option is properly configured for whatever ``keystone.conf [token] provider`` is set to, we can place that symptom in a module called :py:mod:`keystone.cmd.doctor.tokens`. The symptom will be loaded by importing the ``doctor`` module, which is done when ``keystone-manage doctor`` is invoked from the command line. When adding new symptoms, it's important to remember to add new modules to the ``SYMPTOM_MODULES`` list in :py:mod:`keystone.cmd.doctor.__init__`. Doing that will ensure ``doctor`` discovers properly named symptoms when executed. Now that we know symptoms are organized according to configuration sections, and how to add them, how exactly do we write a new symptom? ``doctor`` will automatically discover new symptoms by inspecting the methods of each symptom module (i.e. ``SYMPTOM_MODULES``). 
If a method declaration starts with ``def symptom_`` it is considered a symptom that ``doctor`` should check for, and it should be run. The naming of the symptom, or method name, is extremely important since ``doctor`` will use it to describe what it's doing to whoever runs ``doctor``. In addition to a well named method, we also need to provide a complete documentation string for the method. If ``doctor`` detects a symptom, it will use the method's documentation string as feedback to the operator. It should describe why the check is being done, why it was triggered, and possible solutions to cure the symptom. For examples of this, see the existing symptoms in any of ``doctor``'s symptom modules. The last step is evaluating the logic within the symptom. As previously stated, ``doctor`` will check for a symptom if methods within specific symptom modules make a specific naming convention. In order for ``doctor`` to suggest feedback, it needs to know whether or not the symptom is actually present. We accomplish this by making all symptoms return ``True`` when a symptom is present. When a symptom evaluates to ``False``, ``doctor`` will move along to the next symptom in the list since. If the deployment isn't suffering for a specific symptom, ``doctor`` should not suggest any actions related to that symptom (i.e. if you have your cholesterol under control, why would a physician recommend cholesterol medication if you don't need it). To summarize: - Symptoms should live in modules named according to the most relevant configuration section they apply to. This ensure we keep our symptoms organized, grouped, and easy to find. - When writing symptoms for a new section, remember to add the module name to the ``SYMPTOM_MODULES`` list in :py:mod:`keystone.cmd.doctor.__init__`. - Remember to use a good name for the symptom method signature and to prepend it with ``symptom_`` in order for it to be discovered automatically by ``doctor``. 
- Symptoms have to evaluate to ``True`` in order to provide feedback to operators. - Symptoms should have very thorough documentation strings that describe the symptom, side-effects of the symptom, and ways to remedy it. For examples, feel free to run ``doctor`` locally using ``keystone-manage`` and inspect the existing symptoms. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/filtering-responsibilities.rst0000664000175000017500000000406300000000000026207 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================================ Filtering responsibilities between API resources and drivers ============================================================ Keystone supports the specification of filtering on list queries as part of the v3 identity API. By default these queries are satisfied in the API resource when it calls the ``wrap_collection`` method at the end of a ``get`` method. However, to enable optimum performance, any driver can implement some or all of the specified filters (for example, by adding filtering to the generated SQL statements to generate the list). The communication of the filter details between the API resource and its drivers is handled by the passing of a reference to a Hints object, which is a list of dicts describing the filters. 
A driver that satisfies a filter must delete the filter from the Hints object so that when it is returned to the API, it knows to only execute any unsatisfied filters. The contract for a driver for ``list_{entity}`` methods is therefore: * It MUST return a list of entities of the specified type * It MAY either just return all such entities, or alternatively reduce the list by filtering for one or more of the specified filters in the passed Hints reference, and removing any such satisfied filters. An exception to this is that for identity drivers that support domains, then they should at least support filtering by domain_id. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/how-can-i-help.rst0000664000175000017500000001312300000000000023344 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============== How Can I Help? =============== Are you interested in contributing to the keystone project? 
Whether you're a software developer, a technical writer, an OpenStack operator or an OpenStack user, there are many reasons to get involved with the keystone project: * You can help shape the direction of the project, ensuring it meets your organization's needs in the future * You can help maintain the project's health and get your bugs fixed faster * You can collaborate with other people to find common solutions that will help you and your organization * You can hack on a fun, security-related Python project with interesting challenges Here are some easy ways to make a big difference to the keystone project and become part of the team: * Read the documentation, starting with the rest of this contributor guide, and try to follow it to set up keystone and try out different features. Does it make sense? Is something out of date? Is something misleading or incorrect? Submit a patch or bug report to fix it. * Monitor incoming bug reports, try to reproduce the bug in a test environment, ask the bug reporter for more information, answer support questions and close invalid bugs. Follow the `bug triage guide`_. New bugs can be found with the "New" status: * `keystone `__ * `keystonemiddleware `__ * `keystoneauth `__ * `python-keystoneclient `__ You can also subscribe to email notifications for new bugs. * Subscribe to the openstack-discuss@lists.openstack.org mailing list (filter on subject tag ``[keystone]``) and join the #openstack-keystone IRC channel on OFTC. Help answer user support questions if you or your organization has faced and solved a similar problem, or chime in on design discussions that will affect you and your organization. * Check out the low hanging fruit bugs, submit patches to fix them: * `keystone `__ * `keystonemiddleware `__ * `keystoneauth `__ * `python-keystoneclient `__ * Look for deprecation warnings in the unit tests and in the keystone logs of a running keystone installation and submit patches to make them go away. 
* Look at other projects, especially `devstack`_, and submit patches to correct usage of options that keystone has deprecated. Make sure to let the `keystone maintainers`_ know you're looking at these so that it's on their radar and they can help review. * Check the test coverage report (``tox -ecover``) and try to add unit test coverage. * Review `new changes`_. Keep OpenStack's `review guidelines`_ in mind. Ask questions when you don't understand a change. Need any help? :doc:`Reach out ` to the keystone team. .. _bug triage guide: https://wiki.openstack.org/wiki/BugTriage .. _devstack: https://docs.openstack.org/devstack/latest/ .. _keystone maintainers: https://review.opendev.org/#/admin/groups/9,members .. _new changes: https://review.opendev.org/#/q/is:open+project:openstack/keystone+OR+project:openstack/keystonemiddleware+OR+project:openstack/keystoneauth+OR+project:openstack/python-keystoneclient .. _review guidelines: https://docs.openstack.org/project-team-guide/review-the-openstack-way.html The Meaning of Low Hanging Fruit ================================ This section describes the intent behind bugs tagged as low hanging fruit. Current maintainers should apply the tag consistently while triaging bugs, using this document as a guide. This practice ensures newcomers to the project can expect each low hanging fruit bug to be of similar complexity. 
Bugs fit for the low hanging fruit tag: * Should require minimal python experience, someone new to OpenStack might also be new to python * Should only require a basic understanding of the review workflow, complicated changesets with dependencies between repositories coupled with CI testing only raises the cognitive bar for new contributors * Can include documentation fixes so long it doesn't require an in-depth understanding of complicated subsystems and features (e.g., overhauling the federated identity guide) * Should be something a newcomer can progress through in a week or less, long wait times due to the discussion of complicated topics can deter new contributors from participating * Shouldn't require a new contributor to understand copious amounts of historical context, newcomers should eventually understand this information but consuming that information is outside the scope of low hanging fruit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/http-api.rst0000664000175000017500000002031700000000000022365 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================ Identity API v2.0 and v3 History ================================ Specifications ============== As of the Queens release, Keystone solely implements the `Identity API v3`_. Support for Identity API v2.0 has been removed in Queens in favor of the `Identity API v3`_. 
Identity API v3 is a superset of all the functionality available in the Identity API v2.0 and several of its extensions, and provides a much more consistent developer experience. .. _`Identity API v3`: https://docs.openstack.org/api-ref/identity/v3/ History ======= You're probably wondering why Keystone does not implement a "v1" API. As a matter of fact, one exists, but it actually predates OpenStack. The v1.x API was an extremely small API documented and implemented by Rackspace for their early public cloud products. With the advent of OpenStack, Keystone served to provide a superset of the authentication and multi-tenant authorization models already implemented by Rackspace's public cloud, Nova, and Swift. Thus, Identity API v2.0 was introduced. Identity API v3 was established to introduce namespacing for users and projects by using "domains" as a higher-level container for more flexible identity management and fixed a security issue in the v2.0 API (bearer tokens appearing in URLs). How do I migrate from v2.0 to v3? ================================= I am a deployer --------------- You need to ensure that you've configured your service catalog in Keystone correctly. The simplest, and most ideal, configuration would expose one identity with unversioned endpoints (note the lack of ``/v2.0/`` or ``/v3/`` in these URLs): - Service (type: ``identity``) - Endpoint (interface: ``public``, URL: ``http://identity:5000/``) - Endpoint (interface: ``admin``, URL: ``http://identity:35357/``) If you were to perform a ``GET`` against either of these endpoints, you would be greeted by an ``HTTP/1.1 300 Multiple Choices`` response, which newer Keystone clients can use to automatically detect available API versions. .. NOTE:: Deploying v3 only requires a single application since administrator and end-user operations are handled by the same process, and not separated into two different applications. 
Depending on how v2.0 was configured, you might be able to decommission one endpoint. Until users are educated about which endpoint to use, the former admin API (e.g. using port 35357) and the public API (e.g. using port 5000) can run the v3 API simulateously and serve both sets of users. .. code-block:: bash $ curl -i http://identity:35357/ HTTP/1.1 300 Multiple Choices Vary: X-Auth-Token Content-Type: application/json Content-Length: 755 Date: Tue, 10 Jun 2014 14:22:26 GMT {"versions": {"values": [ ... ]}} With unversioned ``identity`` endpoints in the service catalog, you should be able to `authenticate with keystoneclient`_ successfully. .. _`authenticate with keystoneclient`: https://docs.openstack.org/python-keystoneclient/latest/using-api-v3.html#authenticating-using-sessions I have a Python client ---------------------- The Keystone community provides first-class support for Python API consumers via our client library, `python-keystoneclient`_. If you're not currently using this library, you should, as it is intended to expose all of our HTTP API functionality. If we're missing something you're looking for, please contribute! Adopting `python-keystoneclient`_ should be the easiest way to migrate to Identity API v3. .. _`python-keystoneclient`: https://pypi.org/project/python-keystoneclient/ I have a non-Python client -------------------------- You'll likely need to heavily reference our `API documentation`_ to port your application to Identity API v3. .. _`API documentation`: https://docs.openstack.org/api-ref/identity/v3/ The most common operation would be password-based authentication including a tenant name (i.e. project name) to specify an authorization scope. In Identity API v2.0, this would be a request to ``POST /v2.0/tokens``: .. 
code-block:: javascript { "auth": { "passwordCredentials": { "password": "my-password", "username": "my-username" }, "tenantName": "project-x" } } And you would get back a JSON blob with an ``access`` -> ``token`` -> ``id`` that you could pass to another web service as your ``X-Auth-Token`` header value. In Identity API v3, an equivalent request would be to ``POST /v3/auth/tokens``: .. code-block:: javascript { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "domain": { "id": "default" }, "name": "my-username", "password": "my-password" } } }, "scope": { "project": { "domain": { "id": "default" }, "name": "project-x" } } } } Note a few key differences when compared to the v2.0 API: - A "tenant" in v2.0 became a "project" in v3. - The authentication method (``password``) is explicitly identified. - Both the user name (``my-username``) and project name (``project-x``) are namespaced by an owning domain (where ``id`` = ``default``). The "default" domain exists by default in Keystone, and automatically owns the namespace exposed by Identity API v2.0. Alternatively, you may reference users and projects that exist outside the namespace of the default domain, which are thus inaccessible to the v2.0 API. - In v3, your token is returned to you in an ``X-Subject-Token`` header, instead of as part of the request body. You should still authenticate yourself to other services using the ``X-Auth-Token`` header. Why do I see deployments with Keystone running on two ports? ============================================================ During development of the v2.0 API, operational functionality was isolated into different applications within the project. One application was dedicated to end-user functionality and its sole purpose was to authenticate and validate user identities. The second application consisted of more features and allowed operators the ability to manage their deployment by adding or deleting users, creating projects, etc. 
These applications were referred to as the ``public`` and ``admin`` APIs, respectively. This deployment model was required by the architecture of the v2.0 API. In a way, authorization was limited to the application you had access to. Once development began on the v3 API, the code paths for both applications were merged into one. Instead of isolating functionality into separate applications, all functionality was consolidated into a single application. Each v3 endpoint or API is protected by policy instead. This makes deployment and management of Keystone's infrastructure easier for operators to deploy and for users to consume. As a result, Keystone deployments are not required to deploy separate ``admin`` and ``public`` endpoints, especially now that the v2.0 API implementation has been removed. HTTP/1.1 Chunked Encoding ========================= .. WARNING:: Running Keystone under HTTPD in the recommended (and tested) configuration does not support the use of ``Transfer-Encoding: chunked``. This is due to a limitation with the WSGI spec and the implementation used by ``mod_wsgi``. It is recommended that all clients assume Keystone will not support ``Transfer-Encoding: chunked``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/id-manage.rst0000664000175000017500000000357500000000000022470 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the specific language governing permissions and limitations under the License. Identity entity ID management for domain-specific backends ========================================================== Keystone supports the option of having domain-specific backends for the identity driver (i.e. for user and group storage), allowing, for example, a different LDAP server for each domain. To ensure that Keystone can determine to which backend it should route an API call, starting with Juno, the identity manager will, provided that :ref:`domain-specific backends ` are enabled, build on-the-fly a persistent mapping table between Keystone Public IDs that are presented to the API and the domain that holds the entity, along with whatever local ID is understood by the driver. This hides, for instance, the LDAP specifics of whatever ID is being used. To ensure backward compatibility, the default configuration of either a single SQL or LDAP backend for Identity will not use the mapping table, meaning that public facing IDs will be the unchanged. If keeping these IDs the same for the default LDAP backend is not required, then setting the configuration variable ``backward_compatible_ids`` to ``False`` will enable the mapping for the default LDAP driver, hence hiding the LDAP specifics of the IDs being used.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/index.rst0000664000175000017500000000222100000000000021740 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================= Contributor Documentation ========================= .. toctree:: :maxdepth: 1 contributing.rst how-can-i-help.rst set-up-keystone.rst http-api.rst proposing-features.rst release-notes.rst testing-keystone.rst doctor-checks.rst api_change_tutorial.rst auth-plugins.rst database-migrations.rst id-manage.rst architecture.rst services.rst developing-drivers.rst service-catalog.rst vision-reflection.rst programming-exercises.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/list-truncation.rst0000664000175000017500000000301100000000000023766 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================= Entity list truncation by drivers ================================= Keystone supports the ability for a deployment to restrict the number of entries returned from list operations, typically to prevent poorly formed searches (e.g. without sufficient filters) from becoming a performance issue. 
These limits are set in the configuration file, either for a specific driver or across all drivers. A global ``list_limit`` set in ``[DEFAULT]`` section of keystone is considered in case no limit is set for specific driver. These limits are read at the Manager level and passed into individual drivers as part of the Hints list object. A driver should try and honor any such limit if possible, but if it is unable to do so then it may ignore it (and the truncation of the returned list of entities will happen at the API level by ``wrap_collection`` method). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/programming-exercises.rst0000664000175000017500000001271700000000000025156 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================================================== Programming Exercises for Interns and New Contributors ====================================================== The keystone team participates in open source internship programs such as `Outreachy`_ and `Google Summer of Code`_ and welcomes contributions from students and developers of all skill levels. To help with formal applications for work programs or to give casual contributors a taste of what working on keystone is like, we've created a few exercises to showcase what we think are valuable development skills. 
These exercises are samples, and code produced to solve them should most likely not be merged into keystone. However, you should still propose them to `Gerrit`_ to get practice with the code review system and to get feedback from the team. This is a good way to get used to the development workflow and get acquainted with the benefits of working in a collaborative development environment. Also feel free to :doc:`talk to the keystone team <../getting-started/community>` to get help with these exercises, and refer to the :doc:`contributor documentation ` for more context on the architecture and contributing guidelines for keystone. The exercises provide some ideas of what you can do in keystone, but feel free to get creative. .. _Outreachy: https://www.outreachy.org/ .. _Google Summer of Code: https://summerofcode.withgoogle.com/ .. _Gerrit: https://docs.openstack.org/contributors/common/setup-gerrit.html Add a Parameter to an API ========================= Add a string parameter named ``nickname`` to the Project API. The end result will be that you can use the new parameter when you create a new project using the `POST /v3/projects`_ API, update the parameter using the `PATCH /v3/projects/{project_id}`_ API, and the value displayed using the `GET /v3/projects/{project_id}`_. Refer to the :doc:`API Change tutorial `. In short, you will need to follow these steps: #. Create a SQL migration to add the parameter to the database table (:py:mod:`keystone.common.sql.migrations.versions`) #. Add a SQL migration unit test (`keystone/tests/unit/test_sql_upgrade.py`) #. Add the parameter to the SQL model for projects (:py:mod:`keystone.resource.backends.sql`) #. Add unit tests (`keystone/tests/unit/resource/test_backend.py`) for the manager (:py:mod:`keystone.resource.core`) to show that the project can be created and updated with the new parameter using the provider mechanism #. Add the parameter to the API schema (:py:mod:`keystone.resource.schema`) #. 
Add an API unit test (`keystone/tests/unit/test_v3_resource.py`) #. Document the new parameter in the `api-ref`_ .. _POST /v3/projects: https://docs.openstack.org/api-ref/identity/v3/#create-project .. _PATCH /v3/projects/{project_id}: https://docs.openstack.org/api-ref/identity/v3/#update-project .. _GET /v3/projects/{project_id}: https://docs.openstack.org/api-ref/identity/v3/#show-project-details .. _api-ref: https://docs.openstack.org/api-ref/identity/ Write an External Driver ======================== Write an external driver named ``file`` that implements the Project API. The end result will be that you can set ``[resource]/driver = file`` in `keystone.conf` to have keystone load a list of project names from a text file, and querying keystone for projects will return projects with those names in the default domain. Refer to the :doc:`Developing Keystone Drivers ` tutorial. Your driver can start as an in-tree driver: create a class named ``Resource`` in `keystone/resource/backends/file.py` that implements :py:mod:`keystone.resource.backends.base.Resource`. Once you have that working, break it out into a separate repository and create a `Setuptools entrypoint`_ to allow you to register it with keystone. .. _Setuptools entrypoint: https://setuptools.readthedocs.io/en/latest/setuptools.html#dynamic-discovery-of-services-and-plugins Write an Auth Plugin ==================== Write an auth plugin named ``hacker`` that allows any existing user to authenticate if they provide a valid username and the password ``"hax0r"``. The end result will be that you can add ``hacker`` as an auth method in ``[auth]/methods`` in `keystone.conf`, and users will be able to get an :doc:`unscoped token <../admin/tokens>` using `POST /v3/auth/tokens`_ and providing ``"hacker"`` as the auth method, a valid username as the username, and ``"hax0r"`` as the password. Refer to the :doc:`auth-plugins` documentation. 
You should create a class ``Hacker`` in `keystone/auth/plugins/hacker.py` that implements :py:mod:`keystone.auth.plugins.base.AuthMethodHandler`. For bonus points, also add the plugin to `keystoneauth`_ so that Python clients can also use this auth method. .. _POST /v3/auth/tokens: https://docs.openstack.org/api-ref/identity/v3/#password-authentication-with-unscoped-authorization .. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/proposing-features.rst0000664000175000017500000001237100000000000024474 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _proposing_features: ================== Proposing Features ================== Requests for enhancements or new features must follow a process that requires using bug reports and specifications. We publish the contents of the `keystone-specs repository `_ at `specs.openstack.org `_. RFE Bug Reports =============== All code, documentation, and tests implementing a feature should be tracked. To do this, we use Launchpad bug reports. We use bug reports because the OpenStack review infrastructure has existing tooling that groups patches based on commit message syntax. When you propose a patch that is related to a bug or a feature, the OpenStack Infrastructure bot automatically links the patch as a comment in the bug report. 
Comments are also immutable, allowing us to track long-running initiatives without losing context. To create an RFE bug report, file a bug against the appropriate project. For example, if we were to create an RFE bug report for supporting a new Foobar API within keystone, we'd `open `_ that RFE against the keystone project. The title should start with "RFE: ", followed by a snippet of the feature or enhancement. For example, "RFE: Implement a Foobar API". The description should be short. Since we use specifications for details, we don't need to duplicate information in the body of the bug report. After you create the bug, you can tag it with the "rfe" tag, which helps people filter feature work from other bug reports. Finally, if your specification has already merged, be sure to include a link to it as a comment. If it hasn't, you can propose, or re-propose, your specification with ``Partial-Bug:`` followed by the bug number, at the bottom of your commit message. The OpenStack Infrastructure bot automatically updates the RFE bug report you just created with a link to the proposed specification. The specification template explains how to link to RFE bug reports, which should prompt you to open your RFE bug prior to proposing your specification. If your feature is broken up into multiple commits, make sure to include ``Partial-Bug`` in your commit messages. Additionally, use ``Closes-Bug`` in the last commit implementing the feature. This process ensures all patches written for a feature are tracked in the bug report, making it easier to audit. If you miss the opportunity to use the ``Closes-Bug`` tag and your feature work is complete, set the bug status to "Fix Committed". Specifications ============== We use specifications as a way to describe, in detail, the change that we're making and why. To write a specification, you can follow the template provided in the repository. 
To start writing a new specification, copy the template to the directory that fits the project and release you plan to target. For example, if you want to propose a feature to keystone for the Stein release, you should do the following: .. code-block:: bash $ cp specs/template.rst specs/keystone/stein/feature-foobar.rst Once you have a template in place, work through each section. Specifications should be descriptive and include use cases that justify the work. There are sections dedicated to the problem statement, the proposed solution, alternative solutions, security concerns, among other things. These sections are meant to prompt you to think about how your feature impacts users, operators, developers, related projects, and the existing code base. The template acts as a guide, so if you need to inject an ad-hoc section to describe additional details of your feature, don't hesitate to add one. Do not remove sections from the template that do not apply to your specification. Instead, simply explain why your proposed change doesn't have an impact on that aspect of the template. Propose your specification for review when you're ready for feedback: .. code-block:: bash $ git review The process for reviewing specifications is handled using Gerrit. We don't restrict the specification selection process to a particular group of individuals, which allows for open and collaborative feedback. We encourage everyone to be a part of the review process. Applying a code-review methodology to specifications allows different people to think through the problem you're trying to solve. Everyone wants to ensure the best design possible, given various resource constraints. This process takes time. Don't be discouraged if it takes longer than you anticipated for your specification to get feedback. A specification must have support (+2) from at least two keystone-spec core reviewers and it is typically approved (+Workflow) by the PTL, in order to be formally accepted. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/release-notes.rst0000664000175000017500000001104500000000000023403 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========================== Working with Release Notes ========================== The Keystone team uses `reno `_ to generate release notes. These are important user-facing documents that must be included when a user or operator facing change is performed, like a bug-fix or a new feature. A release note should be included in the same patch the work is being performed. Release notes should be short, easy to read, and easy to maintain. They also `must` link back to any appropriate documentation if it exists. The following conventions help ensure all release notes achieve those goals. Most release notes either describe bug fixes or announce support for new features, both of which are tracked using Launchpad. The conventions below rely on links in Launchpad to provide readers with more context. .. warning:: We highly recommend taking careful thought when writing and reviewing release notes. Once a release note has been published with a formal release, updating it across releases will cause it to be published in a subsequent release. 
Reviews that update, or modify, a release note from a previous release outside of the branch it was added in should be rejected unless it's required for a very specific case. Please refer to reno's `documentation `_ for more information. Release Notes for Bugs ====================== When creating a release note that communicates a bug fix, use the bug number in the name of the release note: .. code-block:: bash $ reno new bug-1652012 Created new notes file in releasenotes/notes/bug-1652012-7c53b9702b10084d.yaml The body of the release note should clearly explain how the impact will affect users and operators. It should also include why the change was necessary but not be overspecific about implementation details, as that can be found in the commit and the bug report. It should contain a properly formatted link in reStructuredText that points back to the original bug report used to track the fix. This ensures the release note is kept short and to-the-point while providing readers with additional resources: .. code-block:: yaml --- fixes: - | [`bug 1652012 `_] Changes the token_model to return is_admin_project False if the attribute is not defined. Returning True for this has the potential to be dangerous and the given reason for keeping it True was strictly for backwards compatibility. Release Notes for Features ========================== Release notes detailing feature work follow the same basic format, since features are also tracked as bugs. .. code-block:: bash $ reno new bug-1652012 Created new notes file in releasenotes/notes/bug-1652012-7c53b9702b10084d.yaml Just like release notes communicating bug fixes, release notes detailing feature work must contain a link back to the RFE bug report. Readers should be able to easily discover all patches that implement the feature, as well as find links to the full specification and documentation. The release notes can be added to the last patch of the feature. 
All of this is typically found in the RFE bug report registered in Launchpad: .. code-block:: yaml --- features: - > [`bug 1652012 `_] Keystone now fully supports the usage of fizzbangs. In the rare case there is a release note that does not pertain to a bug or feature work, use a sensible slug and include any documentation relating to the note. We can iterate on the content and application of the release note during the review process. For more information on how and when to create release notes, see the `project-team-guide `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/service-catalog.rst0000664000175000017500000001653500000000000023716 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Service Catalog Overview ======================== The OpenStack keystone service catalog allows API clients to dynamically discover and navigate to cloud services. The service catalog may differ from deployment-to-deployment, user-to-user, and project-to-project. The service catalog is the first hurdle that API consumers will need to understand after successfully authenticating with Keystone, making it a critical focal point for the overall user experience of OpenStack. *If you're integrating your OpenStack service with Keystone*, then please follow the guidelines provided below. 
*If you're writing an OpenStack client*, hopefully this helps you navigate the service catalog that you're being presented so that you can quickly move on to the business of consuming cloud services. An example service catalog ========================== The following is an example service catalog. It actually excludes several common attributes such as ``id``, which are of no concern to end users, ``region_id``, which are a bit out of scope for this topic, and ``enabled``, which is always ``true`` for end users. This service catalog contains just one service, "Keystone", which is accessible via a single endpoint URL: .. code-block:: json { "catalog": [ { "name": "Keystone", "type": "identity", "endpoints": [ { "interface": "public", "url": "https://identity.example.com:5000/" } ] } ] } The service catalog itself may appear in a token creation response (``POST /v3/auth/tokens``), a token validation response (``GET /v3/auth/tokens``), or as a standalone resource (``GET /v3/auth/catalog``). Services ======== The service catalog itself is composed of a list of services. Service entities represent web services in the OpenStack deployment. A service may have zero or more endpoints associated with it, although a service with zero endpoints is essentially useless in an OpenStack configuration. In addition to the related endpoints, there are two attributes of services that are important to end users: * ``name`` (string): user-facing name of the service This attribute is not intended to be machine-parseable or otherwise meaningful beyond branding or name-recognition for end users. Logical values might include "Keystone" or maybe "Brand X Public Cloud Identity Service". Deployers should be free to rename, and therefore rebrand, a service at will. * ``type`` (string): describes the API implemented by the service. To support future projects, the value should not be validated against a list. 
An OpenStack-wide effort to standardize service types has been done outside of Keystone and is known as the `service-types authority`_. This should not convey the version of the API implemented by the service (as in Cinder's ``volumev2`` service type) because both the ``volume`` service and ``volumev2`` service provide "block storage as a service" which is what the service type is meant to convey. The underlying implementation is completely irrelevant here. In the general case, there should only be one service in a deployment per service type, although Keystone does not enforce this today. .. _service-types authority: https://service-types.openstack.org/ Endpoints ========= Each service should have one or more related endpoints. An endpoint is essentially a base URL for an API, along with some metadata about the endpoint itself and represents a set of URL endpoints for OpenStack web services. * ``interface`` (string): describes the visibility of the endpoint according to one of three values (``public``, ``internal``, and ``admin``) ``public`` endpoints are intended for consumption by end users or other service users, generally on a publicly available network interface. ``internal`` endpoints are intended for consumption by end users, generally on an unmetered internal network interface. ``admin`` endpoints are intended only for consumption by those needing administrative access to the service, generally on a secure network interface. You might also think of each interface value as the result of a matrix of use cases: * **Public API** on a **public network**: use a ``public`` interface. * **Public API** on an **internal network**: use an ``internal`` interface. * **Privileged API** on a **public network**: unsupported! Use access controls on your ``public`` endpoint instead. * **Privileged API** on an **internal network**: ``admin`` interface, but use access controls on your ``public`` endpoint instead. 
The notion of a "privileged API" endpoint makes security-conscious developers instantly lazy (security becomes someone else's problem), and is an obvious attack vector if someone were to infiltrate your internal network. It also adds more complexity to your API architecture which makes documentation, testing, and API evolution that much more difficult. * ``url`` (string): fully qualified URL of the service endpoint This should be unversioned base URL for an API. Good examples include ``https://identity.example.com:5000/`` and ``https://keystone.example.com/``. Conversely, ``https://identity.example.com:5000/v3/`` is an unfortunate example because it directs all clients to connect to a versioned endpoint, regardless of which API versions they understand. This makes it hard for services to do any sort of API versioning, and for clients to dynamically discover additional available versions. For a period of time, keystone was stuck in a position where it implements a ``/v3/`` API, but for backwards compatibility with existing v2 clients, was forced to continue advertising the ``/v2.0/`` endpoint in the service catalog until it was reasonable to assume that all clients in the ecosystem are capable of handling an unversioned URL. As a side effect, this has had a tremendous impact on the awareness of, and thus adoption of, Keystone's Identity API v3 (which has been enabled by default — and stable — since the 2013.1 Grizzly release). Don't put your project in that position! Similarly, ``https://object-store.example.com/v1/KEY_\$(project_id)s`` (which would ultimately be rendered to clients as a project-specific URL, such as ``https://object-store.example.com/v1/KEY_d12af07f4e2c4390a21acc31517ebec9``) is an unfortunate example because not only does it hardcode an API version as in the above example, but it also exposes the client's project ID directly to the client. 
Instead, the operational scope or a request can be determined by inspecting the user's token or consuming values populated by ``keystonemiddleware.auth_token``. It's also far less cacheable than a URL that is neither project nor user specific, which is important given that every client needs access to consume the service catalog prior to nearly every API request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/services.rst0000664000175000017500000007171000000000000022465 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =========================== Keystone for Other Services =========================== This document provides a summary of some things that other services need to know about how keystone works, and specifically about how they can take advantage of the v3 API. The v3 API was introduced as a stable API in the Grizzly release. Glossary ======== Authentication The process of determining if a user is who they claim to be (authN). Authorization The process of determining if a user can do what they are requesting (authZ). Scope A specific operating context. This is commonly used when describing the authorization a user may have. For example, a user with a role assignment on a project can get a token scoped to that project, ultimately operating within that project's scope. System An assignment target that refers to a collection of API services as a whole. 
Users and groups can be granted authorization on the *deployment system*. Service OpenStack services like identity, compute, image, etc. Domain A container for users, projects, and groups. A domain is also an assignment target for users and groups. It's possible for users and groups to have authorization on domains outside of the domain associated to their reference. Project A container and a namespace for resources isolated within OpenStack. A user, or group of users, must have a role assignment on a project in order to interact with it. Token A self-service resource that proves a user's identity and authentication. It can optionally carry a user's authorization, allowing them to interact with OpenStack services. Role A string that represents one or more permissions or capabilities. Role Assignment An association between an actor and a target that results in authorization. Actors can be users or groups of users. Targets can be projects, domains, or the deployment system itself. User An entity modeling an end-user of the system. Group A container for users. Users indirectly inherit any authorization the group has on projects, domains, or the system. Domains ======= A major new feature in v3 is domains. Every project, user, and user group is owned by a domain (reflected by their ``domain_id`` value) which provides them their own namespace. For example, unlike in v2.0, usernames are no longer unique across the deployment. You can have two users with the same name, but they must be in different domains. However, user IDs are assigned to users by keystone and are expected to be unique across the deployment. All of this logic applies to projects, user groups and roles. One of the great things about domains is that you can have one domain backed by SQL (for service users) and another backed by LDAP (the cloud is deployed into existing infrastructure). The "default" domain ==================== .. note:: The v2.0 API has been removed as of the Queens release. 
While this section references the v2.0 API, it is purely for historical reasons that clarify the existence of the *default* domain. Domains were introduced as a v3-only feature. As a result, the v2.0 API didn't understand the concept of domains. To allow for both versions of the Identity API to run side-by-side, the idea of a *default* domain was established. The *default* domain was a domain that was guaranteed to exist and was created during the ``keystone-manage db_sync`` process. By default, the domain ID is ``default`` and the name is ``Default``, but it is possible to change these values through keystone's configuration file. The v2.0 API would consider users and projects existing within that domain as valid, but it would never expose domain information through the API. This allowed the v2.0 API to operate under the assumption that everything within the *default* domain was accessible. This was crucial in avoiding namespace conflicts between v2.0 and v3 where multiple domains existed. Using v3 allowed deployers the ability to experiment with domains, while isolating them from the v2.0 API. As far as the v3 API is concerned, the *default* domain is simply a domain and doesn't carry any special connotation like it did with v2.0. Authorization Scopes ==================== End users use the Identity API as a way to express their authoritative power to other OpenStack services. This is done using tokens, which can be scoped to one of several targets depending on the users' role assignments. This is typically referred to as a token's *scope*. This happens when a user presents credentials, in some form or fashion, to keystone in addition to a desired scope. If keystone can prove the user is who they say they are (authN), it will then validate that the user has access to the scope they are requesting (authZ). If successful, the token response will contain a token ID and data about the transaction, such as the scope target and role assignments. 
Users can use this token ID in requests to other OpenStack services, which consume the authorization information associated to that token to make decisions about what that user can or cannot do within that service. This section describes the various scopes available, and what they mean for services consuming tokens. System Scope ------------ A *system-scoped* token implies the user has authorization to act on the *deployment system*. These tokens are useful for interacting with resources that affect the deployment as a whole, or exposes resources that may otherwise violate project or domain isolation. Good examples of system-scoped resources include: * Services: Service entities within keystone that describe the services deployed in a cloud. * Endpoints: Endpoints that tell users where to find services deployed in a cloud. * Hypervisors: Physical compute infrastructure that hosts instances where the instances may, or may not, be owned by the same project. Domain Scope ------------ A *domain-scoped* token carries a user's authorization on a specific domain. Ideally, these tokens would be useful for listing resources aggregated across all projects with that domain. They can also be useful for creating entities that must belong to a domain. Users and groups are good examples of this. The following is an example of how a domain-scoped token could be used against a service. Assume a domain exists called `Foo`, and it contains projects called `bar` and `baz`. Let's also assume both projects contain instances running a workload. If Alice is a domain administrator for `Foo`, she should be able to pass her domain-scoped token to nova and ask for a list of instances. If nova supports domain-scoped tokens, the response would contain all instances in projects `bar` and `baz`. Another example of using a domain-scoped token would be if Alice wanted to create a new project in domain `Foo`. 
When Alice sends a request to create a new project (`POST /v3/projects`), keystone should ensure the new project is created within the `Foo` domain, since that's the authorization associated to Alice's token. .. WARNING:: This behavior isn't completely implemented, and is still in progress. This example describes the ideal behavior, specifically for developers looking to implement scope into their APIs. Project Scope ------------- A *project-scoped* token carries the role assignments a user has on a project. This type of scope is great for managing resources that fit nicely within project boundaries. Good examples of project-level resources that can be managed with project-scoped tokens are: * Instances: Virtual compute servers that require a project association in order to be created. * Volumes: Storage devices that can be attached to instances. Unscoped -------- An *unscoped* token is a token that proves authentication, but doesn't carry any authorization. Users can obtain unscoped tokens by simply proving their identity with credentials. Unscoped tokens can be exchanged for any of the various scoped tokens if a user has authorization on the requested scope. An example of where unscoped tokens are specifically useful is when users perform federated authentication. First, a user will receive an unscoped token pending successful federated authentication, which they can use to query keystone for a list of projects they're allowed to access. Then they can exchange their unscoped token for a project-scoped token allowing them to perform actions within a particular project. Why are authorization scopes important? ======================================= Flexibility for exposing your work ---------------------------------- OpenStack provides a rich set of APIs and functionality. We wrote some APIs with the intent of managing the deployment hardware, otherwise referred to as the deployment system. We wrote others to orchestrate resources in a project or a domain. 
Some APIs even operate on multiple levels. Since we use tokens to authorize a user's actions against a given service, they needed to handle different scope targets. For example, when a user asks for a new instance, we expect that instance to belong to a project; thus we expect a project relayed through the token's scope. This idea is fundamental in providing isolation, or tenancy, between projects in OpenStack. Initially, keystone only supported the ability to generate project-scoped tokens as a product of a user having a role assignment on a project. Consequently, services had no other choice but to require project-scoped tokens to protect almost all of their APIs, even if that wasn't an ideal option. Using project-scoped tokens to protect APIs they weren't designed to protect required operators to write custom policy checks to secure those APIs. An example showcases this more clearly. Let's assume an operator wanted to create a read-only role. Users with the `reader` role would be able to list things owned by the project, like instances, volumes, or snapshots. The operator also wants to have a read-only role for fellow operators or auditors, allowing them to view hypervisor information or endpoints and services. Reusing the existing `reader` role is difficult because users with that role on a project shouldn't see data about hypervisors, which would violate tenancy. Operators could create a new role called `operator` or `system-reader`, but then those users would still need to have that role assigned on a project to access deployment-level APIs. The concept of getting project-scoped tokens to access deployment-level resources makes no sense for abstractions like hypervisors that cannot belong to a single project. Furthermore, this requires deployers to maintain all of this in policy files. You can quickly see how only using project-scope limits our ability to protect APIs without convoluted or expensive-to-maintain solutions. 
Each scope offered by keystone helps operators and users avoid these problems by giving you, the developer, multiple options for protecting APIs you write, instead of the one-size-fits-all approach we outgrew. You no longer have to hope an operator configures policy correctly so their users can consume the feature you wrote. The more options you have for protecting an API, the easier it is to provide default policies that expose more of your work to users safely. Less custom code ---------------- Another crucial benefit of authorization scopes offered by keystone is less custom code. For example, if you were writing an API to manage a deployment-level resource but only allowed to consume project-scoped tokens, how would you determine an operator from an end user? Would you attempt to standardize a role name? Would you look for a unique project in the token's scope? Would these checks be configurable in policy or hardcoded in your service? Chances are, different services will come up with different, inconsistent solutions for the same problem. These inconsistencies make it harder for developers to context switch between services that process things differently. Users also suffer from inconsistencies by having to maintain a mental mapping of different behavior between services. Having different scopes at your disposal, through keystone tokens, lets you build on a standard solution that other projects also consume, reducing the likelihood of accidentally developing inconsistencies between services. This commonality also gives us a similar set of terms we can use when we communicate with each other and users, allowing us to know what someone means by a `system-admin` and how that is different from a `project-admin`. Reusable default roles ---------------------- When OpenStack services originally started developing a policy enforcement engine to protect APIs, the only real concrete role we assumed to be present in the deployment was a role called `admin`. 
Because we assumed this, we were able to write policies with `admin` as the default. Keystone also took steps to ensure it had a role with that name during installation. While making this assumption is beneficial for some APIs, having only one option is underwhelming and leaves many common policy use cases for operators to implement through policy overrides. For example, a typical ask from operators is to have a read-only role, that only allows users with that role on a target to view its contents, restricting them from making writable changes. Another example is a membership role that isn't the administrator. To put it clearly, a user with a `member` role assignment on a project may create new storage volumes, but they're unable to perform backups. Users with the `admin` role on a project can access the backups functionality. Keep in mind, the examples above are only meant to describe the need for other roles besides `admin` in a deployment. Service developers should be able to reuse these definitions for similar APIs and assume those roles exist. As a result, keystone implemented support for ensuring the `admin`, `member`, and `reader` roles are present during the installation process, specifically when running ``keystone-manage bootstrap``. Additionally, keystone creates a relationship among these roles that make them easier for service developers to use. During creation, keystone implies that the `admin` role is a superset of the `member` role, and the `member` role is a superset of the `reader` role. The benefit may not be obvious, but what this means is that users with the `admin` role on a target also have the `member` and `reader` roles generated in their token. Similarly, users with the `member` role also have the `reader` role relayed in their token, even though they don't have a direct role assignment using the `reader` role. This subtle relationship allows developers to use a short-hand notation for writing policies. 
The following assumes ``foobar`` is a project-level resource available over a service API and is protected by policies using generic roles: .. code-block:: yaml "service:foobar:get": "role:admin OR role:member OR role:reader" "service:foobar:list": "role:admin OR role:member OR role:reader" "service:foobar:create": "role:admin OR role:member" "service:foobar:update": "role:admin OR role:member" "service:foobar:delete": "role:admin" The following policies are functionally equivalent to the policies above, but rely on the implied relationship between the three roles, resulting in a simplified check string expression: .. code-block:: yaml "service:foobar:get": "role:reader" "service:foobar:list": "role:reader" "service:foobar:create": "role:member" "service:foobar:update": "role:member" "service:foobar:delete": "role:admin" In addition to above roles, from 2023.2 (Bobcat) release ``keystone-manage bootstrap`` will provide `service` role as well. If a ``service`` role is already present in the deployment, then a new one is not created. This way any local scripts relying on the role ID will not be broken. .. note:: If you already have a ``service`` role in your deployment, you should review its usage to make sure it is used only for service-to-service communication. Once ``service`` role is created, OpenStack service developers can start integrating it into their default policies as expressed: .. code-block:: python policy.DocumentedRuleDefault( name='os_compute_api:os-server-external-events:create', check_str='role:service', scope_types=['project'] ) It is important to note that we need to keep all the service-to-service APIs default to ``service`` role only. For example, a policy that requires ``service`` can be expressed as: .. 
code-block:: yaml "service:foobar:create": "role:service" There might be exceptional service-to-service APIs which projects think are useful for admin or non-admin users; in that case they can take the exceptional decision to default them to a user role in addition to the ``service`` role. For example, a policy that requires ``service`` and ``admin`` can be expressed as: .. code-block:: yaml "service:foobar:create": "role:service" OR "role:admin" Additionally, any deployment tools that create service accounts for OpenStack services, should start preparing for these policy changes by updating their role assignments and performing the deployment language equivalent of the following: .. code-block:: console $ openstack role add --user nova --project service service $ openstack role add --user cinder --project service service $ openstack role add --user neutron --project service service $ openstack role add --user glance --project service service $ openstack role add --user manila --project service service How do I incorporate authorization scopes into a service? ========================================================= Now that you understand the advantages of a shared approach to policy enforcement, the following section details the order of operations you can use to implement it in your service. Ruthless Testing ---------------- Policy enforcement implementations vary greatly across OpenStack services. Some enforce authorization near the top of the API while others push the logic deeper into the service. Differences and intricacies between services make testing imperative to adopt a uniform, consistent approach. Positive and negative protection testing helps us assert users with specific roles can, or cannot, access APIs. A protection test is similar to an API, or functional test, but purely focused on the authoritative outcome. In other words, protection testing is sufficient when we can assert that a user is or isn't allowed to do or see something. 
For example, a user with a role assignment on project `foo` shouldn't be able to list volumes in project `bar`. A user with a role on a project shouldn't be able to modify entries in the service catalog. Users with a `reader` role on the system, a domain, or a project shouldn't be able to make writable changes. You commonly see protection tests conclude with an assertion checking for a successful response code or an HTTP 403 Forbidden. If your service has minimal or non-existent protection coverage, you should start by introducing tests that exercise the current default policies, whatever those are. This step serves three significant benefits. First, it puts us in the shoes of our users from an authorization perspective, allowing us to see the surface of the API a user has access to with a given assignment. This information helps audit the API to make sure the user has all the authorization to do what they need_, but nothing more. We should note inconsistencies here as feedback that we should fix, especially since operators are probably attempting to fix these inconsistencies through customized policy today. Second, a collection of protection tests make sure we don't have unwanted security-related regressions. Imagine making a policy change that introduced a regression and allowed a user to access an API and data they aren't supposed to see. Conversely, imagine a patch that accidentally tightened restriction on an API that resulted in a broken workflow for users. Testing makes sure we catch cases like this early and handle them accordingly. Finally, protection tests help us use test-driven development to evolve policy enforcement. We can make a change and assert the behavior using tests locally, allowing us to be proactive and not reactive in our authoritative business logic. To get started, refer to the `oslo.policy documentation`_ that describes techniques for writing useful protection tests. 
This document also describes some historical context you might recognize in your service and how you should deal with it. You can also look at protection tests examples in other services, like keystone_ or cinder_. Note that these examples test the three default roles provided from keystone (reader, member, and admin) against the three scopes keystone offers, allowing for nine different personas without operators creating roles specific to their deployment. We recommend testing these personas where applicable in your service: * project reader * project member * project admin * system reader * system member * system admin * domain reader * domain member * domain admin .. _need: https://en.wikipedia.org/wiki/Principle_of_least_privilege .. _oslo.policy documentation: https://docs.openstack.org/oslo.policy/latest/user/usage.html#testing-default-policies .. _keystone: https://opendev.org/openstack/keystone/src/commit/77e50e49c5af37780b8b4cfe8721ba28e8a58183/keystone/tests/unit/protection/v3 .. _cinder: https://review.opendev.org/#/c/602489/ Auditing the API ---------------- After going through the API and adding protection tests, you should have a good idea of how each API is or isn't exposed to end users with different role assignments. You might also have a list of areas where policies could be improved. For example, maybe you noticed an API in your service that consumes project-scoped tokens to protect a system-level resource. If your service has a bug tracker, you can use it to document these gaps. The keystone team went through this exercise and used bugs_. Feel free to use these bug reports as a template for describing gaps in policy enforcement. For example, if your service has APIs for listing or getting resources, you could implement the reader role on that API. .. _bugs: http://tinyurl.com/y5kj6fn9 Setting scope types ------------------- With testing in place and gaps documented, you can start refactoring. 
The first step is to start using oslo.policy for scope checking, which reduces complexity in your service by having a library do some lifting for you. For example, if you have an API that requires a project-scoped token, you can set the scope of the policy protecting that API accordingly. If an instance of ``RuleDefault`` has scope associated to it, oslo.policy checks that it matches the scope of the token used to make the request. This behavior is configurable_, allowing operators to turn it on once all policies have a scope type and once operators have audited their assignments and educated their users on how to get the scope necessary to access an API. Once that happens, an operator can configure oslo.policy to reject requests made with the wrong scope. Otherwise, oslo.policy logs a warning for operators that describes the mismatched scope. The oslo.policy library provides `documentation for setting scope`_. You can also see `keystone examples`_ or `placement examples`_ of setting scope types on policies. If you have difficulty deciding which scope an API or resource requires, try thinking about the intended user. Are they an operator managing the deployment? Then you might choose `system`. Are they an end user meant to operate only within a given project? Then `project` scope is likely what you need. Scopes aren't mutually exclusive. You may have APIs that require more than one scope. Keystone's user and project APIs are good examples of resources that need different scopes. For example, a system administrator should be able to list all users in the system, but domain administrators should only be able to list users within their domain. If you have an API that falls into this category, you may be required to implicitly filter responses based on the scope type. If your service uses oslo.context and keystonemiddleware, you can query a `RequestContext` object about the token's scope. 
There are keystone patches_ that show how to filter responses according to scope using oslo.context, in case you need inspiration. If you still can't seem to find a solution, don't hesitate to send a note to the `OpenStack Discuss mailing list`_ tagged with `[keystone]` or ask in #openstack-keystone on IRC_. .. _configurable: https://docs.openstack.org/oslo.policy/latest/configuration/index.html#oslo_policy.enforce_scope .. _documentation for setting scope: https://docs.openstack.org/oslo.policy/latest/user/usage.html#setting-scope .. _keystone examples: https://review.opendev.org/#/q/status:merged+project:openstack/keystone+branch:master+topic:add-scope-types .. _placement examples: https://review.opendev.org/#/c/571201/ .. _patches: https://review.opendev.org/#/c/623319/ .. _OpenStack Discuss mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-discuss .. _IRC: https://wiki.openstack.org/wiki/IRC Rewriting check string ---------------------- With oslo.policy able to check scope, you can start refactoring check strings wherever necessary. For example, adding support for default roles or removing hard-coded ``is_admin: True`` checks. Remember that oslo.policy provides deprecation tooling that makes upgrades easier for operators. Specifically, upgrades are made easier by combining old defaults or overrides with the new defaults using a logical `OR`. We encourage you to use the available deprecation tooling when you change policy names or check strings. You can refer to examples_ that show you how to build descriptive rule objects using all the default roles from keystone and consuming scopes. .. _examples: https://review.opendev.org/#/q/(status:open+OR+status:merged)+project:openstack/keystone+branch:master+topic:implement-default-roles Communication ------------- Communicating early and often is never a bad thing, especially when a change is going to impact operators. 
At this point, it's crucial to emphasize the changes you've made to policy enforcement in your service. Release notes are an excellent way to signal changes to operators. You can find examples when keystone implemented support for default roles. Additionally, you might have operators or users ask questions about the various scopes or what they mean. Don't hesitate to refer them to keystone's :ref:`scope documentation `. Auth Token middleware ===================== The ``auth_token`` middleware handles token validation for the different services. Conceptually, what happens is that ``auth_token`` pulls the token out of the ``X-Auth-Token`` request header, validates the token using keystone, produces information about the identity (the API user) and authorization context (the project, roles, etc) of the token, and sets environment variables with that data. The services typically take the environment variables, put them in the service's "context", and use the context for policy enforcement via ``oslo.policy``. Service tokens -------------- Service tokens are a feature where the ``auth_token`` middleware will also accept a service token in the ``X-Service-Token`` header. It does the same thing with the service token as the user token, but the results of the token are passed separately in environment variables for the service token (the service user, project, and roles). If the service knows about these then it can put this info in its "context" and use it for policy checks. For example, assuming there's a special policy rule called ``service_role`` that works like the ``role`` rule except checks the service roles, you could have an ``oslo.policy`` rule like ``service_role:service and user_id:%(user_id)s`` such that a service token is required along with the user owning the object. Picking the version =================== Use version discovery to figure out what version the identity server supports rather than configuring the version. 
This will make it easier to adopt new API versions as they are implemented. For information about how to accomplish service discovery with the keystoneauth library, please see the `documentation `_. Hierarchical Multitenancy ========================= This feature is specific to v3 and allows projects to have parents, siblings, and children relationships with other projects. Tokens scoped to projects in a hierarchical structure won't contain information about the hierarchy in the token response. If the service needs to know the hierarchy it should use the v3 API to fetch the hierarchy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/set-up-keystone.rst0000664000175000017500000001667500000000000023727 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _dev-environment: =================== Setting up Keystone =================== Prerequisites ============= This document assumes you are using an Ubuntu, Fedora, or openSUSE platform and that you have the following tools pre-installed on your system: - python_ 3.6, as the programming language; - git_, as the version control tool; .. NOTE:: Keystone dropped the support of python_ 2.7 in the Ussuri release of Openstack. **Reminder**: If you are successfully using a different platform, or a different version of the above, please document your configuration here! .. 
_git: http://git-scm.com/ Installing from source ====================== The source install instructions specifically avoid using platform specific packages. Instead, we recommend using the source for the code and the Python Package Index (PyPi_) for development environment installations. .. _PyPi: https://pypi.org/project/pypi It's expected that your system already has python_, pip_, and git_ available. .. _python: http://www.python.org .. _pip: http://www.pip-installer.org/en/latest/installing.html .. _git: http://git-scm.com/ Clone the keystone repository: .. code-block:: bash $ git clone https://opendev.org/openstack/keystone.git $ cd keystone Install the keystone web service: .. code-block:: bash $ pip install -e . .. NOTE:: This step is guaranteed to fail if you do not have the proper binary dependencies already installed on your development system. Maintaining a list of platform-specific dependencies is outside the scope of this documentation, but is within scope of DEVSTACK_. .. _DEVSTACK: https://docs.openstack.org/devstack/latest Development environment ======================= For setting up the Python development environment and running `tox` testing environments, please refer to the `Project Team Guide: Python Project Guide`_, the OpenStack guide on wide standard practices around the use of Python. That documentation will help you configure your development environment and run keystone tests using `tox`, which uses virtualenv_ to isolate the Python environment. After running it, notice the existence of a `.tox` directory. .. _`Project Team Guide: Python Project Guide`: https://docs.openstack.org/project-team-guide/project-setup/python.html .. _virtualenv: http://www.virtualenv.org/ Deploying configuration files ============================= You should be able to run keystone after installing via pip. Additional configuration files are required. 
The following file is required in order to run keystone: * ``keystone.conf`` Configuring Keystone with a sample file --------------------------------------- Keystone requires a configuration file. Keystone's sample configuration file ``etc/keystone.conf.sample`` is automatically generated based upon all of the options available within Keystone. These options are sourced from the many files around Keystone as well as some external libraries. The sample configuration file will be updated as the end of the development cycle approaches. Developers should *NOT* generate the config file and propose it as part of their patches, this will cause unnecessary conflicts. You can generate one locally using the following command: .. code-block:: bash $ tox -e genconfig The tox command will place an updated sample config in ``etc/keystone.conf.sample``. The defaults are enough to get you going, but you can make any changes if needed. If there is a new external library (e.g. ``oslo.messaging``) that utilizes the ``oslo.config`` package for configuration, it can be added to the list of libraries found in ``config-generator/keystone.conf``. You can also generate sample policy files using ``tox -e genpolicy``. Please refer to :doc:`../configuration/config-options` for guidance on specific configuration options or to view a sample paste file. Bootstrapping a test deployment =============================== You can use the ``keystone-manage bootstrap`` command to pre-populate the database with necessary data. Verifying keystone is set up ============================ Once set up, you should be able to invoke Python and import the libraries: .. code-block:: bash $ .tox/py36/bin/python -c "import keystone" If you can import keystone without a traceback, you should be ready to move on to the next sections. You can run keystone using a host of wsgi implementations or web servers. The following uses ``uwsgi``: .. 
code-block:: bash $ uwsgi --http 127.0.0.1:5000 --wsgi-file $(which keystone-wsgi-public) This runs Keystone with the configuration in the etc/ directory of the project. See :doc:`../configuration/config-options` for details on how Keystone is configured. By default, Keystone is configured with SQL backends. Database setup ============== The script ``tools/test-setup.sh`` sets up databases as used by the unit tests. Initializing Keystone ===================== Before using keystone, it is necessary to create the database tables and ensure the database schemas are up to date. To do so, perform the following: .. code-block:: bash $ keystone-manage db_sync If the above commands result in a ``KeyError``, or they fail on a ``.pyc`` file with the message, ``You can only have one Python script per version``, then it is possible that there are out-of-date compiled Python bytecode files in the Keystone directory tree that are causing problems. This can occur if you have previously installed and run older versions of Keystone. These out-of-date files can be easily removed by running a command like the following from the Keystone root project directory: .. code-block:: bash $ find . -name "*.pyc" -delete Initial Sample Data ------------------- There is an included script which is helpful in setting up some initial sample data for use with keystone: .. code-block:: bash $ ADMIN_PASSWORD=s3cr3t tools/sample_data.sh Once run, you can see the sample data that has been created by using the `python-openstackclient`_ command-line interface: .. code-block:: bash $ export OS_USERNAME=admin $ export OS_PASSWORD=s3cr3t $ export OS_PROJECT_NAME=admin $ export OS_USER_DOMAIN_ID=default $ export OS_PROJECT_DOMAIN_ID=default $ export OS_IDENTITY_API_VERSION=3 $ export OS_AUTH_URL=http://localhost:5000/v3 $ openstack user list The `python-openstackclient`_ can be installed using the following: .. 
code-block:: bash $ pip install python-openstackclient Interacting with Keystone ========================= You can also interact with keystone through its REST API. There is a Python keystone client library `python-keystoneclient`_ which interacts exclusively through the REST API, and a command-line interface `python-openstackclient`_ command-line interface. .. _`python-keystoneclient`: https://opendev.org/openstack/python-keystoneclient .. _`python-openstackclient`: https://opendev.org/openstack/python-openstackclient ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/testing-keystone.rst0000664000175000017500000003514600000000000024161 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================ Testing Keystone ================ Running Tests ------------- Before running tests, you should have ``tox`` installed and available in your environment (in addition to the other external dependencies in :ref:`dev-environment`): .. code-block:: bash $ pip install tox .. NOTE:: You may need to perform both the above operation and the next inside a python virtualenv, or prefix the above command with ``sudo``, depending on your preference. To execute the full suite of tests maintained within keystone, simply run: .. 
code-block:: bash $ tox This iterates over multiple configuration variations, and uses external projects to do light integration testing to verify the Identity API against other projects. .. NOTE:: The first time you run ``tox``, it will take additional time to build virtualenvs. You can later use the ``-r`` option with ``tox`` to rebuild your virtualenv in a similar manner. To run tests for one or more specific test environments (for example, the most common configuration of Python 3.6 and PEP-8), list the environments with the ``-e`` option, separated by spaces: .. code-block:: bash $ tox -e py36,pep8 .. NOTE:: Keystone dropped the support of python_ 2.7 in the Ussuri release of Openstack. Use ``tox --listenvs`` to list all testing environments specified in keystone's ``tox.ini`` file. .. _python: http://www.python.org Interactive debugging ~~~~~~~~~~~~~~~~~~~~~ Using ``pdb`` breakpoints with ``tox`` and ``testr`` normally doesn't work since the tests just fail with a ``BdbQuit`` exception rather than stopping at the breakpoint. To capture breakpoints while running tests, use the ``debug`` environment. The following example uses the environment while invoking a specific test run. .. code-block:: bash $ tox -e debug keystone.tests.unit.test_module.TestClass.test_case For reference, the ``debug`` environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests Building the Documentation -------------------------- The ``docs`` and ``api-ref`` environments will automatically generate documentation and the API reference respectively. The results are written to ``doc/`` and ``api-ref/``. For example, use the following command to render all documentation and manual pages: .. code-block:: bash $ tox -e docs Tests Structure --------------- Not all of the tests in the ``keystone/tests/unit`` directory are strictly unit tests. 
Keystone intentionally includes tests that run the service locally and drives the entire configuration to achieve basic functional testing. For the functional tests, an in-memory key-value store or in-memory SQLite database is used to keep the tests fast. Within the tests directory, the general structure of the backend tests is a basic set of tests represented under a test class, and then subclasses of those tests under other classes with different configurations to drive different backends through the APIs. To add tests covering all drivers, update the base test class in ``test_backend.py``. .. NOTE:: The structure of backend testing is in transition, migrating from having all classes in a single file (``test_backend.py``) to one where there is a directory structure to reduce the size of the test files. See: - :mod:`keystone.tests.unit.backend.role` - :mod:`keystone.tests.unit.backend.domain_config` To add new drivers, subclass the base class at ``test_backend.py`` (look at ``test_backend_sql.py`` for examples) and update the configuration of the test class in ``setUp()``. For example, ``test_backend.py`` has a sequence of tests under the class :class:`keystone.tests.unit.test_backend.IdentityTests` that will work with the default drivers. The ``test_backend_sql.py`` module subclasses those tests, changing the configuration by overriding with configuration files stored in the ``tests/unit/config_files`` directory aimed at enabling the SQL backend for the Identity module. Testing Schema Migrations ------------------------- Tests for database migrations can be found in ``keystone/tests/unit/test_sql_upgrade.py`` and ``keystone/tests/unit/test_sql_banned_operations.py``. LDAP Tests ---------- LDAP has a fake backend that performs rudimentary operations. If you are building more significant LDAP functionality, you should test against a live LDAP server. Devstack has an option to set up a directory server for Keystone to use. 
Add ldap to the ``ENABLED_SERVICES`` environment variable, and set environment variables ``KEYSTONE_IDENTITY_BACKEND=ldap`` and ``KEYSTONE_CLEAR_LDAP=yes`` in your ``localrc`` file. The unit tests can be run against a live server with ``keystone/tests/unit/test_ldap_livetest.py`` and ``keystone/tests/unit/test_ldap_pool_livetest.py``. The default password is ``test`` but if you have installed devstack with a different LDAP password, modify the file ``keystone/tests/unit/config_files/backend_liveldap.conf`` and ``keystone/tests/unit/config_files/backend_pool_liveldap.conf`` to reflect your password. .. NOTE:: To run the live tests you need to set the environment variable ``ENABLE_LDAP_LIVE_TEST`` to a non-negative value. "Work in progress" Tests ------------------------ Work in progress (WIP) tests are very useful in a variety of situations including: * While doing test-driven-development they can be used to add tests to a review while they are not yet working and will not cause test failures. They can be removed when the functionality is fixed in a later patch set. * A common practice is to recreate bugs by exposing the broken behavior in a functional or unit test. To encapsulate the correct behavior in the test, the test will usually assert the correct outcome, which will break without a fix. Marking the test as WIP gives us the ability to capture the broken behavior in code if a fix isn't ready yet. The :func:`keystone.tests.unit.utils.wip` decorator can be used to mark a test as WIP. A WIP test will always be run. If the test fails then a SkipTest exception is raised because we expect the test to fail. We do not pass the test in this case so that it doesn't count toward the number of successfully run tests. If the test passes an AssertionError exception is raised so that the developer knows they made the test pass. This is a reminder to remove the decorator. The :func:`keystone.tests.unit.utils.wip` decorator requires that the author provides a message. 
This message is important because it will tell other developers why this test is marked as a work in progress. Reviewers will require that these messages are descriptive and accurate. .. NOTE:: The :func:`keystone.tests.unit.utils.wip` decorator is not a replacement for skipping tests. .. code-block:: python @wip('waiting on bug #000000') def test(): pass .. NOTE:: Another strategy is to not use the wip decorator and instead show how the code currently incorrectly works. Which strategy is chosen is up to the developer. API & Scenario Tests -------------------- Keystone provides API and scenario tests via a `tempest plugin`_ which is located in a separate `repository`_. This tempest plugin is mainly intended for specific scenarios that require a special deployment, such as the tests for the ``Federated Identity`` feature or live testing against LDAP. For the deployment of these scenarios, keystone also provides a `devstack plugin`_. For example, to setup a working federated environment, add the following lines in your `devstack` `local.conf`` file: .. code-block:: bash [[local|localrc]] enable_plugin keystone https://opendev.org/openstack/keystone enable_service keystone-saml2-federation Clone and install keystone-tempest-plugin. .. code-block:: bash git clone https://opendev.org/openstack/keystone-tempest-plugin sudo pip install ./keystone-tempest-plugin Finally, to run keystone's API and scenario tests, deploy `tempest`_ with `devstack`_ (using the configuration above) and then run the following command from the tempest directory: .. code-block:: bash tox -e all -- keystone_tempest_plugin .. NOTE:: Most of keystone's API tests are implemented in `tempest`_ and it is usually the correct place to add new tests. .. _devstack: https://opendev.org/openstack/devstack .. _devstack plugin: https://docs.openstack.org/devstack/latest/plugins.html .. _tempest: https://opendev.org/openstack/tempest .. _tempest plugin: https://docs.openstack.org/tempest/latest/plugin.html .. 
_repository: http://opendev.org/openstack/keystone-tempest-plugin Writing new API & Scenario Tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When writing tests for the keystone tempest plugin, we should follow the official tempest guidelines, details about the guidelines can be found at the `tempest coding guide`_. There are also specific guides for the API and scenario tests: `Tempest Field Guide to API tests`_ and `Tempest Field Guide to Scenario tests`_. The keystone tempest plugin also provides a base class. For most cases, the tests should inherit from it: :class:`keystone_tempest_plugin.tests.base.BaseIdentityTest`. This class already setups the identity API version and is the container of all API services clients. New API services clients :mod:`keystone_tempest_plugin.services` (which are used to communicate with the REST API from the services) should also be added to this class. For example, below we have a snippet from the tests at :py:mod:`keystone_tempest_plugin.tests.api.identity.v3.test_identity_providers.py`. .. code-block:: python class IdentityProvidersTest(base.BaseIdentityTest): ... def _create_idp(self, idp_id, idp_ref): idp = self.idps_client.create_identity_provider( idp_id, **idp_ref)['identity_provider'] self.addCleanup( self.idps_client.delete_identity_provider, idp_id) return idp @decorators.idempotent_id('09450910-b816-4150-8513-a2fd4628a0c3') def test_identity_provider_create(self): idp_id = data_utils.rand_uuid_hex() idp_ref = fixtures.idp_ref() idp = self._create_idp(idp_id, idp_ref) # The identity provider is disabled by default idp_ref['enabled'] = False # The remote_ids attribute should be set to an empty list by default idp_ref['remote_ids'] = [] self._assert_identity_provider_attributes(idp, idp_id, idp_ref) The test class extends :class:`keystone_tempest_plugin.tests.base.BaseIdentityTest`. Also, the ``_create_idp`` method calls keystone's API using the ``idps_client``, which is an instance from. 
:class:`keystone_tempest_plugin.tests.services.identity.v3.identity_providers_client.IdentityProvidersClient`. Additionally, to illustrate the construction of a new test class, below we have a snippet from the scenario test that checks the complete federated authentication workflow ( :py:mod:`keystone_tempest_plugin.tests.scenario.test_federated_authentication.py`). In the test setup, all of the needed resources are created using the API service clients. Since it is a scenario test, it is common to need some customized settings that will come from the environment (in this case, from the devstack plugin) - these settings are collected in the ``_setup_settings`` method. .. code-block:: python class TestSaml2EcpFederatedAuthentication(base.BaseIdentityTest): ... def _setup_settings(self): self.idp_id = CONF.fed_scenario.idp_id self.idp_url = CONF.fed_scenario.idp_ecp_url self.keystone_v3_endpoint = CONF.identity.uri_v3 self.password = CONF.fed_scenario.idp_password self.protocol_id = CONF.fed_scenario.protocol_id self.username = CONF.fed_scenario.idp_username ... def setUp(self): super(TestSaml2EcpFederatedAuthentication, self).setUp() self._setup_settings() # Reset client's session to avoid getting garbage from another runs self.saml2_client.reset_session() # Setup identity provider, mapping and protocol self._setup_idp() self._setup_mapping() self._setup_protocol() Finally, the tests perform the complete workflow of the feature, asserting correctness in each step: .. 
code-block:: python def _request_unscoped_token(self): resp = self.saml2_client.send_service_provider_request( self.keystone_v3_endpoint, self.idp_id, self.protocol_id) self.assertEqual(http_client.OK, resp.status_code) saml2_authn_request = etree.XML(resp.content) relay_state = self._str_from_xml( saml2_authn_request, self.ECP_RELAY_STATE) sp_consumer_url = self._str_from_xml( saml2_authn_request, self.ECP_SERVICE_PROVIDER_CONSUMER_URL) # Perform the authn request to the identity provider resp = self.saml2_client.send_identity_provider_authn_request( saml2_authn_request, self.idp_url, self.username, self.password) self.assertEqual(http_client.OK, resp.status_code) saml2_idp_authn_response = etree.XML(resp.content) idp_consumer_url = self._str_from_xml( saml2_idp_authn_response, self.ECP_IDP_CONSUMER_URL) # Assert that both saml2_authn_request and saml2_idp_authn_response # have the same consumer URL. self.assertEqual(sp_consumer_url, idp_consumer_url) ... @testtools.skipUnless(CONF.identity_feature_enabled.federation, "Federated Identity feature not enabled") def test_request_unscoped_token(self): self._request_unscoped_token() Notice that the ``test_request_unscoped_token`` test only executes if the ``federation`` feature flag is enabled. .. NOTE:: For each patch submitted upstream, all of the tests from the keystone tempest plugin are executed in the ``gate-keystone-dsvm-functional-v3-only-*`` job. .. _Tempest Field Guide to Scenario tests: https://docs.openstack.org/tempest/latest/field_guide/scenario.html .. _Tempest Field Guide to API tests: https://docs.openstack.org/tempest/latest/field_guide/api.html .. _tempest coding guide: https://docs.openstack.org/tempest/latest/HACKING.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/contributor/vision-reflection.rst0000664000175000017500000001364100000000000024300 0ustar00zuulzuul00000000000000.. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================= Technical Vision for Keystone ============================= This document is a self-evaluation of keystone with regard to the Technical Committee's `technical vision`_ and serves as a basis for guiding the mission of the keystone project. The objectives captured here are what the keystone team strives to build. New features and design changes should be compared with this document before being embarked upon. When such proposals are not in alignment, propose a change to this document or to the overall `technical vision`_ to initiate a discussion on the renewed vision for the project. .. _technical vision: https://governance.openstack.org/tc/reference/technical-vision.html Mission Statement ================= Keystone's mission is to provide secure, resilient, and user-friendly discovery, authentication, and authorization for multitenant services. Vision for OpenStack ==================== Self-service ------------ Keystone needs to strive to provide a flexible and simple mechanism to expose OpenStack functionality safely and securely in a multi-tenant environment, to enable a true self-service experience for end users in a shared-resource system. Application Control ------------------- Keystone provides the ability for applications to have their own identity through :ref:`application credentials `, in service of developers building applications that need to access cloud APIs and cloud-native applications. 
Interoperability ---------------- Keystone strives for a completely seamless experience for end users and applications running on multiple clouds. Initiatives in service of providing such a consistent user experience include providing a discovery mechanism for available functionality, eliminating optional API extensions, and providing useful default roles which eliminate the need for inconsistently-named, operator-defined roles for similar access levels between clouds. Keystone is also capable of itself acting as a bridge between separate clouds through its Keystone-to-Keystone federated authentication functionality. Bidirectional Compatibility --------------------------- To support clients operating across multiple clouds of potentially different versions, changes in keystone's major API are additive-only, and updates to the API are signaled by the minor version number, which allows clients to discover, to a reasonable degree, what capabilities are available in the keystone version they are connecting to. Keystone also provides a JSON-home document to aid clients in discovering the availability and status of features. Enhancements to the discoverability of keystone's APIs are a priority. Partitioning ------------ Keystone's service catalog mechanism makes it possible for users to have authorization for resources in geographically distributed regions, and keystone's various mechanisms for distributed authentication, such as using a distributed database or LDAP identity backend, using an external authentication source, or federating keystone itself to provide distributed identity providers, support geographically distributed computing. Keystone hopes to create a consistent user story and reference architecture for large-scale distributed deployments, including edge-computing use cases. 
Basic Physical Data Center Management ------------------------------------- In support of OpenStack being primarily a data center management tool, keystone should always work out of the box and not rely on the pre-existence of another identity management system in the data center. In practice this means always continuing to support a SQL storage backend for user data. Plays Well With Others ---------------------- Keystone encourages its use outside of an OpenStack environment. In support of this, keystone supports a standard authentication token format (`JWT`_) that can be understood by many applications, and seeks to support full Single-Sign-On functionality that can be used in front of any web application. .. _JWT: https://tools.ietf.org/html/rfc7519 Customizable Integration ------------------------ In service of supporting customizable integration both between OpenStack services and from client applications, keystone has an ongoing mission to fulfill the `Principle of Least Privilege`_ and permit the cloud consumer to delegate only the minimum permissions needed to an application. Keystone works to provide this both through reforming OpenStack policy to make it easier to manage across services, and by providing new mechanisms such as application credential access rules to allow users to restrict capabilities of applications to a subset of service APIs. Graphical User Interface ------------------------ Keystone does not provide a graphical user interface, but must always be mindful of how its APIs will be presented in dashboards. For some features, such as Single-Sign-On authentication, keystone may provide its own graphical user interface in order to provide a smooth web-login experience without requiring a dependency on another dashboard. Secure by Design ---------------- Keystone strives to be secure by design, by making opinionated choices about the default security configuration. 
Making it easier to administer fine-grained access control in support of the `Principle of Least Privilege`_ is an ongoing effort. .. _Principle of Least Privilege: https://en.wikipedia.org/wiki/Principle_of_least_privilege ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4861143 keystone-26.0.0/doc/source/getting-started/0000775000175000017500000000000000000000000020635 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/getting-started/architecture.rst0000664000175000017500000004126600000000000024062 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================== Keystone Architecture ===================== Much of the design assumes that in most deployments auth backends will be shims in front of existing user systems. Services ======== Keystone is organized as a group of internal services exposed on one or many endpoints. Many of these services are used in a combined fashion by the frontend. For example, an authenticate call will validate user/project credentials with the Identity service and, upon success, create and return a token with the Token service. Identity -------- The Identity service provides auth credential validation and data about `users` and `groups`. 
In the basic case, this data is managed by the Identity service, allowing it to also handle all CRUD operations associated with this data. In more complex cases, the data is instead managed by an authoritative backend service. An example of this would be when the Identity service acts as a frontend for LDAP. In that case the LDAP server is the source of truth and the role of the Identity service is to relay that information accurately. Users ^^^^^ ``Users`` represent an individual API consumer. A user itself must be owned by a specific domain, and hence all user names are **not** globally unique, but only unique to their domain. Groups ^^^^^^ ``Groups`` are a container representing a collection of users. A group itself must be owned by a specific domain, and hence all group names are **not** globally unique, but only unique to their domain. Resource -------- The Resource service provides data about `projects` and `domains`. Projects ^^^^^^^^ ``Projects`` represent the base unit of ``ownership`` in OpenStack, in that all resources in OpenStack should be owned by a specific project. A project itself must be owned by a specific domain, and hence all project names are **not** globally unique, but unique to their domain. If the domain for a project is not specified, then it is added to the default domain. Domains ^^^^^^^ ``Domains`` are a high-level container for projects, users and groups. Each is owned by exactly one domain. Each domain defines a namespace where an API-visible name attribute exists. Keystone provides a default domain, aptly named 'Default'. In the Identity v3 API, the uniqueness of attributes is as follows: - Domain Name. Globally unique across all domains. - Role Name. Unique within the owning domain. - User Name. Unique within the owning domain. - Project Name. Unique within the owning domain. - Group Name. Unique within the owning domain. 
Due to their container architecture, domains may be used as a way to delegate management of OpenStack resources. A user in a domain may still access resources in another domain, if an appropriate assignment is granted. Assignment ---------- The Assignment service provides data about `roles` and `role assignments`. Roles ^^^^^ ``Roles`` dictate the level of authorization the end user can obtain. Roles can be granted at either the domain or project level. A role can be assigned at the individual user or group level. Role names are unique within the owning domain. Role Assignments ^^^^^^^^^^^^^^^^ A 3-tuple that has a ``Role``, a ``Resource`` and an ``Identity``. Token ----- The Token service validates and manages tokens used for authenticating requests once a user's credentials have already been verified. Catalog ------- The Catalog service provides an endpoint registry used for endpoint discovery. Application Construction ======================== Keystone is an HTTP front-end to several services. Since the Rocky release Keystone uses the `Flask-RESTful`_ library to provide a REST API interface to these services. .. _`Flask-RESTful`: https://flask-restful.readthedocs.io/en/latest/ Keystone defines functions related to `Flask-RESTful`_ in :mod:`keystone.server.flask.common`. Keystone creates API resources which inherit from class :mod:`keystone.server.flask.common.ResourceBase` and exposes methods for each supported HTTP methods GET, PUT , POST, PATCH and DELETE. For example, the User resource will look like: .. code-block:: python class UserResource(ks_flask.ResourceBase): collection_key = 'users' member_key = 'user' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='identity_api', method='get_user') def get(self, user_id=None): """Get a user resource or list users. GET/HEAD /v3/users GET/HEAD /v3/users/{user_id} """ ... def post(self): """Create a user. POST /v3/users """ ... 
class UserChangePasswordResource(ks_flask.ResourceBase): @ks_flask.unenforced_api def post(self, user_id): ... Routes for each API resource are defined by classes which inherit from :mod:`keystone.server.flask.common.APIBase`. For example, the UserAPI will look like: .. code-block:: python class UserAPI(ks_flask.APIBase): _name = 'users' _import_name = __name__ resources = [UserResource] resource_mapping = [ ks_flask.construct_resource_map( resource=UserChangePasswordResource, url='/users//password', resource_kwargs={}, rel='user_change_password', path_vars={'user_id': json_home.Parameters.USER_ID} ), ... The methods ``_add_resources()`` or ``_add_mapped_resources()`` in :mod:`keystone.server.flask.common.APIBase` bind the resources with the APIs. Within each API, one or more managers are loaded (for example, see :mod:`keystone.catalog.core.Manager`), which are thin wrapper classes which load the appropriate service driver based on the keystone configuration. * Assignment * :mod:`keystone.api.role_assignments` * :mod:`keystone.api.role_inferences` * :mod:`keystone.api.roles` * :mod:`keystone.api.os_inherit` * :mod:`keystone.api.system` * Authentication * :mod:`keystone.api.auth` * :mod:`keystone.api.ec2tokens` * :mod:`keystone.api.s3tokens` * Catalog * :mod:`keystone.api.endpoints` * :mod:`keystone.api.os_ep_filter` * :mod:`keystone.api.regions` * :mod:`keystone.api.services` * Credentials * :mod:`keystone.api.credentials` * Federation * :mod:`keystone.api.os_federation` * Identity * :mod:`keystone.api.groups` * :mod:`keystone.api.users` * Limits * :mod:`keystone.api.registered_limits` * :mod:`keystone.api.limits` * Oauth1 * :mod:`keystone.api.os_oauth1` * Policy * :mod:`keystone.api.policy` * Resource * :mod:`keystone.api.domains` * :mod:`keystone.api.projects` * Revoke * :mod:`keystone.api.os_revoke` * Trust * :mod:`keystone.api.trusts` Service Backends ================ Each of the services can be configured to use a backend to allow keystone to fit a variety of 
environments and needs. The backend for each service is defined in the keystone.conf file with the key ``driver`` under a group associated with each service. A general class exists under each backend to provide an abstract base class for any implementations, identifying the expected service implementations. The abstract base classes are stored in the service's backends directory as ``base.py``. The corresponding drivers for the services are: * :mod:`keystone.assignment.backends.base.AssignmentDriverBase` * :mod:`keystone.assignment.role_backends.base.RoleDriverBase` * :mod:`keystone.auth.plugins.base.AuthMethodHandler` * :mod:`keystone.catalog.backends.base.CatalogDriverBase` * :mod:`keystone.credential.backends.base.CredentialDriverBase` * :mod:`keystone.endpoint_policy.backends.base.EndpointPolicyDriverBase` * :mod:`keystone.federation.backends.base.FederationDriverBase` * :mod:`keystone.identity.backends.base.IdentityDriverBase` * :mod:`keystone.identity.mapping_backends.base.MappingDriverBase` * :mod:`keystone.identity.shadow_backends.base.ShadowUsersDriverBase` * :mod:`keystone.oauth1.backends.base.Oauth1DriverBase` * :mod:`keystone.policy.backends.base.PolicyDriverBase` * :mod:`keystone.resource.backends.base.ResourceDriverBase` * :mod:`keystone.resource.config_backends.base.DomainConfigDriverBase` * :mod:`keystone.revoke.backends.base.RevokeDriverBase` * :mod:`keystone.token.providers.base.Provider` * :mod:`keystone.trust.backends.base.TrustDriverBase` If you implement a backend driver for one of the keystone services, you're expected to subclass from these classes. Templated Backend ----------------- Largely designed for a common use case around service catalogs in the keystone project, a templated backend is a catalog backend that simply expands pre-configured templates to provide catalog data. Example paste.deploy config (uses $ instead of % to avoid ConfigParser's interpolation) .. 
code-block:: ini [DEFAULT] catalog.RegionOne.identity.publicURL = http://localhost:$(public_port)s/v3 catalog.RegionOne.identity.adminURL = http://localhost:$(public_port)s/v3 catalog.RegionOne.identity.internalURL = http://localhost:$(public_port)s/v3 catalog.RegionOne.identity.name = 'Identity Service' Data Model ========== Keystone was designed from the ground up to be amenable to multiple styles of backends. As such, many of the methods and data types will happily accept more data than they know what to do with and pass them on to a backend. There are a few main data types: * **User**: has account credentials, is associated with one or more projects or domains * **Group**: a collection of users, is associated with one or more projects or domains * **Project**: unit of ownership in OpenStack, contains one or more users * **Domain**: unit of ownership in OpenStack, contains users, groups and projects * **Role**: a first-class piece of metadata associated with many user-project pairs. * **Token**: identifying credential associated with a user or user and project * **Extras**: bucket of key-value metadata associated with a user-project pair. * **Rule**: describes a set of requirements for performing an action. While the general data model allows a many-to-many relationship between users and groups to projects and domains; the actual backend implementations take varying levels of advantage of that functionality. Approach to CRUD ================ While it is expected that any "real" deployment at a large company will manage their users and groups in their existing user systems, a variety of CRUD operations are provided for the sake of development and testing. CRUD is treated as an extension or additional feature to the core feature set, in that a backend is not required to support it. It is expected that backends for services that don't support the CRUD operations will raise a :mod:`keystone.exception.NotImplemented`. 
Approach to Authorization (Policy) ================================== Various components in the system require that different actions are allowed based on whether the user is authorized to perform that action. For the purposes of keystone there are only a couple levels of authorization being checked for: * Require that the performing user is considered an admin. * Require that the performing user matches the user being referenced. Other systems wishing to use the policy engine will require additional styles of checks and will possibly write completely custom backends. By default, keystone leverages policy enforcement that is maintained in `oslo.policy `_. Rules ----- Given a list of matches to check for, simply verify that the credentials contain the matches. For example: .. code-block:: python credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']} # An admin only call: policy_api.enforce(('is_admin:1',), credentials) # An admin or owner call: policy_api.enforce(('is_admin:1', 'user_id:foo'), credentials) # A netadmin call: policy_api.enforce(('roles:nova:netadmin',), credentials) Credentials are generally built from the user metadata in the 'extras' part of the Identity API. So, adding a 'role' to the user just means adding the role to the user metadata. Capability RBAC --------------- (Not yet implemented.) Another approach to authorization can be action-based, with a mapping of roles to which capabilities are allowed for that role. For example: .. code-block:: python credentials = {'user_id': 'foo', 'is_admin': 1, 'roles': ['nova:netadmin']} # add a policy policy_api.add_policy('action:nova:add_network', ('roles:nova:netadmin',)) policy_api.enforce(('action:nova:add_network',), credentials) In the backend this would look up the policy for 'action:nova:add_network' and then do what is effectively a 'Simple Match' style match against the credentials. 
"user": { "domain": { "name": "acme" }, "name": "userA", "password": "secretsecret" }
A `domain-scoped` token may be used to perform domain-related functions.
Launchpad is a code hosting platform that OpenStack uses to track bugs, feature work, and releases of OpenStack.
`OpenStack Blog `_ See also: `Planet OpenStack `_, an aggregation of blogs about OpenStack from around the internet, combined into a web site and RSS feed. If you'd like to contribute with your blog posts, there are instructions for `adding your blog `_. Twitter ------- Because all the cool kids do it: `@openstack `_. Also follow the `#openstack `_ tag for relevant tweets. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/getting-started/index.rst0000664000175000017500000000021600000000000022475 0ustar00zuulzuul00000000000000=============== Getting Started =============== .. toctree:: :maxdepth: 1 architecture.rst policy_mapping.rst community.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/getting-started/policy_mapping.rst0000664000175000017500000005321400000000000024406 0ustar00zuulzuul00000000000000=============================== Mapping of policy target to API =============================== The following table shows the target in the policy.yaml file for each API. 
identity:update_service PATCH /v3/services/{service_id} identity:delete_service DELETE /v3/services/{service_id}
/v3/projects/{project_id} identity:delete_project DELETE /v3/projects/{project_id} identity:get_project_tag GET /v3/projects/{project_id}/tags/{tag_name} HEAD /v3/projects/{project_id}/tags/{tag_name} identity:list_project_tags GET /v3/projects/{project_id}/tags HEAD /v3/projects/{project_id}/tags identity:create_project_tag PUT /v3/projects/{project_id}/tags/{tag_name} identity:update_project_tags PUT /v3/projects/{project_id}/tags identity:delete_project_tag DELETE /v3/projects/{project_id}/tags/{tag_name} identity:delete_project_tags DELETE /v3/projects/{project_id}/tags identity:get_user GET /v3/users/{user_id} identity:list_users GET /v3/users identity:create_user POST /v3/users identity:update_user PATCH /v3/users/{user_id} identity:delete_user DELETE /v3/users/{user_id} identity:get_group GET /v3/groups/{group_id} identity:list_groups GET /v3/groups identity:list_groups_for_user GET /v3/users/{user_id}/groups identity:create_group POST /v3/groups identity:update_group PATCH /v3/groups/{group_id} identity:delete_group DELETE /v3/groups/{group_id} identity:list_users_in_group GET /v3/groups/{group_id}/users identity:remove_user_from_group DELETE /v3/groups/{group_id}/users/{user_id} identity:check_user_in_group GET /v3/groups/{group_id}/users/{user_id} identity:add_user_to_group PUT /v3/groups/{group_id}/users/{user_id} identity:get_credential GET /v3/credentials/{credential_id} identity:list_credentials GET /v3/credentials identity:create_credential POST /v3/credentials identity:update_credential PATCH /v3/credentials/{credential_id} identity:delete_credential DELETE /v3/credentials/{credential_id} identity:ec2_get_credential GET /v3/users/{user_id}/credentials/OS-EC2/{credential_id} identity:ec2_list_credentials GET /v3/users/{user_id}/credentials/OS-EC2 identity:ec2_create_credential POST /v3/users/{user_id}/credentials/OS-EC2 identity:ec2_delete_credential DELETE /v3/users/{user_id}/credentials/OS-EC2/{credential_id} identity:get_role GET 
/v3/roles/{role_id} identity:list_roles GET /v3/roles identity:create_role POST /v3/roles identity:update_role PATCH /v3/roles/{role_id} identity:delete_role DELETE /v3/roles/{role_id} identity:get_domain_role GET /v3/roles/{role_id} where role.domain_id is not null identity:list_domain_roles GET /v3/roles?domain_id where role.domain_id is not null identity:create_domain_role POST /v3/roles where role.domain_id is not null identity:update_domain_role PATCH /v3/roles/{role_id} where role.domain_id is not null identity:delete_domain_role DELETE /v3/roles/{role_id} where role.domain_id is not null identity:get_implied_role GET /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:list_implied_roles GET /v3/roles/{prior_role_id}/implies identity:create_implied_role PUT /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:delete_implied_role DELETE /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:list_role_inference_rules GET /v3/role_inferences identity:check_implied_role HEAD /v3/roles/{prior_role_id}/implies/{implied_role_id} identity:check_grant GET `grant_resources`_ identity:list_grants GET `grant_collections`_ identity:create_grant PUT `grant_resources`_ identity:revoke_grant DELETE `grant_resources`_ identity:list_system_grants_for_user GET /v3/system/users/{user_id}/roles identity:check_system_grant_for_user GET /v3/system/users/{user_id}/roles/{role_id} identity:create_system_grant_for_user PUT /v3/system/users/{user_id}/roles/{role_id} identity:revoke_system_grant_for_user DELETE /v3/system/users/{user_id}/roles/{role_id} identity:list_system_grants_for_group GET /v3/system/groups/{group_id}/roles identity:check_system_grant_for_group GET /v3/system/groups/{group_id}/roles/{role_id} identity:create_system_grant_for_group PUT /v3/system/groups/{group_id}/roles/{role_id} identity:revoke_system_grant_for_group DELETE /v3/system/groups/{group_id}/roles/{role_id} identity:list_role_assignments GET /v3/role_assignments 
identity:list_role_assignments_for_tree GET /v3/role_assignments?include_subtree identity:get_policy GET /v3/policy/{policy_id} identity:list_policies GET /v3/policy identity:create_policy POST /v3/policy identity:update_policy PATCH /v3/policy/{policy_id} identity:delete_policy DELETE /v3/policy/{policy_id} identity:check_token HEAD /v3/auth/tokens identity:validate_token GET /v3/auth/tokens identity:revocation_list GET /v3/auth/tokens/OS-PKI/revoked identity:revoke_token DELETE /v3/auth/tokens identity:create_trust POST /v3/OS-TRUST/trusts identity:list_trusts GET /v3/OS-TRUST/trusts identity:list_trusts_for_trustor GET /v3/OS-TRUST/trusts?trustor_user_id={trustor_user_id} identity:list_trusts_for_trustee GET /v3/OS-TRUST/trusts?trustee_user_id={trustee_user_id} identity:list_roles_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles identity:get_role_for_trust GET /v3/OS-TRUST/trusts/{trust_id}/roles/{role_id} identity:delete_trust DELETE /v3/OS-TRUST/trusts/{trust_id} identity:get_trust GET /v3/OS-TRUST/trusts/{trust_id} identity:create_consumer POST /v3/OS-OAUTH1/consumers identity:get_consumer GET /v3/OS-OAUTH1/consumers/{consumer_id} identity:list_consumers GET /v3/OS-OAUTH1/consumers identity:delete_consumer DELETE /v3/OS-OAUTH1/consumers/{consumer_id} identity:update_consumer PATCH /v3/OS-OAUTH1/consumers/{consumer_id} identity:authorize_request_token PUT /v3/OS-OAUTH1/authorize/{request_token_id} identity:list_access_token_roles GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles identity:get_access_token_role GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}/roles/{role_id} identity:list_access_tokens GET /v3/users/{user_id}/OS-OAUTH1/access_tokens identity:get_access_token GET /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} identity:delete_access_token DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} identity:list_projects_for_endpoint GET 
/v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects identity:add_endpoint_to_project PUT /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:check_endpoint_in_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:list_endpoints_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoints identity:remove_endpoint_from_project DELETE /v3/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id} identity:create_endpoint_group POST /v3/OS-EP-FILTER/endpoint_groups identity:list_endpoint_groups GET /v3/OS-EP-FILTER/endpoint_groups identity:get_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:update_endpoint_group PATCH /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:delete_endpoint_group DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id} identity:list_projects_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects identity:list_endpoints_associated_with_endpoint_group GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/endpoints identity:get_endpoint_group_in_project GET /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:list_endpoint_groups_for_project GET /v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups identity:add_endpoint_group_to_project PUT /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:remove_endpoint_group_from_project DELETE /v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}/projects/{project_id} identity:create_identity_provider PUT /v3/OS-FEDERATION/identity_providers/{idp_id} identity:list_identity_providers GET /v3/OS-FEDERATION/identity_providers identity:get_identity_provider GET /v3/OS-FEDERATION/identity_providers/{idp_id} identity:update_identity_provider PATCH /v3/OS-FEDERATION/identity_providers/{idp_id} identity:delete_identity_provider DELETE /v3/OS-FEDERATION/identity_providers/{idp_id} identity:create_protocol PUT 
/v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:update_protocol PATCH /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:get_protocol GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:list_protocols GET /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols identity:delete_protocol DELETE /v3/OS-FEDERATION/identity_providers/{idp_id}/protocols/{protocol_id} identity:create_mapping PUT /v3/OS-FEDERATION/mappings/{mapping_id} identity:get_mapping GET /v3/OS-FEDERATION/mappings/{mapping_id} identity:list_mappings GET /v3/OS-FEDERATION/mappings identity:delete_mapping DELETE /v3/OS-FEDERATION/mappings/{mapping_id} identity:update_mapping PATCH /v3/OS-FEDERATION/mappings/{mapping_id} identity:create_service_provider PUT /v3/OS-FEDERATION/service_providers/{sp_id} identity:list_service_providers GET /v3/OS-FEDERATION/service_providers identity:get_service_provider GET /v3/OS-FEDERATION/service_providers/{sp_id} identity:update_service_provider PATCH /v3/OS-FEDERATION/service_providers/{sp_id} identity:delete_service_provider DELETE /v3/OS-FEDERATION/service_providers/{sp_id} identity:get_auth_catalog GET /v3/auth/catalog identity:get_auth_projects GET /v3/auth/projects identity:get_auth_domains GET /v3/auth/domains identity:get_auth_system GET /v3/auth/system identity:list_projects_for_user GET /v3/OS-FEDERATION/projects identity:list_domains_for_user GET /v3/OS-FEDERATION/domains identity:list_revoke_events GET /v3/OS-REVOKE/events identity:create_policy_association_for_endpoint PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:check_policy_association_for_endpoint GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:delete_policy_association_for_endpoint DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints/{endpoint_id} identity:create_policy_association_for_service PUT 
/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:check_policy_association_for_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:delete_policy_association_for_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id} identity:create_policy_association_for_region_and_service PUT /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:check_policy_association_for_region_and_service GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:delete_policy_association_for_region_and_service DELETE /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/services/{service_id}/regions/{region_id} identity:get_policy_for_endpoint GET /v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy identity:list_endpoints_for_policy GET /v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints identity:create_domain_config PUT /v3/domains/{domain_id}/config identity:get_domain_config GET /v3/domains/{domain_id}/config GET /v3/domains/{domain_id}/config/{group} GET /v3/domains/{domain_id}/config/{group}/{option} identity:get_security_compliance_domain_config GET /v3/domains/{domain_id}/config/security_compliance GET /v3/domains/{domain_id}/config/security_compliance/{option} identity:update_domain_config PATCH /v3/domains/{domain_id}/config PATCH /v3/domains/{domain_id}/config/{group} PATCH /v3/domains/{domain_id}/config/{group}/{option} identity:delete_domain_config DELETE /v3/domains/{domain_id}/config DELETE /v3/domains/{domain_id}/config/{group} DELETE /v3/domains/{domain_id}/config/{group}/{option} identity:get_domain_config_default GET /v3/domains/config/default GET /v3/domains/config/{group}/default GET /v3/domains/config/{group}/{option}/default identity:get_application_credential GET /v3/users/{user_id}/application_credentials/{application_credential_id} identity:list_application_credentials GET 
/v3/users/{user_id}/application_credentials identity:create_application_credential POST /v3/users/{user_id}/application_credential identity:delete_application_credential DELETE /v3/users/{user_id}/application_credential/{application_credential_id} identity:get_access_rule GET /v3/users/{user_id}/access_rules/{access_rule_id} identity:list_access_rules GET /v3/users/{user_id}/access_rules identity:delete_access_rule DELETE /v3/users/{user_id}/access_rules/{access_rule_id} ========================================================= === .. _grant_resources: *grant_resources* are: - /v3/projects/{project_id}/users/{user_id}/roles/{role_id} - /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} - /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} - /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} - /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/projects/{project_id}/users/{user_id}/roles/{role_id}/inherited_to_projects - /v3/OS-INHERIT/projects/{project_id}/groups/{group_id}/roles/{role_id}/inherited_to_projects .. _grant_collections: *grant_collections* are: - /v3/projects/{project_id}/users/{user_id}/roles - /v3/projects/{project_id}/groups/{group_id}/roles - /v3/domains/{domain_id}/users/{user_id}/roles - /v3/domains/{domain_id}/groups/{group_id}/roles - /v3/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/inherited_to_projects - /v3/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/inherited_to_projects ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/index.rst0000664000175000017500000000606400000000000017377 0ustar00zuulzuul00000000000000.. Copyright 2011-2012 OpenStack Foundation All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================================== Keystone, the OpenStack Identity Service ======================================== Keystone is an OpenStack service that provides API client authentication, service discovery, and distributed multi-tenant authorization by implementing `OpenStack's Identity API`_. This documentation is useful for contributors looking to get involved in our community, developers writing applications on top of OpenStack, and operators administering their own OpenStack deployments. This documentation is generated by the Sphinx toolkit and lives in the `source tree`_. Also see the :doc:`getting-started/community` page for other ways to interact with the community. .. _`OpenStack's Identity API`: https://docs.openstack.org/api-ref/identity/index.html .. _`source tree`: https://opendev.org/openstack/keystone/src/branch/master/doc/source Related Projects ~~~~~~~~~~~~~~~~ In addition to creating OpenStack's Identity Service, the Keystone team also provides a `WSGI middleware`_, an `Authentication library`_ and a `Python client library`_. .. _`WSGI middleware`: https://docs.openstack.org/keystonemiddleware/latest .. _`Authentication library`: https://docs.openstack.org/keystoneauth/latest .. _`Python client library`: https://docs.openstack.org/python-keystoneclient/latest Installation Guides ~~~~~~~~~~~~~~~~~~~ .. 
toctree:: :maxdepth: 2 install/index.rst General Information ~~~~~~~~~~~~~~~~~~~ This section contains general information related to keystone which is relevant to developers, users and operators. For documentation specific to any of these three, please see the subsequent sections. .. toctree:: :maxdepth: 2 getting-started/index.rst code_documentation.rst indices-tables.rst Contributor Documentation ~~~~~~~~~~~~~~~~~~~~~~~~~ This section contains the documentation needed for developing keystone. .. toctree:: :maxdepth: 2 contributor/index.rst User Documentation ~~~~~~~~~~~~~~~~~~ This section contains the documentation for end-users of keystone. .. toctree:: :maxdepth: 2 user/index.rst CLI Documentation ~~~~~~~~~~~~~~~~~ This section details information related to keystone management commands. .. toctree:: :maxdepth: 2 cli/index.rst Administrator Guides ~~~~~~~~~~~~~~~~~~~~ .. toctree:: :maxdepth: 3 admin/index.rst Configuration Options ~~~~~~~~~~~~~~~~~~~~~ .. toctree:: :maxdepth: 2 configuration/index.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/indices-tables.rst0000664000175000017500000000013200000000000021144 0ustar00zuulzuul00000000000000Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search`././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4861143 keystone-26.0.0/doc/source/install/0000775000175000017500000000000000000000000017176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4861143 keystone-26.0.0/doc/source/install/common/0000775000175000017500000000000000000000000020466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/doc/source/install/common/get-started-identity.inc0000664000175000017500000000535200000000000025240 0ustar00zuulzuul00000000000000Identity service overview ~~~~~~~~~~~~~~~~~~~~~~~~~ The OpenStack Identity service provides a single point of integration for managing authentication, authorization, and a catalog of services. The Identity service is typically the first service a user interacts with. Once authenticated, an end user can use their identity to access other OpenStack services. Likewise, other OpenStack services leverage the Identity service to ensure users are who they say they are and discover where other services are within the deployment. The Identity service can also integrate with some external user management systems (such as LDAP). Users and services can locate other services by using the service catalog, which is managed by the Identity service. As the name implies, a service catalog is a collection of available services in an OpenStack deployment. Each service can have one or many endpoints and each endpoint can be one of three types: admin, internal, or public. In a production environment, different endpoint types might reside on separate networks exposed to different types of users for security reasons. For instance, the public API network might be visible from the Internet so customers can manage their clouds. The admin API network might be restricted to operators within the organization that manages cloud infrastructure. The internal API network might be restricted to the hosts that contain OpenStack services. Also, OpenStack supports multiple regions for scalability. For simplicity, this guide uses the management network for all endpoint types and the default ``RegionOne`` region. Together, regions, services, and endpoints created within the Identity service comprise the service catalog for a deployment. Each OpenStack service in your deployment needs a service entry with corresponding endpoints stored in the Identity service. 
This can all be done after the Identity service has been installed and configured. The Identity service contains these components: Server A centralized server provides authentication and authorization services using a RESTful interface. Drivers Drivers or a service back end are integrated to the centralized server. They are used for accessing identity information in repositories external to OpenStack, and may already exist in the infrastructure where OpenStack is deployed (for example, SQL databases or LDAP servers). Modules Middleware modules run in the address space of the OpenStack component that is using the Identity service. These modules intercept service requests, extract user credentials, and send them to the centralized server for authorization. The integration between the middleware modules and OpenStack components uses the Python Web Server Gateway Interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/common/keystone-users.inc0000664000175000017500000001152400000000000024164 0ustar00zuulzuul00000000000000.. -*- rst -*- Create a domain, projects, users, and roles ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The Identity service provides authentication services for each OpenStack service. The authentication service uses a combination of domains, projects, users, and roles. #. Although the "default" domain already exists from the `keystone-manage bootstrap` step in this guide, a formal way to create a new domain would be: .. code-block:: console $ openstack domain create --description "An Example Domain" example +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | An Example Domain | | enabled | True | | id | 2f4f80574fd84fe6ba9067228ae0a50c | | name | example | | tags | [] | +-------------+----------------------------------+ .. end #. 
This guide uses a service project that contains a unique user for each service that you add to your environment. Create the ``service`` project: .. code-block:: console $ openstack project create --domain default \ --description "Service Project" service +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Service Project | | domain_id | default | | enabled | True | | id | 24ac7f19cd944f4cba1d77469b2a73ed | | is_domain | False | | name | service | | parent_id | default | | tags | [] | +-------------+----------------------------------+ .. end #. Regular (non-admin) tasks should use an unprivileged project and user. As an example, this guide creates the ``myproject`` project and ``myuser`` user. * Create the ``myproject`` project: .. code-block:: console $ openstack project create --domain default \ --description "Demo Project" myproject +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Demo Project | | domain_id | default | | enabled | True | | id | 231ad6e7ebba47d6a1e57e1cc07ae446 | | is_domain | False | | name | myproject | | parent_id | default | | tags | [] | +-------------+----------------------------------+ .. end .. note:: Do not repeat this step when creating additional users for this project. * Create the ``myuser`` user: .. code-block:: console $ openstack user create --domain default \ --password-prompt myuser User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | aeda23aa78f44e859900e22c24817832 | | name | myuser | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ .. end * Create the ``myrole`` role: .. 
code-block:: console $ openstack role create myrole +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | None | | id | 997ce8d05fc143ac97d83fdfb5998552 | | name | myrole | +-----------+----------------------------------+ .. end * Add the ``myrole`` role to the ``myproject`` project and ``myuser`` user: .. code-block:: console $ openstack role add --project myproject --user myuser myrole .. end .. note:: This command provides no output. .. note:: You can repeat this procedure to create additional projects and users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/common/openrc.inc0000664000175000017500000000736700000000000022464 0ustar00zuulzuul00000000000000.. -*- rst -*- Create OpenStack client environment scripts ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The previous sections used a combination of environment variables and command options to interact with the Identity service via the ``openstack`` client. To increase efficiency of client operations, OpenStack supports simple client environment scripts also known as OpenRC files. These scripts typically contain common options for all clients, but also support unique options. For more information, see the `OpenStack End User Guide `_. Creating the scripts -------------------- Create client environment scripts for the ``admin`` and ``demo`` projects and users. Future portions of this guide reference these scripts to load appropriate credentials for client operations. .. note:: The paths of the client environment scripts are unrestricted. For convenience, you can place the scripts in any location, however ensure that they are accessible and located in a secure place appropriate for your deployment, as they do contain sensitive credentials. #. Create and edit the ``admin-openrc`` file and add the following content: .. 
note:: The OpenStack client also supports using a ``clouds.yaml`` file. For more information, see the `os-client-config `_. .. code-block:: bash export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=admin export OS_USERNAME=admin export OS_PASSWORD=ADMIN_PASS export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2 .. end Replace ``ADMIN_PASS`` with the password you chose for the ``admin`` user in the Identity service. #. Create and edit the ``demo-openrc`` file and add the following content: .. code-block:: bash export OS_PROJECT_DOMAIN_NAME=Default export OS_USER_DOMAIN_NAME=Default export OS_PROJECT_NAME=myproject export OS_USERNAME=myuser export OS_PASSWORD=DEMO_PASS export OS_AUTH_URL=http://controller:5000/v3 export OS_IDENTITY_API_VERSION=3 export OS_IMAGE_API_VERSION=2 .. end Replace ``DEMO_PASS`` with the password you chose for the ``demo`` user in the Identity service. Using the scripts ----------------- To run clients as a specific project and user, you can simply load the associated client environment script prior to running them. For example: #. Load the ``admin-openrc`` file to populate environment variables with the location of the Identity service and the ``admin`` project and user credentials: .. code-block:: console $ . admin-openrc .. end #. Request an authentication token: .. 
code-block:: console $ openstack token issue +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:44:35.659723Z | | id | gAAAAABWvjYj-Zjfg8WXFaQnUd1DMYTBVrKw4h3fIagi5NoEmh21U72SrRv2trl | | | JWFYhLi2_uPR31Igf6A8mH2Rw9kv_bxNo1jbLNPLGzW_u5FC7InFqx0yYtTwa1e | | | eq2b0f6-18KZyQhs7F3teAta143kJEWuNEYET-y7u29y0be1_64KYkM7E | | project_id | 343d245e850143a096806dfaefa9afdc | | user_id | ac3377633149401296f6c0d92d79dc16 | +------------+-----------------------------------------------------------------+ .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/get-started-obs.rst0000664000175000017500000000005500000000000022734 0ustar00zuulzuul00000000000000.. include:: common/get-started-identity.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/get-started-rdo.rst0000664000175000017500000000005500000000000022735 0ustar00zuulzuul00000000000000.. include:: common/get-started-identity.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/get-started-ubuntu.rst0000664000175000017500000000005500000000000023473 0ustar00zuulzuul00000000000000.. 
include:: common/get-started-identity.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/index-obs.rst0000664000175000017500000000247300000000000021626 0ustar00zuulzuul00000000000000===================================================================== Keystone Installation Tutorial for openSUSE and SUSE Linux Enterprise ===================================================================== Abstract ~~~~~~~~ This guide will show you how to install OpenStack by using packages on openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 - for both SP1 and SP2 - through the Open Build Service Cloud repository. Explanations of configuration options and sample configuration files are included. .. warning:: This guide is a work-in-progress and is subject to updates frequently. Pre-release packages have been used for testing, and some instructions may not work with final versions. Please help us make this guide better by reporting any errors you encounter. Contents ~~~~~~~~ .. toctree:: :maxdepth: 2 get-started-obs keystone-install-obs keystone-users-obs keystone-verify-obs keystone-openrc-obs .. Pseudo only directive for each distribution used by the build tool. This pseudo only directive for toctree only works fine with Tox. When you directly build this guide with Sphinx, some navigation menu may not work properly. .. Keep this pseudo only directive not to break translation tool chain at the openstack-doc-tools repo until it is changed. .. 
end of contents ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/index-rdo.rst0000664000175000017500000000243100000000000021621 0ustar00zuulzuul00000000000000====================================================================== Keystone Installation Tutorial for Red Hat Enterprise Linux and CentOS ====================================================================== Abstract ~~~~~~~~ This guide will show you how to install Keystone by using packages available on Red Hat Enterprise Linux 8 and 9 and their derivatives through the RDO repository. Explanations of configuration options and sample configuration files are included. .. warning:: This guide is a work-in-progress and is subject to updates frequently. Pre-release packages have been used for testing, and some instructions may not work with final versions. Please help us make this guide better by reporting any errors you encounter. Contents ~~~~~~~~ .. toctree:: :maxdepth: 2 get-started-rdo keystone-install-rdo keystone-users-rdo keystone-verify-rdo keystone-openrc-rdo .. Pseudo only directive for each distribution used by the build tool. This pseudo only directive for toctree only works fine with Tox. When you directly build this guide with Sphinx, some navigation menu may not work properly. .. Keep this pseudo only directive not to break translation tool chain at the openstack-doc-tools repo until it is changed. .. 
end of contents ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/index-ubuntu.rst0000664000175000017500000000230300000000000022355 0ustar00zuulzuul00000000000000========================================= Keystone Installation Tutorial for Ubuntu ========================================= Abstract ~~~~~~~~ This guide will walk through an installation by using packages available through Canonical's Ubuntu Cloud archive repository for Ubuntu 16.04 (LTS). Explanations of configuration options and sample configuration files are included. .. warning:: This guide is a work-in-progress and is subject to updates frequently. Pre-release packages have been used for testing, and some instructions may not work with final versions. Please help us make this guide better by reporting any errors you encounter. Contents ~~~~~~~~ .. toctree:: :maxdepth: 2 get-started-ubuntu keystone-install-ubuntu keystone-users-ubuntu keystone-verify-ubuntu keystone-openrc-ubuntu .. Pseudo only directive for each distribution used by the build tool. This pseudo only directive for toctree only works fine with Tox. When you directly build this guide with Sphinx, some navigation menu may not work properly. .. Keep this pseudo only directive not to break translation tool chain at the openstack-doc-tools repo until it is changed. .. end of contents ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/index.rst0000664000175000017500000000144300000000000021041 0ustar00zuulzuul00000000000000================================= Keystone Installation Tutorial ================================= The OpenStack system consists of several key services that are separately installed. 
These services work together depending on your cloud needs and include the Compute, Identity, Networking, Image, Block Storage, Object Storage, Telemetry, Orchestration, and Database services. You can install any of these projects separately and configure them stand-alone or as connected entities. This section describes how to install and configure the OpenStack Identity service, code-named keystone, on the controller node. For scalability purposes, this configuration deploys Fernet tokens and the Apache HTTP server to handle requests. .. toctree:: :maxdepth: 3 index-obs index-rdo index-ubuntu ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-install-obs.rst0000664000175000017500000001536700000000000024032 0ustar00zuulzuul00000000000000Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the OpenStack Identity service, code-named keystone, on the controller node. For scalability purposes, this configuration deploys Fernet tokens and the Apache HTTP server to handle requests. .. note:: Ensure that you have completed the prerequisite installation steps in the `Openstack Install Guide `_ before proceeding. Prerequisites ------------- Before you install and configure the Identity service, you must create a database. .. note:: Before you begin, ensure you have the most recent version of ``python-pyasn1`` `installed `_. #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end 2. Create the ``keystone`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE keystone; .. end #. Grant proper access to the ``keystone`` database: .. 
code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; .. end Replace ``KEYSTONE_DBPASS`` with a suitable password. #. Exit the database access client. .. _keystone-install-configure-obs: Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst .. note:: Starting with the Newton release, SUSE OpenStack packages are shipping with the upstream default configuration files. For example ``/etc/keystone/keystone.conf``, with customizations in ``/etc/keystone/keystone.conf.d/010-keystone.conf``. While the following instructions modify the default configuration file, adding a new file in ``/etc/keystone/keystone.conf.d`` achieves the same result. #. Run the following command to install the packages: .. code-block:: console # zypper install openstack-keystone apache2 apache2-mod_wsgi .. end 2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/keystone/keystone.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone .. end Replace ``KEYSTONE_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[token]`` section, configure the Fernet token provider: .. path /etc/keystone/keystone.conf .. code-block:: ini [token] # ... provider = fernet .. end 3. Populate the Identity service database: .. code-block:: console # su -s /bin/sh -c "keystone-manage db_sync" keystone .. end 4. Initialize Fernet key repositories: .. 
note:: The ``--keystone-user`` and ``--keystone-group`` flags are used to specify the operating system's user/group that will be used to run keystone. These are provided to allow running keystone under another operating system user/group. In the example below, we call the user & group ``keystone``. .. code-block:: console # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone .. end 5. Bootstrap the Identity service: .. note:: Before the Queens release, keystone needed to be run on two separate ports to accommodate the Identity v2 API which ran a separate admin-only service commonly on port 35357. With the removal of the v2 API, keystone can be run on the same port for all interfaces. .. code-block:: console # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \ --bootstrap-admin-url http://controller:5000/v3/ \ --bootstrap-internal-url http://controller:5000/v3/ \ --bootstrap-public-url http://controller:5000/v3/ \ --bootstrap-region-id RegionOne .. end Replace ``ADMIN_PASS`` with a suitable password for an administrative user. .. _suse_configure_apache: Configure the Apache HTTP server -------------------------------- #. Edit the ``/etc/sysconfig/apache2`` file and configure the ``APACHE_SERVERNAME`` option to reference the controller node: .. path /etc/sysconfig/apache2 .. code-block:: shell APACHE_SERVERNAME="controller" .. end The ``APACHE_SERVERNAME`` entry will need to be added if it does not already exist. #. Create the ``/etc/apache2/conf.d/wsgi-keystone.conf`` file with the following content: .. path /etc/apache2/conf.d/wsgi-keystone.conf .. 
code-block:: apache Listen 5000 WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} WSGIProcessGroup keystone-public WSGIScriptAlias / /usr/bin/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/keystone.log CustomLog /var/log/apache2/keystone_access.log combined Require all granted .. end #. Recursively change the ownership of the ``/etc/keystone`` directory: .. code-block:: console # chown -R keystone:keystone /etc/keystone .. end SSL ^^^ A secure deployment should have the web server configured to use SSL or running behind an SSL terminator. Finalize the installation ------------------------- #. Start the Apache HTTP service and configure it to start when the system boots: .. code-block:: console # systemctl enable apache2.service # systemctl start apache2.service .. end 2. Configure the administrative account by setting the proper environmental variables: .. code-block:: console $ export OS_USERNAME=admin $ export OS_PASSWORD=ADMIN_PASS $ export OS_PROJECT_NAME=admin $ export OS_USER_DOMAIN_NAME=Default $ export OS_PROJECT_DOMAIN_NAME=Default $ export OS_AUTH_URL=http://controller:5000/v3 $ export OS_IDENTITY_API_VERSION=3 .. end These values shown here are the default ones created from ``keystone-manage bootstrap``. Replace ``ADMIN_PASS`` with the password used in the ``keystone-manage bootstrap`` command in `keystone-install-configure-obs`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-install-rdo.rst0000664000175000017500000001277300000000000024031 0ustar00zuulzuul00000000000000Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the OpenStack Identity service, code-named keystone, on the controller node. 
For scalability purposes, this configuration deploys Fernet tokens and the Apache HTTP server to handle requests. .. note:: Ensure that you have completed the prerequisite installation steps in the `Openstack Install Guide `_ before proceeding. Prerequisites ------------- Before you install and configure the Identity service, you must create a database. #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console $ mysql -u root -p .. end 2. Create the ``keystone`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE keystone; .. end #. Grant proper access to the ``keystone`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; .. end Replace ``KEYSTONE_DBPASS`` with a suitable password. #. Exit the database access client. .. _keystone-install-configure-rdo: Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst #. Run the following command to install the packages: .. code-block:: console # dnf install openstack-keystone httpd python3-mod_wsgi .. end 2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/keystone/keystone.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone .. end Replace ``KEYSTONE_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. .. note:: The host, ``controller`` in this example, must be resolvable. * In the ``[token]`` section, configure the Fernet token provider: .. path /etc/keystone/keystone.conf .. code-block:: ini [token] # ... 
provider = fernet .. end 3. Populate the Identity service database: .. code-block:: console # su -s /bin/sh -c "keystone-manage db_sync" keystone .. end 4. Initialize Fernet key repositories: .. note:: The ``--keystone-user`` and ``--keystone-group`` flags are used to specify the operating system's user/group that will be used to run keystone. These are provided to allow running keystone under another operating system user/group. In the example below, we call the user & group ``keystone``. .. code-block:: console # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone .. end 5. Bootstrap the Identity service: .. note:: Before the Queens release, keystone needed to be run on two separate ports to accommodate the Identity v2 API which ran a separate admin-only service commonly on port 35357. With the removal of the v2 API, keystone can be run on the same port for all interfaces. .. code-block:: console # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \ --bootstrap-admin-url http://controller:5000/v3/ \ --bootstrap-internal-url http://controller:5000/v3/ \ --bootstrap-public-url http://controller:5000/v3/ \ --bootstrap-region-id RegionOne .. end Replace ``ADMIN_PASS`` with a suitable password for an administrative user. .. _redhat_configure_apache: Configure the Apache HTTP server -------------------------------- #. Edit the ``/etc/httpd/conf/httpd.conf`` file and configure the ``ServerName`` option to reference the controller node: .. path /etc/httpd/conf/httpd .. code-block:: apache ServerName controller .. end The ``ServerName`` entry will need to be added if it does not already exist. #. Create a link to the ``/usr/share/keystone/wsgi-keystone.conf`` file: .. code-block:: console # ln -s /usr/share/keystone/wsgi-keystone.conf /etc/httpd/conf.d/ .. 
end SSL ^^^ A secure deployment should have the web server configured to use SSL or running behind an SSL terminator. Finalize the installation ------------------------- #. Start the Apache HTTP service and configure it to start when the system boots: .. code-block:: console # systemctl enable httpd.service # systemctl start httpd.service .. end 2. Configure the administrative account by setting the proper environmental variables: .. code-block:: console $ export OS_USERNAME=admin $ export OS_PASSWORD=ADMIN_PASS $ export OS_PROJECT_NAME=admin $ export OS_USER_DOMAIN_NAME=Default $ export OS_PROJECT_DOMAIN_NAME=Default $ export OS_AUTH_URL=http://controller:5000/v3 $ export OS_IDENTITY_API_VERSION=3 .. end These values shown here are the default ones created from ``keystone-manage bootstrap``. Replace ``ADMIN_PASS`` with the password used in the ``keystone-manage bootstrap`` command in `keystone-install-configure-rdo`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-install-ubuntu.rst0000664000175000017500000001271000000000000024556 0ustar00zuulzuul00000000000000Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the OpenStack Identity service, code-named keystone, on the controller node. For scalability purposes, this configuration deploys Fernet tokens and the Apache HTTP server to handle requests. .. note:: Ensure that you have completed the prerequisite installation steps in the `Openstack Install Guide `_ before proceeding. Prerequisites ------------- Before you install and configure the Identity service, you must create a database. #. Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql .. end 2. Create the ``keystone`` database: .. code-block:: console MariaDB [(none)]> CREATE DATABASE keystone; .. end #. 
Grant proper access to the ``keystone`` database: .. code-block:: console MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'localhost' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; MariaDB [(none)]> GRANT ALL PRIVILEGES ON keystone.* TO 'keystone'@'%' \ IDENTIFIED BY 'KEYSTONE_DBPASS'; .. end Replace ``KEYSTONE_DBPASS`` with a suitable password. #. Exit the database access client. .. _keystone-install-configure-ubuntu: Install and configure components -------------------------------- .. include:: shared/note_configuration_vary_by_distribution.rst .. note:: This guide uses the Apache HTTP server with ``mod_wsgi`` to serve Identity service requests on port 5000. By default, the keystone service still listens on this port. The package handles all of the Apache configuration for you (including the activation of the ``mod_wsgi`` apache2 module and keystone configuration in Apache). #. Run the following command to install the packages: .. code-block:: console # apt install keystone .. end 2. Edit the ``/etc/keystone/keystone.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. path /etc/keystone/keystone.conf .. code-block:: ini [database] # ... connection = mysql+pymysql://keystone:KEYSTONE_DBPASS@controller/keystone .. end Replace ``KEYSTONE_DBPASS`` with the password you chose for the database. .. note:: Comment out or remove any other ``connection`` options in the ``[database]`` section. * In the ``[token]`` section, configure the Fernet token provider: .. path /etc/keystone/keystone.conf .. code-block:: ini [token] # ... provider = fernet .. end 3. Populate the Identity service database: .. code-block:: console # su -s /bin/sh -c "keystone-manage db_sync" keystone .. end 4. Initialize Fernet key repositories: .. note:: The ``--keystone-user`` and ``--keystone-group`` flags are used to specify the operating system's user/group that will be used to run keystone. 
These are provided to allow running keystone under another operating system user/group. In the example below, we call the user & group ``keystone``. .. code-block:: console # keystone-manage fernet_setup --keystone-user keystone --keystone-group keystone # keystone-manage credential_setup --keystone-user keystone --keystone-group keystone .. end 5. Bootstrap the Identity service: .. note:: Before the Queens release, keystone needed to be run on two separate ports to accommodate the Identity v2 API which ran a separate admin-only service commonly on port 35357. With the removal of the v2 API, keystone can be run on the same port for all interfaces. .. code-block:: console # keystone-manage bootstrap --bootstrap-password ADMIN_PASS \ --bootstrap-admin-url http://controller:5000/v3/ \ --bootstrap-internal-url http://controller:5000/v3/ \ --bootstrap-public-url http://controller:5000/v3/ \ --bootstrap-region-id RegionOne .. end Replace ``ADMIN_PASS`` with a suitable password for an administrative user. .. _ubuntu_configure_apache: Configure the Apache HTTP server -------------------------------- #. Edit the ``/etc/apache2/apache2.conf`` file and configure the ``ServerName`` option to reference the controller node: .. path /etc/apache2/apache2.conf .. code-block:: apache ServerName controller .. end The ``ServerName`` entry will need to be added if it does not already exist. SSL ^^^ A secure deployment should have the web server configured to use SSL or running behind an SSL terminator. Finalize the installation ------------------------- #. Restart the Apache service: .. code-block:: console # service apache2 restart .. end 2. Configure the administrative account by setting the proper environmental variables: .. 
code-block:: console $ export OS_USERNAME=admin $ export OS_PASSWORD=ADMIN_PASS $ export OS_PROJECT_NAME=admin $ export OS_USER_DOMAIN_NAME=Default $ export OS_PROJECT_DOMAIN_NAME=Default $ export OS_AUTH_URL=http://controller:5000/v3 $ export OS_IDENTITY_API_VERSION=3 .. end These values shown here are the default ones created from ``keystone-manage bootstrap``. Replace ``ADMIN_PASS`` with the password used in the ``keystone-manage bootstrap`` command in `keystone-install-configure-ubuntu`_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-openrc-obs.rst0000664000175000017500000000003700000000000023636 0ustar00zuulzuul00000000000000.. include:: common/openrc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-openrc-rdo.rst0000664000175000017500000000003700000000000023637 0ustar00zuulzuul00000000000000.. include:: common/openrc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-openrc-ubuntu.rst0000664000175000017500000000003700000000000024375 0ustar00zuulzuul00000000000000.. include:: common/openrc.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-users-obs.rst0000664000175000017500000000004700000000000023512 0ustar00zuulzuul00000000000000.. include:: common/keystone-users.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-users-rdo.rst0000664000175000017500000000004700000000000023513 0ustar00zuulzuul00000000000000.. 
include:: common/keystone-users.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-users-ubuntu.rst0000664000175000017500000000004700000000000024251 0ustar00zuulzuul00000000000000.. include:: common/keystone-users.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-verify-obs.rst0000664000175000017500000000540100000000000023654 0ustar00zuulzuul00000000000000Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Identity service before installing other services. .. note:: Perform these commands on the controller node. #. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD`` environment variable: .. code-block:: console $ unset OS_AUTH_URL OS_PASSWORD .. end #. As the ``admin`` user, request an authentication token: .. code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name admin --os-username admin token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:14:07.056119Z | | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv | | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 | | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws | | project_id | 343d245e850143a096806dfaefa9afdc | | user_id | ac3377633149401296f6c0d92d79dc16 | +------------+-----------------------------------------------------------------+ .. end .. note:: This command uses the password for the ``admin`` user. #. As the ``myuser`` user created in the previous section, request an authentication token: .. 
code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name myproject --os-username myuser token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:15:39.014479Z | | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW | | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ | | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U | | project_id | ed0b60bf607743088218b0a533d5943f | | user_id | 58126687cbcc4888bfa9ab73a2256f27 | +------------+-----------------------------------------------------------------+ .. end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-verify-rdo.rst0000664000175000017500000000540100000000000023655 0ustar00zuulzuul00000000000000Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Identity service before installing other services. .. note:: Perform these commands on the controller node. #. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD`` environment variable: .. code-block:: console $ unset OS_AUTH_URL OS_PASSWORD .. end #. As the ``admin`` user, request an authentication token: .. 
code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name admin --os-username admin token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:14:07.056119Z | | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv | | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 | | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws | | project_id | 343d245e850143a096806dfaefa9afdc | | user_id | ac3377633149401296f6c0d92d79dc16 | +------------+-----------------------------------------------------------------+ .. end .. note:: This command uses the password for the ``admin`` user. #. As the ``myuser`` user created in the previous section, request an authentication token: .. code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name myproject --os-username myuser token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:15:39.014479Z | | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW | | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ | | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U | | project_id | ed0b60bf607743088218b0a533d5943f | | user_id | 58126687cbcc4888bfa9ab73a2256f27 | +------------+-----------------------------------------------------------------+ .. 
end ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/keystone-verify-ubuntu.rst0000664000175000017500000000537100000000000024421 0ustar00zuulzuul00000000000000Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Identity service before installing other services. .. note:: Perform these commands on the controller node. #. Unset the temporary ``OS_AUTH_URL`` and ``OS_PASSWORD`` environment variable: .. code-block:: console $ unset OS_AUTH_URL OS_PASSWORD .. end #. As the ``admin`` user, request an authentication token: .. code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name admin --os-username admin token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:14:07.056119Z | | id | gAAAAABWvi7_B8kKQD9wdXac8MoZiQldmjEO643d-e_j-XXq9AmIegIbA7UHGPv | | | atnN21qtOMjCFWX7BReJEQnVOAj3nclRQgAYRsfSU_MrsuWb4EDtnjU7HEpoBb4 | | | o6ozsA_NmFWEpLeKy0uNn_WeKbAhYygrsmQGA49dclHVnz-OMVLiyM9ws | | project_id | 343d245e850143a096806dfaefa9afdc | | user_id | ac3377633149401296f6c0d92d79dc16 | +------------+-----------------------------------------------------------------+ .. end .. note:: This command uses the password for the ``admin`` user. #. As the ``myuser`` user created in the previous, request an authentication token: .. 
code-block:: console $ openstack --os-auth-url http://controller:5000/v3 \ --os-project-domain-name Default --os-user-domain-name Default \ --os-project-name myproject --os-username myuser token issue Password: +------------+-----------------------------------------------------------------+ | Field | Value | +------------+-----------------------------------------------------------------+ | expires | 2016-02-12T20:15:39.014479Z | | id | gAAAAABWvi9bsh7vkiby5BpCCnc-JkbGhm9wH3fabS_cY7uabOubesi-Me6IGWW | | | yQqNegDDZ5jw7grI26vvgy1J5nCVwZ_zFRqPiz_qhbq29mgbQLglbkq6FQvzBRQ | | | JcOzq3uwhzNxszJWmzGC7rJE_H0A_a3UFhqv8M4zMRYSbS2YF0MyFmp_U | | project_id | ed0b60bf607743088218b0a533d5943f | | user_id | 58126687cbcc4888bfa9ab73a2256f27 | +------------+-----------------------------------------------------------------+ .. end ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4861143 keystone-26.0.0/doc/source/install/shared/0000775000175000017500000000000000000000000020444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/install/shared/note_configuration_vary_by_distribution.rst0000664000175000017500000000046600000000000031432 0ustar00zuulzuul00000000000000.. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/doc/source/user/0000775000175000017500000000000000000000000016506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/application_credentials.rst0000664000175000017500000004066500000000000024133 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _application_credentials: ======================= Application Credentials ======================= Users can create application credentials to allow their applications to authenticate to keystone. Users can delegate a subset of their role assignments on a project to an application credential, granting the application the same or restricted authorization to a project. With application credentials, applications authenticate with the application credential ID and a secret string which is not the user's password. This way, the user's password is not embedded in the application's configuration, which is especially important for users whose identities are managed by an external system such as LDAP or a single-signon system. See the `Identity API reference`_ for more information on authenticating with and managing application credentials. .. 
_`Identity API reference`: https://docs.openstack.org/api-ref/identity/v3/index.html#application-credentials Managing Application Credentials ================================ Create an application credential using python-openstackclient: .. code-block:: console $ openstack application credential create monitoring +--------------+----------------------------------------------------------------------------------------+ | Field | Value | +--------------+----------------------------------------------------------------------------------------+ | description | None | | expires_at | None | | id | 26bb287fd56a41f8a577c47f79221187 | | name | monitoring | | project_id | e99b6f4b9bf84a9da27e20c9cbfe887a | | roles | Member anotherrole | | secret | PJXxBFGPOLwdl3PA6tSivJT9S4RpWhLcNZH2gXzCoxX1C2cnZsj2_Xmfw-LE7Wc-NwuJEYoHcG0gQ5bjWwe-bg | | unrestricted | False | +--------------+----------------------------------------------------------------------------------------+ The only required parameter is a name. The application credential is created for the project to which the user is currently scoped with the same role assignments the user has on that project. Keystone will automatically generate a secret string that will be revealed once at creation time. You can also provide your own secret, if desired: .. code-block:: console $ openstack application credential create monitoring --secret securesecret +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | description | None | | expires_at | None | | id | bc257241e21747768c83fb9806af392d | | name | monitoring | | project_id | e99b6f4b9bf84a9da27e20c9cbfe887a | | roles | Member anotherrole | | secret | securesecret | | unrestricted | False | +--------------+----------------------------------+ The secret is hashed before it is stored, so the original secret is not retrievable after creation. If the secret is lost, a new application credential must be created. 
If none are provided, the application credential is created with the same role assignments on the project that the user has. You can find out what role assignments you have on a project by examining your token or your keystoneauth session: .. code-block:: python >>> mysession.auth.auth_ref.role_names [u'anotherrole', u'Member'] If you have more than one role assignment on a project, you can grant your application credential only a subset of your role assignments if desired. This is useful if you have administrator privileges on a project but only want the application to have basic membership privileges, or if you have basic membership privileges but want the application to only have read-only privileges. You cannot grant the application a role assignment that your user does not already have; for instance, if you are an admin on a project, and you want your application to have read-only access to the project, you must acquire a read-only role assignment on that project yourself before you can delegate it to the application credential. Removing a user's role assignment on a project will invalidate the user's application credentials for that project. .. code-block:: console $ openstack application credential create monitoring --role Member +--------------+----------------------------------------------------------------------------------------+ | Field | Value | +--------------+----------------------------------------------------------------------------------------+ | description | None | | expires_at | None | | id | 5d04e42491a54e83b313aa2625709411 | | name | monitoring | | project_id | e99b6f4b9bf84a9da27e20c9cbfe887a | | roles | Member | | secret | vALEOMENxB_QaKFZOA2XOd7stwrhTlqPKrOdrXXM5BORss9u3O6GT-w_HYCPaZbtg96sDPCdtzVARZLpgUOY_g | | unrestricted | False | +--------------+----------------------------------------------------------------------------------------+ An alternative way to limit the application credential's privileges is to use :ref:`access_rules`. 
You can provide an expiration date for application credentials: .. code-block:: console $ openstack application credential create monitoring --expiration '2019-02-12T20:52:43' +--------------+----------------------------------------------------------------------------------------+ | Field | Value | +--------------+----------------------------------------------------------------------------------------+ | description | None | | expires_at | 2019-02-12T20:52:43.000000 | | id | 4ea8c4a84f7b4c65a3d84460be9cd1f7 | | name | monitoring | | project_id | e99b6f4b9bf84a9da27e20c9cbfe887a | | roles | Member anotherrole | | secret | _My16dlySn6jr7pGvBxjcMrmPA0MCpYlkKWs3gpY3-Ybk05yt2Hh83uMdTLPWlFeh8lOXajIAVHrQaBQ06iz5Q | | unrestricted | False | +--------------+----------------------------------------------------------------------------------------+ By default, application credentials are restricted from creating or deleting other application credentials and from creating or deleting trusts. If your application needs to be able to perform these actions and you accept the risks involved, you can disable this protection: .. warning:: Restrictions on these Identity operations are deliberately imposed as a safeguard to prevent a compromised application credential from regenerating itself. Disabling this restriction poses an inherent added risk. .. 
code-block:: console $ openstack application credential create monitoring --unrestricted +--------------+----------------------------------------------------------------------------------------+ | Field | Value | +--------------+----------------------------------------------------------------------------------------+ | description | None | | expires_at | None | | id | 0a0372dbedfb4e82ab66449c3316ef1e | | name | monitoring | | project_id | e99b6f4b9bf84a9da27e20c9cbfe887a | | roles | Member anotherrole | | secret | ArOy6DYcLeLTRlTmfvF1TH1QmRzYbmD91cbVPOHL3ckyRaLXlaq5pTGJqvCvqg6leEvTI1SQeX3QK-3iwmdPxg | | unrestricted | True | +--------------+----------------------------------------------------------------------------------------+ .. _access_rules: Access Rules ============ In addition to delegating a subset of roles to an application credential, you may also delegate more fine-grained access control by using access rules. .. note:: Application credentials with access rules require additional configuration of each service that will use it. See below for details. If application credentials with access rules are required, an OpenStack service using keystonemiddleware to authenticate with keystone, needs to define ``service_type`` in its configuration file. Following is an example for the cinder V3 service: .. code-block:: ini [keystone_authtoken] service_type = volumev3 For other OpenStack sevices, their types can be obtained using the OpenStack client. For example: .. code-block:: console $ openstack service list -c Name -c Type +-----------+-----------+ | Name | Type | +-----------+-----------+ | glance | image | | cinderv3 | volumev3 | | cinderv2 | volumev2 | | keystone | identity | | nova | compute | | neutron | network | | placement | placement | +-----------+-----------+ .. note:: Updates to the configuration files of a service require restart of the appropriate services for the changes to take effect. 
In order to create an example application credential that is constricted to creating servers in nova, the user can add the following access rules: .. code-block:: console openstack application credential create scaler-upper --access-rules '[ { "path": "/v2.1/servers", "method": "POST", "service": "compute" } ]' The ``"path"`` attribute of application credential access rules uses a wildcard syntax to make it more flexible. For example, to create an application credential that is constricted to listing server IP addresses, you could use either of the following access rules: :: [ { "path": "/v2.1/servers/*/ips", "method": "GET", "service": "compute" } ] or equivalently: :: [ { "path": "/v2.1/servers/{server_id}/ips", "method": "GET", "service": "compute" } ] In both cases, a request path containing any server ID will match the access rule. For even more flexibility, the recursive wildcard ``**`` indicates that request paths containing any number of ``/`` will be matched. For example: :: [ { "path": "/v2.1/**", "method": "GET", "service": "compute" } ] will match any nova API for version 2.1. An access rule created for one application credential can be re-used by providing its ID to another application credential. You can list existing access rules: .. code-block:: console $ openstack access rule list +--------+---------+--------+---------------+ | ID | Service | Method | Path | +--------+---------+--------+---------------+ | abcdef | compute | POST | /v2.1/servers | +--------+---------+--------+---------------+ and create an application credential using that rule: .. code-block:: console $ openstack application credential create scaler-upper-02 \ --access-rules '[{"id": "abcdef"}]' Using Application Credentials ============================= Applications can authenticate using the application_credential auth method. For a service using keystonemiddleware to authenticate with keystone, the auth section would look like this: .. 
code-block:: ini [keystone_authtoken] auth_url = https://keystone.server/identity/v3 auth_type = v3applicationcredential application_credential_id = 6cb5fa6a13184e6fab65ba2108adf50c application_credential_secret= glance_secret You can also identify your application credential with its name and the name or ID of its owner. For example: .. code-block:: ini [keystone_authtoken] auth_url = https://keystone.server/identity/v3 auth_type = v3applicationcredential username = glance user_domain_name = Default application_credential_name = glance_cred application_credential_secret = glance_secret Rotating Application Credentials ================================ A user can create multiple application credentials with the same role assignments on the same project. This allows the application credential to be gracefully rotated with minimal or no downtime for your application. In contrast, changing a service user's password results in immediate downtime for any application using that password until the application can be updated with the new password. .. note:: Rotating application credentials is essential if a team member who has knowledge of the application credential identifier and secret leaves the team for any reason. Rotating application credentials is also recommended as part of regular application maintenance. Rotating an application credential is a simple process: #. Create a new application credential. Application credential names must be unique within the user's set of application credentials, so this new application credential must not have the same name as the old one. #. Update your application's configuration to use the new ID (or name and user identifier) and the new secret. For a distributed application, this can be done one node at a time. #. When your application is fully set up with the new application credential, delete the old one. 
Frequently Asked Questions ========================== Why is the application credential owned by the user rather than the project? ---------------------------------------------------------------------------- Having application credentials be owned by a project rather than by an individual user would be convenient for cases where teams want applications to continue running after the creating user has left the team. However, this would open up a security hole by which the creating user could still gain access to the resources accessible by the application credential even after the user is disabled. Rather than relying on the application credential persisting after users are disabled, it is recommended to proactively rotate the application credential to another user prior to the original creating user being disabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/index.rst0000664000175000017500000000205100000000000020345 0ustar00zuulzuul00000000000000.. Copyright 2017 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================== User Documentation ================== An end user can find the specific API documentation here, `OpenStack's Identity API`_. .. _`OpenStack's Identity API`: https://docs.openstack.org/api-ref/identity/v3 .. 
toctree:: :maxdepth: 1 supported_clients.rst application_credentials.rst trusts.rst json_home.rst ../api_curl_examples.rst multi-factor-authentication.rst ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/json_home.rst0000664000175000017500000000706500000000000021231 0ustar00zuulzuul00000000000000.. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================ API Discovery with JSON Home ============================ What is JSON Home? ================== JSON Home describes a method of API discovery for non-browser HTTP clients. The `draft`_ is still in review, but keystone supplies an implementation accessible to end-users. The result of calling keystone's JSON Home API is a JSON document that informs the user about API endpoints, where to find them, and even information about the API's status (e.g. experimental, supported, deprecated). More information keystone's implementation of JSON Home can be found in the `specification`_. .. _`draft`: https://mnot.github.io/I-D/json-home/ .. _`specification`: http://specs.openstack.org/openstack/keystone-specs/specs/keystone/juno/json-home.html Requesting JSON Home Documents ============================== Requesting keystone's JSON Home document is easy. The API does not require a token, but future implementations might expand in it's protection with token validation and enforcement. 
To get a JSON Home document, just query a keystone endpoint with ``application/json-home`` specified in the ``Accept`` header: .. code-block:: bash curl -X GET -H "Accept: application/json-home" http://example.com/identity/ The result will be a JSON document containing a list of ``resources``: .. code-block:: console { "resources": [ "https://docs.openstack.org/api/openstack-identity/3/ext/OS-TRUST/1.0/rel/trusts": { "href": "/v3/OS-TRUST/trusts" }, "https://docs.openstack.org/api/openstack-identity/3/ext/s3tokens/1.0/rel/s3tokens": { "href": "/v3/s3tokens" }, "https://docs.openstack.org/api/openstack-identity/3/rel/application_credential": { "href-template": "/v3/users/{user_id}/application_credentials/{application_credential_id}", "href-vars": { "application_credential_id": "https://docs.openstack.org/api/openstack-identity/3/param/application_credential_id", "user_id": "https://docs.openstack.org/api/openstack-identity/3/param/user_id" } }, "https://docs.openstack.org/api/openstack-identity/3/rel/auth_catalog": { "href": "/v3/auth/catalog" }, "https://docs.openstack.org/api/openstack-identity/3/rel/auth_domains": { "href": "/v3/auth/domains" }, "https://docs.openstack.org/api/openstack-identity/3/rel/auth_projects": { "href": "/v3/auth/projects" }, "https://docs.openstack.org/api/openstack-identity/3/rel/auth_system": { "href": "/v3/auth/system" }, ... ] } The list of resources can then be parsed based on the relationship key for a dictionary of data about that endpoint. This includes a path where users can find interact with the endpoint for that specific resources. API status information will also be present. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/multi-factor-authentication.rst0000664000175000017500000000677300000000000024700 0ustar00zuulzuul00000000000000.. 
_multi_factor_authentication_user_guide: =========================== Multi-Factor Authentication =========================== Configuring MFA =============== Configuring MFA right now has to be done entirely by an admin, for how to do that, see :ref:`multi_factor_authentication`. Using MFA ========= Multi-Factor Authentication with Keystone can be used in two ways, either you treat it like current single method authentication and provide all the details upfront, or you doing it as a multi-step process with auth receipts. Single step ----------- In the single step approach you would supply all the required authentication methods in your request for a token. Here is an example using 2 factors (``password`` and ``totp``): .. code-block:: json { "auth": { "identity": { "methods": [ "password", "totp" ], "totp": { "user": { "id": "2ed179c6af12496cafa1d279cb51a78f", "passcode": "012345" } }, "password": { "user": { "id": "2ed179c6af12496cafa1d279cb51a78f", "password": "super sekret pa55word" } } } } } If all the supplied auth methods are valid, Keystone will return a token. Multi-Step ---------- In the multi-step approach you can supply any one method from the auth rules: Again we do a 2 factor example, starting with ``password``: .. code-block:: json { "auth": { "identity": { "methods": [ "password" ], "password": { "user": { "id": "2ed179c6af12496cafa1d279cb51a78f", "password": "super sekret pa55word" } } } } } Provided the method is valid, Keystone will still return a ``401``, but will in the response header ``Openstack-Auth-Receipt`` return a receipt of valid auth method for reuse later. The response body will also contain information about the auth receipt, and what auth methods may be missing: .. 
code-block:: json { "receipt":{ "expires_at":"2018-07-05T08:39:23.000000Z", "issued_at":"2018-07-05T08:34:23.000000Z", "methods": [ "password" ], "user": { "domain": { "id": "default", "name": "Default" }, "id": "ee4dfb6e5540447cb3741905149d9b6e", "name": "admin" } }, "required_auth_methods": [ ["totp", "password"] ] } Now you can continue authenticating by supplying the missing auth methods, and supplying the header ``Openstack-Auth-Receipt`` as gotten from the previous response: .. code-block:: json { "auth": { "identity": { "methods": [ "totp" ], "totp": { "user": { "id": "2ed179c6af12496cafa1d279cb51a78f", "passcode": "012345" } } } } } Provided the auth methods are valid, Keystone will now supply a token. If not you can try again until the auth receipt expires (e.g in case of TOTP timeout). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/supported_clients.rst0000664000175000017500000000653000000000000023012 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Supported clients ================= There are two supported clients, `python-keystoneclient`_ project provides python bindings and `python-openstackclient`_ provides a command line interface. .. _`python-openstackclient`: https://docs.openstack.org/python-openstackclient/latest .. 
_`python-keystoneclient`: https://docs.openstack.org/python-keystoneclient/latest Authenticating with a Password via CLI -------------------------------------- To authenticate with keystone using a password and ``python-openstackclient``, set the following flags, note that the following user referenced below should be granted the ``admin`` role. * ``--os-username OS_USERNAME``: Name of your user * ``--os-user-domain-name OS_USER_DOMAIN_NAME``: Name of the user's domain * ``--os-password OS_PASSWORD``: Password for your user * ``--os-project-name OS_PROJECT_NAME``: Name of your project * ``--os-project-domain-name OS_PROJECT_DOMAIN_NAME``: Name of the project's domain * ``--os-auth-url OS_AUTH_URL``: URL of the keystone authentication server * ``--os-identity-api-version OS_IDENTITY_API_VERSION``: This should always be set to 3 You can also set these variables in your environment so that they do not need to be passed as arguments each time: .. code-block:: bash $ export OS_USERNAME=my_username $ export OS_USER_DOMAIN_NAME=my_user_domain $ export OS_PASSWORD=my_password $ export OS_PROJECT_NAME=my_project $ export OS_PROJECT_DOMAIN_NAME=my_project_domain $ export OS_AUTH_URL=http://localhost:5000/v3 $ export OS_IDENTITY_API_VERSION=3 For example, the commands ``user list``, ``token issue`` and ``project create`` can be invoked as follows: .. 
code-block:: bash # Using password authentication, with environment variables $ export OS_USERNAME=admin $ export OS_USER_DOMAIN_NAME=Default $ export OS_PASSWORD=secret $ export OS_PROJECT_NAME=admin $ export OS_PROJECT_DOMAIN_NAME=Default $ export OS_AUTH_URL=http://localhost:5000/v3 $ export OS_IDENTITY_API_VERSION=3 $ openstack user list $ openstack project create demo $ openstack token issue # Using password authentication, with flags $ openstack --os-username=admin --os-user-domain-name=Default \ --os-password=secret \ --os-project-name=admin --os-project-domain-name=Default \ --os-auth-url=http://localhost:5000/v3 --os-identity-api-version=3 \ user list $ openstack --os-username=admin --os-user-domain-name=Default \ --os-password=secret \ --os-project-name=admin --os-project-domain-name=Default \ --os-auth-url=http://localhost:5000/v3 --os-identity-api-version=3 \ project create demo ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/doc/source/user/trusts.rst0000664000175000017500000000512700000000000020611 0ustar00zuulzuul00000000000000.. Copyright 2018 SUSE Linux GmbH All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====== Trusts ====== OpenStack Identity manages authentication and authorization. A trust is an OpenStack Identity extension that enables delegation and, optionally, impersonation through ``keystone``. 
A trust extension defines a relationship between: **Trustor** The user delegating a limited set of their own rights to another user. **Trustee** The user trust is being delegated to, for a limited time. The trust can eventually allow the trustee to impersonate the trustor. For security reasons, some safeties are added. For example, if a trustor loses a given role, any trusts the user issued with that role, and the related tokens, are automatically revoked. The delegation parameters are: **User ID** The user IDs for the trustor and trustee. **Privileges** The delegated privileges are a combination of a project ID and a number of roles that must be a subset of the roles assigned to the trustor. If you omit all privileges, nothing is delegated. You cannot delegate everything. **Delegation depth** Defines whether or not the delegation is recursive. If it is recursive, defines the delegation chain length. Specify one of the following values: - ``0``. The delegate cannot delegate these permissions further. - ``1``. The delegate can delegate the permissions to any set of delegates but the latter cannot delegate further. - ``inf``. The delegation is infinitely recursive. **Endpoints** A list of endpoints associated with the delegation. This parameter further restricts the delegation to the specified endpoints only. If you omit the endpoints, the delegation is useless. A special value of ``all_endpoints`` allows the trust to be used by all endpoints associated with the delegated project. **Duration** (Optional) Comprised of the start time and end time for the trust. .. note:: See the administrator guide on :doc:`removing expired trusts ` for recommended maintenance procedures. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/etc/0000775000175000017500000000000000000000000014236 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/etc/README.txt0000664000175000017500000000047700000000000015744 0ustar00zuulzuul00000000000000To generate the sample keystone.conf and keystone.policy.yaml files, run the following commands from the top level of the keystone directory: tox -egenconfig tox -egenpolicy For a pre-generated example of the latest files, see: https://docs.openstack.org/keystone/latest/configuration/samples/index.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/etc/default_catalog.templates0000664000175000017500000000361100000000000021275 0ustar00zuulzuul00000000000000# config for templated.Catalog, using camelCase because I don't want to do # translations for keystone compat catalog.RegionOne.identity.publicURL = http://localhost:5000/v3 catalog.RegionOne.identity.adminURL = http://localhost:5000/v3 catalog.RegionOne.identity.internalURL = http://localhost:5000/v3 catalog.RegionOne.identity.name = Identity Service # fake compute service for now to help novaclient tests work catalog.RegionOne.computev21.publicURL = http://localhost:8774/v2.1 catalog.RegionOne.computev21.adminURL = http://localhost:8774/v2.1 catalog.RegionOne.computev21.internalURL = http://localhost:8774/v2.1 catalog.RegionOne.computev21.name = Compute Service V2.1 catalog.RegionOne.volumev3.publicURL = http://localhost:8776/v3 catalog.RegionOne.volumev3.adminURL = http://localhost:8776/v3 catalog.RegionOne.volumev3.internalURL = http://localhost:8776/v3 catalog.RegionOne.volumev3.name = Volume Service V3 catalog.RegionOne.image.publicURL = http://localhost:9292 catalog.RegionOne.image.adminURL = 
http://localhost:9292 catalog.RegionOne.image.internalURL = http://localhost:9292 catalog.RegionOne.image.name = Image Service catalog.RegionOne.network.publicURL = http://localhost:9696 catalog.RegionOne.network.adminURL = http://localhost:9696 catalog.RegionOne.network.internalURL = http://localhost:9696 catalog.RegionOne.network.name = Network Service catalog.RegionOne.placement.publicURL = http://localhost:8778 catalog.RegionOne.placement.adminURL = http://localhost:8778 catalog.RegionOne.placement.internalURL = http://localhost:8778 catalog.RegionOne.placement.name = Placement Service catalog.RegionOne.orchestration.publicURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.adminURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.internalURL = http://localhost:8004/v1/$(tenant_id)s catalog.RegionOne.orchestration.name = Orchestration Service ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/etc/logging.conf.sample0000664000175000017500000000202600000000000020013 0ustar00zuulzuul00000000000000[loggers] keys=root,access [handlers] keys=production,file,access_file,devel [formatters] keys=minimal,normal,debug ########### # Loggers # ########### [logger_root] level=WARNING handlers=file [logger_access] level=INFO qualname=access handlers=access_file ################ # Log Handlers # ################ [handler_production] class=handlers.SysLogHandler level=ERROR formatter=normal args=(('localhost', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_USER) [handler_file] class=handlers.WatchedFileHandler level=WARNING formatter=normal args=('error.log',) [handler_access_file] class=handlers.WatchedFileHandler level=INFO formatter=minimal args=('access.log',) [handler_devel] class=StreamHandler level=NOTSET formatter=debug args=(sys.stdout,) ################## # Log Formatters # ################## [formatter_minimal] format=%(message)s 
[formatter_normal] format=(%(name)s): %(asctime)s %(levelname)s %(message)s [formatter_debug] format=(%(name)s): %(asctime)s %(levelname)s %(module)s %(funcName)s %(message)s ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/etc/sso_callback_template.html0000664000175000017500000000123100000000000021434 0ustar00zuulzuul00000000000000 Keystone WebSSO redirect

Please wait...
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/examples/0000775000175000017500000000000000000000000015301 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/examples/pki/0000775000175000017500000000000000000000000016064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/examples/pki/certs/0000775000175000017500000000000000000000000017204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/certs/cacert.pem0000664000175000017500000000255700000000000021161 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIID1jCCAr6gAwIBAgIJAKiIU3dYUGKeMA0GCSqGSIb3DQEBBQUAMIGeMQowCAYD VQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1bm55 dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTElMCMG CSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxMLU2Vs ZiBTaWduZWQwIBcNMTMwNzA5MTYyNTAwWhgPMjA3MjAxMDExNjI1MDBaMIGeMQow CAYDVQQFEwE1MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExEjAQBgNVBAcTCVN1 bm55dmFsZTESMBAGA1UEChMJT3BlblN0YWNrMREwDwYDVQQLEwhLZXlzdG9uZTEl MCMGCSqGSIb3DQEJARYWa2V5c3RvbmVAb3BlbnN0YWNrLm9yZzEUMBIGA1UEAxML U2VsZiBTaWduZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh1U+N 3g2cjFi7GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh 8Ci9TEZ5LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+ v7IWOuzw19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6s w1OiC5DpJN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo 0PbNgOu6xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9 Jnn7qE8YfJo9Hyj3AgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcN AQEFBQADggEBAGWFTQTe2FwvwGWa/Bx3Ypc8pJ05ucmGDm8XZiUHj1mOvFHTcveL 
Iofb+vR2lynr+MwF9Dn1szGteVNn/QxrHJIoxsgf1n/9fdyYqjoKWXblNBMt5jhr IlMGdQMqHSDzlkZKbcXg5vzHnG5mrwh0rojcZItZznXTSo/XnujEtHwIvCo6rk9c tRRzpkcDkg+/SZf2izchsLoEQVsJsIZMnWl0hUGFHaDfx2JQn7bnAcC84wPVhRJ+ Xa3kDok1r7Nd7Vr/Wf0hCNRxyv2dySD/bq5iCEl1HNik3KCq4eUicTtkGe5N+Was ucf1RhPD3oZbxlTX4QDN7grSCdrTESyuhfc= -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/certs/middleware.pem0000664000175000017500000000572600000000000022036 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+ v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5 pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr 89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9 jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW 8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3 iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3 /oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc 
j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69 KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B 8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7 YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX 32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3 eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC FpolIoaSad28tGc8tbEk3fU= -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/certs/signing_cert.pem0000664000175000017500000000245600000000000022371 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV 
BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv cGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEB BQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925 PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyD GSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFe Z0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4Dp PN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqq QEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0B AQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2d a+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTO Z6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks 8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrnt Ae1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vY lz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q== -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/certs/ssl_cert.pem0000664000175000017500000000245600000000000021534 0ustar00zuulzuul00000000000000-----BEGIN CERTIFICATE----- MIIDpjCCAo4CARAwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNV BAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQK EwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZr ZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0x MzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgZAxCzAJBgNVBAYTAlVTMQsw CQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3Rh Y2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBv 
cGVuc3RhY2sub3JnMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3DQEB AQUAA4IBDwAwggEKAoIBAQC5dpW18l3bs+Mcj/JdhaAa+qw1RJwShm06g+q38ZoC cCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4GSI1pZa3iqbT9Yj70nxN+0l94iym+ v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6BdmwS0FuOy2qfKPnPhyBDH2VawtOgY MLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69KBJQElFXPQ9Nu0ABCPWWC2tN87L5 pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQuRnkMvQ/g887Sp6nEJ22ABPEFhuRr 89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cTnV9Dv6bfAgMBAAEwDQYJKoZIhvcN AQEFBQADggEBAIVz3ZwxSUF/y5ABmjnVIQaVVxH97bu07smFQUe0AB2I9R4xnBJ9 jn93DpeixZvArCZuDuJEJvNER8S6L3r/OPMPrVzayxibXATaZRE8khMWEJpsnyeW 8paA5NuZJwN2NjlPOmT47J1m7ZjLgkrVwjhwQZPMnh5kG9690TBJNhg9x3Z8f6p3 iKj2AfZWGhp9Xr2xOZCpfvAZmyvKOMeuHVrRZ2VWGuzojQd7fjSEDw/+Tg8Gw1LV BQXjXiKQHsD1YID2a9Pe9yrBjO00ZMxMw8+wN9qrh+8vxfmwTO8tEkmcpvM4ivO3 /oGGhQh6nSncERVI7rx+wBDnIHKBz6MU2Ow= -----END CERTIFICATE----- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/examples/pki/private/0000775000175000017500000000000000000000000017536 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/private/cakey.pem0000664000175000017500000000325000000000000021335 0ustar00zuulzuul00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCh1U+N3g2cjFi7 GeVf21FIv8MDhughFCey9rysAuqFONSFYo2rectLgpDtVy4BFFUFlxmh8Ci9TEZ5 LiA31tbc4584GxvlLt4dg8aFsUJRBKq0L9i7W5v9uFpHrY1Zr+P4vwG+v7IWOuzw 19f517eGpp6LLcj2vrpN9Yb63rrydKOqr0KJodMd+vFKmi+euFcPqs6sw1OiC5Dp JN479CGl2Fs1WzMoKDedRNiXG7ysrVrYQIkfMBABBPIwilq1xXZz9Ybo0PbNgOu6 xpSsy9hq+IzxcwYsr5CwIcbqW6Ju+Ti2iBEaff20lW7dFzO4kwrcqOr9Jnn7qE8Y fJo9Hyj3AgMBAAECggEAPeEVaTaF190mNGyDczKmEv4X8CpOag+N2nVT0SXQTJ5d TJ9RckbAwB+tkMLr+Uev9tI+39e3jCI1NDK56QAB6jYy9D4RXYGdNoXji80qgVYa e4lsAr/Vlp8+DfhDew6xSbSnUytzSeLAJJsznvmn2Bmvt6ILHKXzEMoYEabGrtvk 0n31mmd6sszW6i1cYEhr3gK/VXaO4gM1oWit9aeIJDg3/D3UNUW7aoCTeCz91Gif 
87/JH3UIPEIt960jb3oV7ltajRSpiSOfefJFwz/2n09+/P/Sg1+SWAraqkqaLqhO zoslYSYUuOQv+j97iD/tDVBjiWR1TrzQjf/3noOl+QKBgQDTExaIe0YYI8KdBNZ6 1cG3vztNWDh0PaP1n0n/bJYAGmAfxfn/gSrABXfeIAjy01f76EK2lPa/i8+DR7vL dJnUMO10OxaIZKr+OtR1XrMM6kREj6H5yHTNz0sJ3hDEfwJ1BndqwrXlCLAe7upe veXI9LVfPjPVmf8t9UwyxtaNiwKBgQDERzCGEuyKIeSfgytcdknJ0W+AbdkshC92 tZQPbI35YOLac2/y7GMjjf5Xg5VJRIYwXAG8ha+61Tvd7+qCVdzNyYfyOoBEE69B Gc9UdpXRfIjxokfidqh7mIIfjFNSI/UyVmvL9wrregXPcM+s7OlLC/0O82gOcNxU GKF3oP5XxQKBgQCPZEZIjcZ+m7yYQzMZ26FwnL9Cug4QGdgLAx2YIkJ8624l568A ftV2AcD+67Boll8NSSoZM3W1htuAifjwLNRcLKkD7yhNnGX1tC2lVqI4weWC1jjp od6H+q01lOC7PLWEntH9ey1q3M4ZFaGunz89l9CnVXCNScLri9sqG56iJQKBgHOc 50UiInhe7HbU4ZauClq5Za9FhRXGqtqGrDbFn38UBavdMUTq3p6Txgwwcp/coBoe J9uu90razU+2QPESuGPy4IPa17DB04pKNKiwzSC+9T83cpY/hJCAzazdkDqi+Yv0 Abz7wE/h6Ug+T+WxCt3sqtvCnjlbWzyh4YJAr3BtAoGBAIibPCEfVOwOfMOXkhIb liRVVGNxXQa6MwGVVfyR9gmlM85IjcBjh+Tf5+v3Mo286OlzLXQjfYW5pXR5Mgaw bKe+z5AqJlOsA+lJGTyCNnPKwaXAYHt8dZ41WhgzekibHCx7EQ+8jH1jkz2Gwou6 MDbnRu+e0FCyRFSuhB9Cim/K -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/private/signing_key.pem0000664000175000017500000000325000000000000022547 0ustar00zuulzuul00000000000000-----BEGIN PRIVATE KEY----- MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDEwuiHTXfQoNQ7 IXK0+YEVURd+pxJo0gPUFnYpOwfduTyu9FOBeo+Kc/+SS+6ZSKP/KyeIyc/XHBO5 tIgPiLgAbRIRMF5Bva4+OzstCeGcgxkoditQZe/DwPc8V0s8rFE0owSnQIdvXT2G yWO3IGSdLgtwLX1XHmIgDiIteEnRXmdC2Sw1wbi2qlJkjK5isCfcADDgm/42wT/f 92HHdBmI5b60gVOAam/PzR2rMjuA6TzevDgKMg+a+Y1LVfEGTdN1IyLKLsfHtJay +vjFbSaNDn1r5Uq5c0uRykq8mPrqqkBLsbWSqSTNjFfObo743PHg0goYdrIYQ4wX ptxSJRylAgMBAAECggEBAIDQPVz/CXarI+ZGQotaYPisqx3+kN3QyDLcNaVOgRrW P3UmfVjh/QEeae3ECkONu9e8z9gMjyX7uqo0F3NcBWI6Bb79FGgjnuQc8OPOeUZ2 yUyk+DxdT/eu5+04FQh2o387TjuU0lXFDBem1sI30cbZMyHQliMnwAPOXO+5tVH8 PusGNBMVvoCyfnj52uVjmAjPqLXyOMcKEhuJFbhnUURKvzkHRf43SWQsb081eh2m 
ACQ7uNzX7vg3aPXxSZXY2+hHX67POdqosjddu6CfoXcEHAOAUujvTOFvd1gGRkRo uOi5hNQqcN5uaqeq9enVThINDyFMzngZBhMCzRTWeK0CgYEA4qUhB7lJZLt9niDW 4Fudda1Pzu3XfxHsSG4D+xx5LunKb3ChG5x7PSLJvusfvnkm5fqhEEhbSVARo6Vn AAA52u5SPDDNwyk1ttvBR/Fc7eGwpbRQry2I6ui6baKiIOSV2K3vJlsSK8/GMQqu j0fstJuSvQR7Y6NUYxlWi+VNussCgYEA3j7tFAdGFc5JkeTHSzsU4h2+17uVDSSi yr7Duc9+9fwAbsO4go9x1CAOvV2r0WX10jPsTGg1d31pWLvJrS6QsAffmM+A0QIT eBX+umcavXWy69VExWa0xKU9wTE/nQvX9Fr8A+Klh/WfMcvoomK2zgOKoRSmes04 WKYlHWsSaE8CgYBUYcZ6abG5n1SVmwRlY7asKWqdUE/7L2EZVlyFEYTMwp5r/zL8 ZLY9fMZAHqoi8FhbJ4Tv2wChuv3WP66pgWwI5tIXNtRk5OLqwcakUmiW6IAsMYYY sotXam5+gx55wKFJmvh+/0k0ppbTi3aSQeUPGRz44sJNxnGUs8pVK3pVIQKBgQDD ga+lEtEAlbv6b7sx3wN79pbPyOBR84yRtkcPygzx74Gh7uL9V5rW9GyDAUgIqR0a kTqp7HI8b0KhIHFFu9TkRcjY8JFtS9o8pXy0FcdcK5H+DFq3HKag5ovwy5YeXTDY cMGJ2XOsqtIkSDCZySTvDgaBtVzOYoHS2jWEL5C92QKBgGmL2juXIB+HAi7UuKPg nWkVTikt5Zr2GNgYtso75E7+ljaRuf4D9eEBiOD1qYKQm8KvsiVzEs71BSmT1p1C b2hlM/5Crb7KumIkHTARQFr5NPwuBZ6NA6RLnd++vKi0WgOJtDAlR3bgwugfQdzZ 4Isaq9Rgfa/EHCKB2weQ7c3r -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/examples/pki/private/ssl_key.pem0000664000175000017500000000325000000000000021712 0ustar00zuulzuul00000000000000-----BEGIN PRIVATE KEY----- MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC5dpW18l3bs+Mc j/JdhaAa+qw1RJwShm06g+q38ZoCcCmRO3/XyHghgHWdVa+FKVm2ug923dE2PW4G SI1pZa3iqbT9Yj70nxN+0l94iym+v9/P7irolvo5OWBbBIJT1Ubjps5fJ//gz6Bd mwS0FuOy2qfKPnPhyBDH2VawtOgYMLk+PSG3YQh7vM2YvDALPTPz/f4qPmhQpb69 KBJQElFXPQ9Nu0ABCPWWC2tN87L5pakFw5zq46pttSJ7Izc8MXh3KQrh9FvjmiQu RnkMvQ/g887Sp6nEJ22ABPEFhuRr89aup6wRD2CkA/8L3zSB5BV7tTK4hQiq07cT nV9Dv6bfAgMBAAECggEBAIB1K5L/kZUITulMptGyKUgmkjq/D98g7u0Vy/CmTkcc Cx6F+LGsL9D8mfplDBKOpo4S530sfKk1+Uwu2ovDGqKhazQJ5ZMnz6gK7Ieg1ERD wDDURTIeyKf0HtJMGD0av2QU+GIeYXQEO446PhLCu+n42zkQ8tDS8xSJbCsu0odV ok6+i7nEg9sP4uDfAAtM8CUJbRpFTha+m2a7pOz3ylU7/ZV4FDIgJ+FEynaphXAo 
bZE4MX5I7A4DDBp7/9g9HsgefByY4xiABuk7Rsyztyf2TrJEtcsVhiV4sCIIHsow u60KGEcTQWj4npBIMgW1QUdrwmAAh/35gOjt9ZndgTkCgYEA2yT5DmihjVaNF65B 8VtdFcpESr8rr6FBmJ7z31m7MufeV1Inc5GqCK9agRmpr5sTYcgFB9it2IhW2WsA xHv+7J04bd9DBtgTv58GWrISsCR/abMZnJrm+F5Rafk77jwjCx/SwFj79ybI83Ia VJYMd7jqkxc00+DZT/3QWZqRrlsCgYEA2KeBBqUVdCpwNiJpgFM18HWjJx36HRk7 YoFapXot/6R6A/rYmS+/goBZt2CWqqGtnXqWEZvH+v4L+WlUmYQrWwtoxpdR1oXz EmlCxN7D9MbRVR7QVW24h5zdwPOlbCTGoKzowOs8UEjMfQ81zoMinLmcJgHQSyzs OawgSF+DmM0CgYBQz26EELNaMktvKxQoE3/c9CyAv8Q1TKqqxBq8BxPP7s7/tkzU AigIcdlW+Aapue7IxQCN5yocShJ0tE+hJPRZfpR7d7P4xx9pLxQhx766c4sEiEXu iPSZK/artHuUG1r01DRcN7QabJP3qeDpxjcswuTFfu49H5IjPD5jfGsyNwKBgFjh bvdQ5lo/xsUOnQV+HZTGTeaQT7l8TnZ85rkYRKKp0TysvgsqIYDiMuwd/fGGXnlK fyI+LG51pmftpD1OkZLKPXOrRHGjhjK5aCDn2rAimGI5P/KsDpXj7r1ntyeEdtAX 32y1lIrDMtDjWomcFqkBJGQbPl540Xhfeub1+EDJAoGAUZGPT2itKnxEFsa1SKHW yLeEsag/a9imAVyizo1WJn2WJaUhi1aHK49w6JRowIAzXXb7zLQt7BL8v+ydPVw3 eySpXGqFuN/Prm3So0SeWllWcPsKFAzjgE0CWjNuB0GlAZGOaJOcWUNoOZjX/SDC FpolIoaSad28tGc8tbEk3fU= -----END PRIVATE KEY----- ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/httpd/0000775000175000017500000000000000000000000014606 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/httpd/README0000664000175000017500000000011700000000000015465 0ustar00zuulzuul00000000000000Documentation for running Keystone with Apache HTTPD is in doc/source/install/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/httpd/keystone-uwsgi-admin.ini0000664000175000017500000000115700000000000021376 0ustar00zuulzuul00000000000000[uwsgi] wsgi-file = /usr/local/bin/keystone-wsgi-admin # Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see # http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi uwsgi-socket = 127.0.0.1:35358 # 
Override the default size for headers from the 4k default. buffer-size = 65535 # This is running standalone master = true enable-threads = true # Tune this to your environment. processes = 4 # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true plugins = python # This ensures that file descriptors aren't shared between keystone processes. lazy-apps = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/httpd/keystone-uwsgi-public.ini0000664000175000017500000000115700000000000021564 0ustar00zuulzuul00000000000000[uwsgi] wsgi-file = /usr/local/bin/keystone-wsgi-public # Versions of mod_proxy_uwsgi>=2.0.6 should use a UNIX socket, see # http://uwsgi-docs.readthedocs.org/en/latest/Apache.html#mod-proxy-uwsgi uwsgi-socket = 127.0.0.1:5001 # Override the default size for headers from the 4k default. buffer-size = 65535 # This is running standalone master = true enable-threads = true # Tune this to your environment. processes = 4 # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true plugins = python # This ensures that file descriptors aren't shared between keystone processes. 
lazy-apps = true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/httpd/uwsgi-keystone.conf0000664000175000017500000000042500000000000020453 0ustar00zuulzuul00000000000000Listen 5000 Listen 35357 ProxyPass / uwsgi://127.0.0.1:5001/ ProxyPass / uwsgi://127.0.0.1:35358/ ProxyPass /identity uwsgi://127.0.0.1:5001/ ProxyPass /identity_admin uwsgi://127.0.0.1:35358/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/httpd/wsgi-keystone.conf0000664000175000017500000000173700000000000020275 0ustar00zuulzuul00000000000000Listen 5000 WSGIDaemonProcess keystone-public processes=5 threads=1 user=keystone group=keystone display-name=%{GROUP} WSGIProcessGroup keystone-public WSGIScriptAlias / /usr/local/bin/keystone-wsgi-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On LimitRequestBody 114688 = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/apache2/keystone.log CustomLog /var/log/apache2/keystone_access.log combined = 2.4> Require all granted Order allow,deny Allow from all Alias /identity /usr/local/bin/keystone-wsgi-public SetHandler wsgi-script Options +ExecCGI WSGIProcessGroup keystone-public WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4901142 keystone-26.0.0/keystone/0000775000175000017500000000000000000000000015324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/__init__.py0000664000175000017500000000000000000000000017423 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/api/0000775000175000017500000000000000000000000016075 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/api/__init__.py0000664000175000017500000000456000000000000020213 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.api import auth from keystone.api import credentials from keystone.api import discovery from keystone.api import domains from keystone.api import ec2tokens from keystone.api import endpoints from keystone.api import groups from keystone.api import limits from keystone.api import os_ep_filter from keystone.api import os_federation from keystone.api import os_inherit from keystone.api import os_oauth1 from keystone.api import os_oauth2 from keystone.api import os_revoke from keystone.api import os_simple_cert from keystone.api import policy from keystone.api import projects from keystone.api import regions from keystone.api import registered_limits from keystone.api import role_assignments from keystone.api import role_inferences from keystone.api import roles from keystone.api import s3tokens from keystone.api import services from keystone.api import system from keystone.api import trusts from keystone.api import users __all__ = ( 'auth', 'discovery', 'credentials', 'domains', 'ec2tokens', 'endpoints', 'groups', 'limits', 'os_ep_filter', 'os_federation', 'os_inherit', 'os_oauth1', 'os_oauth2', 'os_revoke', 'os_simple_cert', 'policy', 'projects', 'regions', 
'registered_limits', 'role_assignments', 'role_inferences', 'roles', 's3tokens', 'services', 'system', 'trusts', 'users', ) __apis__ = ( discovery, auth, credentials, domains, ec2tokens, endpoints, groups, limits, os_ep_filter, os_federation, os_inherit, os_oauth1, os_oauth2, os_revoke, os_simple_cert, policy, projects, regions, registered_limits, role_assignments, role_inferences, roles, s3tokens, services, system, trusts, users, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/api/_shared/0000775000175000017500000000000000000000000017502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/EC2_S3_Resource.py0000664000175000017500000001731100000000000022644 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Common base resource for EC2 and S3 Authentication import datetime from oslo_serialization import jsonutils from oslo_utils import timeutils from werkzeug import exceptions from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception as ks_exceptions from keystone.i18n import _ from keystone.server import flask as ks_flask CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs CRED_TYPE_EC2 = 'ec2' class ResourceBase(ks_flask.ResourceBase): @ks_flask.unenforced_api def get(self): # SPECIAL CASE: GET is not allowed, raise METHOD_NOT_ALLOWED raise exceptions.MethodNotAllowed(valid_methods=['POST']) @staticmethod def _check_signature(cred_ref, credentials): # NOTE(morgan): @staticmethod doesn't always play nice with # the ABC module. raise NotImplementedError() @staticmethod def _check_timestamp(credentials): timestamp = ( # AWS Signature v1/v2 credentials.get('params', {}).get('Timestamp') or # AWS Signature v4 credentials.get('headers', {}).get('X-Amz-Date') or credentials.get('params', {}).get('X-Amz-Date') ) if not timestamp: # If the signed payload doesn't include a timestamp then the signer # must have intentionally left it off return try: timestamp = timeutils.parse_isotime(timestamp) timestamp = timeutils.normalize_time(timestamp) except Exception as e: raise ks_exceptions.Unauthorized( _('Credential timestamp is invalid: %s') % e ) auth_ttl = datetime.timedelta(minutes=CONF.credential.auth_ttl) current_time = timeutils.normalize_time(timeutils.utcnow()) if current_time > timestamp + auth_ttl: raise ks_exceptions.Unauthorized(_('Credential is expired')) def handle_authenticate(self): # TODO(morgan): convert this dirty check to JSON Schema validation # this mirrors the previous behavior of the webob system where an # empty request body for s3 and ec2 tokens would result in a BAD # REQUEST. Almost all other APIs use JSON Schema and therefore would # catch this early on. 
S3 and EC2 did not ever get json schema # implemented for them. if not self.request_body_json: msg = _('request must include a request body') raise ks_exceptions.ValidationError(msg) # NOTE(morgan): THIS IS SLOPPY! Apparently... keystone passed values # as "credential" and "credentials" in into the s3/ec2 authenticate # methods. There is no reason the multiple names should have worked # except that we totally did something wonky in the past... so now # there are 2 dirty "acceptable" body hacks for compatibility.... # Try "credentials" then "credential" and THEN ec2Credentials. Final # default is {} credentials = ( self.request_body_json.get('credentials') or self.request_body_json.get('credential') or self.request_body_json.get('ec2Credentials') ) if not credentials: credentials = {} if 'access' not in credentials: raise ks_exceptions.Unauthorized(_('EC2 Signature not supplied')) # Load the credential from the backend credential_id = utils.hash_access_key(credentials['access']) cred = PROVIDERS.credential_api.get_credential(credential_id) if not cred or cred['type'] != CRED_TYPE_EC2: raise ks_exceptions.Unauthorized(_('EC2 access key not found.')) # load from json if needed try: loaded = jsonutils.loads(cred['blob']) except TypeError: loaded = cred['blob'] # Convert to the legacy format cred_data = dict( user_id=cred.get('user_id'), project_id=cred.get('project_id'), access=loaded.get('access'), secret=loaded.get('secret'), trust_id=loaded.get('trust_id'), app_cred_id=loaded.get('app_cred_id'), access_token_id=loaded.get('access_token_id'), ) # validate the signature self._check_signature(cred_data, credentials) project_ref = PROVIDERS.resource_api.get_project( cred_data['project_id'] ) user_ref = PROVIDERS.identity_api.get_user(cred_data['user_id']) # validate that the auth info is valid and nothing is disabled try: PROVIDERS.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref ) PROVIDERS.resource_api.assert_project_enabled( 
project_id=project_ref['id'], project=project_ref ) except AssertionError as e: raise ks_exceptions.Unauthorized from e self._check_timestamp(credentials) trustee_user_id = None auth_context = None if cred_data['trust_id']: trust = PROVIDERS.trust_api.get_trust(cred_data['trust_id']) roles = [r['id'] for r in trust['roles']] # NOTE(cmurphy): if this credential was created using a # trust-scoped token with impersonation, the user_id will be for # the trustor, not the trustee. In this case, issuing a # trust-scoped token to the trustor will fail. In order to get a # trust-scoped token, use the user ID of the trustee. With # impersonation, the resulting token will still be for the trustor. # Without impersonation, the token will be for the trustee. if trust['impersonation'] is True: trustee_user_id = trust['trustee_user_id'] elif cred_data['app_cred_id']: ac_client = PROVIDERS.application_credential_api app_cred = ac_client.get_application_credential( cred_data['app_cred_id'] ) roles = [r['id'] for r in app_cred['roles']] elif cred_data['access_token_id']: access_token = PROVIDERS.oauth_api.get_access_token( cred_data['access_token_id'] ) roles = jsonutils.loads(access_token['role_ids']) auth_context = {'access_token_id': cred_data['access_token_id']} else: roles = PROVIDERS.assignment_api.get_roles_for_user_and_project( user_ref['id'], project_ref['id'] ) if not roles: raise ks_exceptions.Unauthorized(_('User not valid for project.')) for r_id in roles: # Assert all roles exist. 
PROVIDERS.role_api.get_role(r_id) method_names = ['ec2credential'] if trustee_user_id: user_id = trustee_user_id else: user_id = user_ref['id'] token = PROVIDERS.token_provider_api.issue_token( user_id=user_id, method_names=method_names, project_id=project_ref['id'], trust_id=cred_data['trust_id'], app_cred_id=cred_data['app_cred_id'], auth_context=auth_context, ) return token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/__init__.py0000664000175000017500000000064100000000000021614 0ustar00zuulzuul00000000000000# flake8: noqa # NOTE(morgan): The keystone.api._shared module is explicitly for shared code # between the APIs that should not be duplicated. This occurs infrequently. # For the most part adding a new file or code to anything in this module is # incorrect. If you are unsure of what you are doing, do not add code here. # WARNING: THIS FILE SHOULD CONTAIN NO CODE, it is explicitly ignored by # flake8 completely. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/authentication.py0000664000175000017500000002555000000000000023102 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Shared code for Authentication flows. This module is where actual auth # happens. The code here is shared between Federation and Auth. 
# TODO(morgan): Deprecate all auth flows in /v3/OS-FEDERATION, merge this code # into keystone.api.auth. For now this is the best place for the code to # exist. import flask from oslo_log import log from keystone.auth import core from keystone.common import provider_api from keystone import exception from keystone.federation import constants from keystone.i18n import _ from keystone.receipt import handlers as receipt_handlers LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs def _check_and_set_default_scoping(auth_info, auth_context): (domain_id, project_id, trust, unscoped, system) = auth_info.get_scope() if trust: project_id = trust['project_id'] if system or domain_id or project_id or trust: # scope is specified return # Skip scoping when unscoped federated token is being issued if constants.IDENTITY_PROVIDER in auth_context: return # Do not scope if request is for explicitly unscoped token if unscoped is not None: return # fill in default_project_id if it is available try: user_ref = PROVIDERS.identity_api.get_user(auth_context['user_id']) except exception.UserNotFound as e: LOG.warning(e) raise exception.Unauthorized(e) default_project_id = user_ref.get('default_project_id') if not default_project_id: # User has no default project. He shall get an unscoped token. return # make sure user's default project is legit before scoping to it try: default_project_ref = PROVIDERS.resource_api.get_project( default_project_id ) default_project_domain_ref = PROVIDERS.resource_api.get_domain( default_project_ref['domain_id'] ) if default_project_ref.get( 'enabled', True ) and default_project_domain_ref.get('enabled', True): if PROVIDERS.assignment_api.get_roles_for_user_and_project( user_ref['id'], default_project_id ): auth_info.set_scope(project_id=default_project_id) else: msg = ( "User %(user_id)s doesn't have access to" " default project %(project_id)s. The token" " will be unscoped rather than scoped to the" " project." 
) LOG.debug( msg, { 'user_id': user_ref['id'], 'project_id': default_project_id, }, ) else: msg = ( "User %(user_id)s's default project %(project_id)s" " is disabled. The token will be unscoped rather" " than scoped to the project." ) LOG.debug( msg, {'user_id': user_ref['id'], 'project_id': default_project_id}, ) except (exception.ProjectNotFound, exception.DomainNotFound): # default project or default project domain doesn't exist, # will issue unscoped token instead msg = ( "User %(user_id)s's default project %(project_id)s not" " found. The token will be unscoped rather than" " scoped to the project." ) LOG.debug( msg, {'user_id': user_ref['id'], 'project_id': default_project_id} ) def authenticate(auth_info, auth_context): """Authenticate user.""" # NOTE(notmorgan): This is not super pythonic, but we lean on the # __setitem__ method in auth_context to handle edge cases and security # of the attributes set by the plugins. This check to ensure # `auth_context` is an instance of AuthContext is extra insurance and # will prevent regressions. if not isinstance(auth_context, core.AuthContext): LOG.error( '`auth_context` passed to the Auth controller ' '`authenticate` method is not of type ' '`keystone.auth.core.AuthContext`. For security ' 'purposes this is required. This is likely a programming ' 'error. Received object of type `%s`', type(auth_context), ) raise exception.Unauthorized( _('Cannot Authenticate due to internal error.') ) # The 'external' method allows any 'REMOTE_USER' based authentication # In some cases the server can set REMOTE_USER as '' instead of # dropping it, so this must be filtered out if flask.request.remote_user: try: external = core.get_auth_method('external') resp = external.authenticate(auth_info) if resp and resp.status: # NOTE(notmorgan): ``external`` plugin cannot be multi-step # it is either a plain success/fail. 
auth_context.setdefault('method_names', []).insert( 0, 'external' ) # NOTE(notmorgan): All updates to auth_context is handled # here in the .authenticate method. auth_context.update(resp.response_data or {}) except exception.AuthMethodNotSupported: # This will happen there is no 'external' plugin registered # and the container is performing authentication. # The 'kerberos' and 'saml' methods will be used this way. # In those cases, it is correct to not register an # 'external' plugin; if there is both an 'external' and a # 'kerberos' plugin, it would run the check on identity twice. LOG.debug("No 'external' plugin is registered.") except exception.Unauthorized: # If external fails then continue and attempt to determine # user identity using remaining auth methods LOG.debug("Authorization failed for 'external' auth method.") # need to aggregate the results in case two or more methods # are specified auth_response = {'methods': []} for method_name in auth_info.get_method_names(): method = core.get_auth_method(method_name) resp = method.authenticate(auth_info.get_method_data(method_name)) if resp: if resp.status: auth_context.setdefault('method_names', []).insert( 0, method_name ) # NOTE(notmorgan): All updates to auth_context is handled # here in the .authenticate method. 
If the auth attempt was # not successful do not update the auth_context resp_method_names = resp.response_data.pop('method_names', []) auth_context['method_names'].extend(resp_method_names) auth_context.update(resp.response_data or {}) elif resp.response_body: auth_response['methods'].append(method_name) auth_response[method_name] = resp.response_body if auth_response["methods"]: # authentication continuation required raise exception.AdditionalAuthRequired(auth_response) if 'user_id' not in auth_context: msg = 'User not found by auth plugin; authentication failed' tr_msg = _('User not found by auth plugin; authentication failed') LOG.warning(msg) raise exception.Unauthorized(tr_msg) def authenticate_for_token(auth=None): """Authenticate user and issue a token.""" try: auth_info = core.AuthInfo.create(auth=auth) auth_context = core.AuthContext(method_names=[], bind={}) authenticate(auth_info, auth_context) if auth_context.get('access_token_id'): auth_info.set_scope(None, auth_context['project_id'], None) _check_and_set_default_scoping(auth_info, auth_context) (domain_id, project_id, trust, unscoped, system) = ( auth_info.get_scope() ) trust_id = trust.get('id') if trust else None receipt = receipt_handlers.extract_receipt(auth_context) # NOTE(notmorgan): only methods that actually run and succeed will # be in the auth_context['method_names'] list. Do not blindly take # the values from auth_info, look at the authoritative values. Make # sure the set is unique. # NOTE(adriant): The set of methods will also include any methods from # the given receipt. 
if receipt: method_names_set = set( auth_context.get('method_names', []) + receipt.methods ) else: method_names_set = set(auth_context.get('method_names', [])) method_names = list(method_names_set) app_cred_id = None if 'application_credential' in method_names: token_auth = auth_info.auth['identity'] app_cred_id = token_auth['application_credential']['id'] # Do MFA Rule Validation for the user if not core.UserMFARulesValidator.check_auth_methods_against_rules( auth_context['user_id'], method_names_set ): raise exception.InsufficientAuthMethods( user_id=auth_context['user_id'], methods=method_names ) expires_at = auth_context.get('expires_at') token_audit_id = auth_context.get('audit_id') token = PROVIDERS.token_provider_api.issue_token( auth_context['user_id'], method_names, expires_at=expires_at, system=system, project_id=project_id, domain_id=domain_id, auth_context=auth_context, trust_id=trust_id, app_cred_id=app_cred_id, parent_audit_id=token_audit_id, ) # NOTE(wanghong): We consume a trust use only when we are using # trusts and have successfully issued a token. if trust: PROVIDERS.trust_api.consume_use(token.trust_id) return token except exception.TrustNotFound as e: LOG.warning(e) raise exception.Unauthorized(e) def federated_authenticate_for_token(identity_provider, protocol_id): auth = { 'identity': { 'methods': [protocol_id], protocol_id: { 'identity_provider': identity_provider, 'protocol': protocol_id, }, } } return authenticate_for_token(auth) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/implied_roles.py0000664000175000017500000000337400000000000022712 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morgan): This file does not implement any API-specific code. It simply # supplies shared functions between the "role_inferences" and "roles" (for # implied roles) APIs. In general, all code for an API should be isolated to # it's own keystone.api.XXX module and not in the _shared module. from keystone.common import provider_api from keystone.server import flask as ks_flask PROVIDERS = provider_api.ProviderAPIs def build_prior_role_response_data(prior_role_id, prior_role_name): return { 'id': prior_role_id, 'links': {'self': ks_flask.base_url(path='/roles/%s' % prior_role_id)}, 'name': prior_role_name, } def build_implied_role_response_data(implied_role): return { 'id': implied_role['id'], 'links': { 'self': ks_flask.base_url(path='/roles/%s' % implied_role['id']) }, 'name': implied_role['name'], } def role_inference_response(prior_role_id): prior_role = PROVIDERS.role_api.get_role(prior_role_id) response = { 'role_inference': { 'prior_role': build_prior_role_response_data( prior_role_id, prior_role['name'] ) } } return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/json_home_relations.py0000664000175000017500000000773300000000000024127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morgan): This module contains json_home partial functions for # what were called "extensions" before. As keystone does not have extensions # any longer, once Keystone is converted to flask fully, there should be no # reason to add more elements to this module. import functools from keystone.common import json_home # OS-EC2 "extension" os_ec2_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EC2', extension_version='1.0', ) # s3token "extension" s3_token_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='s3tokens', extension_version='1.0', ) # OS-EP-FILTER "extension" os_ep_filter_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EP-FILTER', extension_version='1.0', ) os_ep_filter_parameter_rel_func = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-EP-FILTER', extension_version='1.0', ) # OS-OAUTH1 "extension" os_oauth1_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-OAUTH1', extension_version='1.0', ) os_oauth1_parameter_rel_func = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-OAUTH1', extension_version='1.0', ) # OS-OAUTH2 "extension" os_oauth2_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-OAUTH2', extension_version='1.0', ) os_oauth2_parameter_rel_func = functools.partial( 
json_home.build_v3_extension_parameter_relation, extension_name='OS-OAUTH2', extension_version='1.0', ) # OS-REVOKE "extension" os_revoke_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-REVOKE', extension_version='1.0', ) # OS-SIMPLE-CERT "extension" os_simple_cert_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-SIMPLE-CERT', extension_version='1.0', ) # OS-TRUST "extension" os_trust_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST', extension_version='1.0', ) os_trust_parameter_rel_func = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-TRUST', extension_version='1.0', ) # OS-ENDPOINT-POLICY "extension" os_endpoint_policy_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-ENDPOINT-POLICY', extension_version='1.0', ) # OS-FEDERATION "extension" os_federation_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-FEDERATION', extension_version='1.0', ) os_federation_parameter_rel_func = functools.partial( json_home.build_v3_extension_parameter_relation, extension_name='OS-FEDERATION', extension_version='1.0', ) # OS-INHERIT "extension" os_inherit_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-INHERIT', extension_version='1.0', ) # OS-PKI (revoked) "extension" os_pki_resource_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-PKI', extension_version='1.0', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/_shared/saml.py0000664000175000017500000000575600000000000021025 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.federation import idp as keystone_idp from keystone.federation import utils as federation_utils from keystone.i18n import _ CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def create_base_saml_assertion(auth): issuer = CONF.saml.idp_entity_id sp_id = auth['scope']['service_provider']['id'] service_provider = PROVIDERS.federation_api.get_sp(sp_id) federation_utils.assert_enabled_service_provider_object(service_provider) sp_url = service_provider['sp_url'] token_id = auth['identity']['token']['id'] token = PROVIDERS.token_provider_api.validate_token(token_id) if not token.project_scoped: action = _( 'Use a project scoped token when attempting to create ' 'a SAML assertion' ) raise exception.ForbiddenAction(action=action) subject = token.user['name'] role_names = [] for role in token.roles: role_names.append(role['name']) project = token.project['name'] # NOTE(rodrigods): the domain name is necessary in order to distinguish # between projects and users with the same name in different domains. project_domain_name = token.project_domain['name'] subject_domain_name = token.user_domain['name'] def group_membership(): """Return a list of dictionaries serialized as strings. 
The expected return structure is:: ['JSON:{"name":"group1","domain":{"name":"Default"}}', 'JSON:{"name":"group2","domain":{"name":"Default"}}'] """ user_groups = [] groups = PROVIDERS.identity_api.list_groups_for_user(token.user_id) for group in groups: user_group = {} group_domain_name = PROVIDERS.resource_api.get_domain( group['domain_id'] )['name'] user_group["name"] = group['name'] user_group["domain"] = {'name': group_domain_name} user_groups.append('JSON:' + jsonutils.dumps(user_group)) return user_groups groups = group_membership() generator = keystone_idp.SAMLGenerator() response = generator.samlize_token( issuer, sp_url, subject, subject_domain_name, role_names, project, project_domain_name, groups, ) return response, service_provider ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/auth.py0000664000175000017500000004746300000000000017426 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client # This file handles all flask-restful resources for /v3/auth import string # noqa: I202 import urllib import flask import flask_restful from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import strutils import werkzeug.exceptions from keystone.api._shared import authentication from keystone.api._shared import json_home_relations from keystone.api._shared import saml from keystone.auth import schema as auth_schema from keystone.common import authorization from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import render_token from keystone.common import utils as k_utils from keystone.common import validation import keystone.conf from keystone import exception from keystone.federation import idp as keystone_idp from keystone.federation import schema as federation_schema from keystone.federation import utils as federation_utils from keystone.i18n import _ from keystone.server import flask as ks_flask CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs def _combine_lists_uniquely(a, b): # it's most likely that only one of these will be filled so avoid # the combination if possible. if a and b: return {x['id']: x for x in a + b}.values() else: return a or b def _build_response_headers(service_provider): # URLs in header are encoded into bytes return [ ('Content-Type', 'text/xml'), ('X-sp-url', service_provider['sp_url'].encode('utf-8')), ('X-auth-url', service_provider['auth_url'].encode('utf-8')), ] def _get_sso_origin_host(): """Validate and return originating dashboard URL. Make sure the parameter is specified in the request's URL as well its value belongs to a list of trusted dashboards. :raises keystone.exception.ValidationError: ``origin`` query parameter was not specified. The URL is deemed invalid. 
:raises keystone.exception.Unauthorized: URL specified in origin query parameter does not exist in list of websso trusted dashboards. :returns: URL with the originating dashboard """ origin = flask.request.args.get('origin') if not origin: msg = 'Request must have an origin query parameter' tr_msg = _('Request must have an origin query parameter') LOG.error(msg) raise exception.ValidationError(tr_msg) host = urllib.parse.unquote_plus(origin) # change trusted_dashboard hostnames to lowercase before comparison trusted_dashboards = [ k_utils.lower_case_hostname(trusted) for trusted in CONF.federation.trusted_dashboard ] if host not in trusted_dashboards: msg = f'{host} is not a trusted dashboard host' tr_msg = _('%(host)s is not a trusted dashboard host') % {'host': host} LOG.error(msg) raise exception.Unauthorized(tr_msg) return host class _AuthFederationWebSSOBase(ks_flask.ResourceBase): @staticmethod def _render_template_response(host, token_id): with open(CONF.federation.sso_callback_template) as template: src = string.Template(template.read()) subs = {'host': host, 'token': token_id} body = src.substitute(subs) resp = flask.make_response(body, http.client.OK) resp.charset = 'utf-8' resp.headers['Content-Type'] = 'text/html' return resp class AuthProjectsResource(ks_flask.ResourceBase): collection_key = 'projects' member_key = 'project' def get(self): """Get possible project scopes for token. 
GET/HEAD /v3/auth/projects GET/HEAD /v3/OS-FEDERATION/projects """ ENFORCER.enforce_call(action='identity:get_auth_projects') user_id = self.auth_context.get('user_id') group_ids = self.auth_context.get('group_ids') user_p_refs = [] grp_p_refs = [] if user_id: try: user_p_refs = PROVIDERS.assignment_api.list_projects_for_user( user_id ) except exception.UserNotFound: # nosec # federated users have an id but they don't link to anything pass if group_ids: grp_p_refs = PROVIDERS.assignment_api.list_projects_for_groups( group_ids ) refs = _combine_lists_uniquely(user_p_refs, grp_p_refs) return self.wrap_collection(refs) class AuthDomainsResource(ks_flask.ResourceBase): collection_key = 'domains' member_key = 'domain' def get(self): """Get possible domain scopes for token. GET/HEAD /v3/auth/domains GET/HEAD /v3/OS-FEDERATION/domains """ ENFORCER.enforce_call(action='identity:get_auth_domains') user_id = self.auth_context.get('user_id') group_ids = self.auth_context.get('group_ids') user_d_refs = [] grp_d_refs = [] if user_id: try: user_d_refs = PROVIDERS.assignment_api.list_domains_for_user( user_id ) except exception.UserNotFound: # nosec # federated users have an id but they don't link to anything pass if group_ids: grp_d_refs = PROVIDERS.assignment_api.list_domains_for_groups( group_ids ) refs = _combine_lists_uniquely(user_d_refs, grp_d_refs) return self.wrap_collection(refs) class AuthSystemResource(_AuthFederationWebSSOBase): def get(self): """Get possible system scopes for token. 
GET/HEAD /v3/auth/system """ ENFORCER.enforce_call(action='identity:get_auth_system') user_id = self.auth_context.get('user_id') group_ids = self.auth_context.get('group_ids') user_assignments = [] group_assignments = [] if user_id: try: user_assignments = ( PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) ) except exception.UserNotFound: # nosec # federated users have an id but they don't link to anything pass if group_ids: group_assignments = ( PROVIDERS.assignment_api.list_system_grants_for_groups( group_ids ) ) assignments = _combine_lists_uniquely( user_assignments, group_assignments ) if assignments: response = { 'system': [{'all': True}], 'links': {'self': ks_flask.base_url(path='auth/system')}, } else: response = { 'system': [], 'links': {'self': ks_flask.base_url(path='auth/system')}, } return response class AuthCatalogResource(_AuthFederationWebSSOBase): def get(self): """Get service catalog for token. GET/HEAD /v3/auth/catalog """ ENFORCER.enforce_call(action='identity:get_auth_catalog') user_id = self.auth_context.get('user_id') project_id = self.auth_context.get('project_id') if not project_id: raise exception.Forbidden( _( 'A project-scoped token is required to produce a ' 'service catalog.' ) ) return { 'catalog': PROVIDERS.catalog_api.get_v3_catalog( user_id, project_id ), 'links': {'self': ks_flask.base_url(path='auth/catalog')}, } class AuthTokenOSPKIResource(flask_restful.Resource): @ks_flask.unenforced_api def get(self): """Deprecated; get revoked token list. GET/HEAD /v3/auth/tokens/OS-PKI/revoked """ if not CONF.token.revoke_by_id: raise exception.Gone() # NOTE(lbragstad): This API is deprecated and isn't supported. Keystone # also doesn't store tokens, so returning a list of revoked tokens # would require keystone to write invalid tokens to disk, which defeats # the purpose. Return a 403 instead of removing the API altogether. 
raise exception.Forbidden() class AuthTokenResource(_AuthFederationWebSSOBase): def get(self): """Validate a token. HEAD/GET /v3/auth/tokens """ # TODO(morgan): eliminate the check_token action only use validate # NOTE(morgan): Well lookie here, we have different enforcements # for no good reason (historical), because the methods previously # had to be named different names. Check which method and do the # correct enforcement. if flask.request.method == 'HEAD': ENFORCER.enforce_call(action='identity:check_token') else: ENFORCER.enforce_call(action='identity:validate_token') token_id = flask.request.headers.get( authorization.SUBJECT_TOKEN_HEADER ) access_rules_support = flask.request.headers.get( authorization.ACCESS_RULES_HEADER ) allow_expired = strutils.bool_from_string( flask.request.args.get('allow_expired') ) window_secs = CONF.token.allow_expired_window if allow_expired else 0 include_catalog = 'nocatalog' not in flask.request.args token = PROVIDERS.token_provider_api.validate_token( token_id, window_seconds=window_secs, access_rules_support=access_rules_support, ) token_resp = render_token.render_token_response_from_model( token, include_catalog=include_catalog ) resp_body = jsonutils.dumps(token_resp) response = flask.make_response(resp_body, http.client.OK) response.headers['X-Subject-Token'] = token_id response.headers['Content-Type'] = 'application/json' return response @ks_flask.unenforced_api def post(self): """Issue a token. 
POST /v3/auth/tokens """ include_catalog = 'nocatalog' not in flask.request.args auth_data = self.request_body_json.get('auth') auth_schema.validate_issue_token_auth(auth_data) token = authentication.authenticate_for_token(auth_data) resp_data = render_token.render_token_response_from_model( token, include_catalog=include_catalog ) resp_body = jsonutils.dumps(resp_data) response = flask.make_response(resp_body, http.client.CREATED) response.headers['X-Subject-Token'] = token.id response.headers['Content-Type'] = 'application/json' return response def delete(self): """Revoke a token. DELETE /v3/auth/tokens """ ENFORCER.enforce_call(action='identity:revoke_token') token_id = flask.request.headers.get( authorization.SUBJECT_TOKEN_HEADER ) PROVIDERS.token_provider_api.revoke_token(token_id) return None, http.client.NO_CONTENT class AuthFederationWebSSOResource(_AuthFederationWebSSOBase): @classmethod def _perform_auth(cls, protocol_id): idps = PROVIDERS.federation_api.list_idps() remote_id = None for idp in idps: try: remote_id_name = federation_utils.get_remote_id_parameter( idp, protocol_id ) except exception.FederatedProtocolNotFound: # no protocol for this IdP, so this can't be the IdP we're # looking for continue remote_id = flask.request.environ.get(remote_id_name) if remote_id: break if not remote_id: msg = 'Missing entity ID from environment' tr_msg = _('Missing entity ID from environment') LOG.error(msg) raise exception.Unauthorized(tr_msg) host = _get_sso_origin_host() ref = PROVIDERS.federation_api.get_idp_from_remote_id(remote_id) identity_provider = ref['idp_id'] token = authentication.federated_authenticate_for_token( identity_provider=identity_provider, protocol_id=protocol_id ) return cls._render_template_response(host, token.id) @ks_flask.unenforced_api def get(self, protocol_id): return self._perform_auth(protocol_id) @ks_flask.unenforced_api def post(self, protocol_id): return self._perform_auth(protocol_id) class 
AuthFederationWebSSOIDPsResource(_AuthFederationWebSSOBase): @classmethod def _perform_auth(cls, idp_id, protocol_id): host = _get_sso_origin_host() token = authentication.federated_authenticate_for_token( identity_provider=idp_id, protocol_id=protocol_id ) return cls._render_template_response(host, token.id) @ks_flask.unenforced_api def get(self, idp_id, protocol_id): return self._perform_auth(idp_id, protocol_id) @ks_flask.unenforced_api def post(self, idp_id, protocol_id): return self._perform_auth(idp_id, protocol_id) class AuthFederationSaml2Resource(_AuthFederationWebSSOBase): def get(self): raise werkzeug.exceptions.MethodNotAllowed(valid_methods=['POST']) @ks_flask.unenforced_api def post(self): """Exchange a scoped token for a SAML assertion. POST /v3/auth/OS-FEDERATION/saml2 """ auth = self.request_body_json.get('auth') validation.lazy_validate(federation_schema.saml_create, auth) response, service_provider = saml.create_base_saml_assertion(auth) headers = _build_response_headers(service_provider) response = flask.make_response(response.to_string(), http.client.OK) for header, value in headers: response.headers[header] = value return response class AuthFederationSaml2ECPResource(_AuthFederationWebSSOBase): def get(self): raise werkzeug.exceptions.MethodNotAllowed(valid_methods=['POST']) @ks_flask.unenforced_api def post(self): """Exchange a scoped token for an ECP assertion. 
POST /v3/auth/OS-FEDERATION/saml2/ecp """ auth = self.request_body_json.get('auth') validation.lazy_validate(federation_schema.saml_create, auth) saml_assertion, service_provider = saml.create_base_saml_assertion( auth ) relay_state_prefix = service_provider['relay_state_prefix'] generator = keystone_idp.ECPGenerator() ecp_assertion = generator.generate_ecp( saml_assertion, relay_state_prefix ) headers = _build_response_headers(service_provider) response = flask.make_response( ecp_assertion.to_string(), http.client.OK ) for header, value in headers: response.headers[header] = value return response class AuthAPI(ks_flask.APIBase): _name = 'auth' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=AuthProjectsResource, url='/auth/projects', alternate_urls=[ dict( url='/OS-FEDERATION/projects', json_home=ks_flask.construct_json_home_data( rel='projects', resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), ), ) ], rel='auth_projects', resource_kwargs={}, ), ks_flask.construct_resource_map( resource=AuthDomainsResource, url='/auth/domains', alternate_urls=[ dict( url='/OS-FEDERATION/domains', json_home=ks_flask.construct_json_home_data( rel='domains', resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), ), ) ], rel='auth_domains', resource_kwargs={}, ), ks_flask.construct_resource_map( resource=AuthSystemResource, url='/auth/system', resource_kwargs={}, rel='auth_system', ), ks_flask.construct_resource_map( resource=AuthCatalogResource, url='/auth/catalog', resource_kwargs={}, rel='auth_catalog', ), ks_flask.construct_resource_map( resource=AuthTokenOSPKIResource, url='/auth/tokens/OS-PKI/revoked', resource_kwargs={}, rel='revocations', resource_relation_func=json_home_relations.os_pki_resource_rel_func, ), ks_flask.construct_resource_map( resource=AuthTokenResource, url='/auth/tokens', resource_kwargs={}, rel='auth_tokens', ), ] class 
AuthFederationAPI(ks_flask.APIBase): _name = 'auth/OS-FEDERATION' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=AuthFederationSaml2Resource, url='/auth/OS-FEDERATION/saml2', resource_kwargs={}, resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), rel='saml2', ), ks_flask.construct_resource_map( resource=AuthFederationSaml2ECPResource, url='/auth/OS-FEDERATION/saml2/ecp', resource_kwargs={}, resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), rel='ecp', ), ks_flask.construct_resource_map( resource=AuthFederationWebSSOResource, url='/auth/OS-FEDERATION/websso/', resource_kwargs={}, rel='websso', resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), path_vars={ 'protocol_id': ( json_home_relations.os_federation_parameter_rel_func( parameter_name='protocol_id' ) ) }, ), ks_flask.construct_resource_map( resource=AuthFederationWebSSOIDPsResource, url=( '/auth/OS-FEDERATION/identity_providers//' 'protocols//websso' ), resource_kwargs={}, rel='identity_providers_websso', resource_relation_func=( json_home_relations.os_federation_resource_rel_func ), path_vars={ 'idp_id': ( json_home_relations.os_federation_parameter_rel_func( parameter_name='idp_id' ) ), 'protocol_id': ( json_home_relations.os_federation_parameter_rel_func( parameter_name='protocol_id' ) ), }, ), ] APIs = ( AuthAPI, AuthFederationAPI, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/credentials.py0000664000175000017500000002203500000000000020746 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/credentials import hashlib import http.client import flask from oslo_serialization import jsonutils from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone.credential import schema from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs ENFORCER = rbac_enforcer.RBACEnforcer def _build_target_enforcement(): target = {} try: target['credential'] = PROVIDERS.credential_api.get_credential( flask.request.view_args.get('credential_id') ) except exception.NotFound: # nosec # Defer existance in the event the credential doesn't exist, we'll # check this later anyway. 
pass return target class CredentialResource(ks_flask.ResourceBase): collection_key = 'credentials' member_key = 'credential' @staticmethod def _blob_to_json(ref): # credentials stored via ec2tokens before the fix for #1259584 # need json_serailzing, as that's the documented API format blob = ref.get('blob') if isinstance(blob, dict): ref = ref.copy() ref['blob'] = jsonutils.dumps(blob) return ref def _validate_blob_json(self, ref): try: blob = jsonutils.loads(ref.get('blob')) except (ValueError, TabError): raise exception.ValidationError( message=_('Invalid blob in credential') ) if not blob or not isinstance(blob, dict): raise exception.ValidationError( attribute='blob', target='credential' ) if blob.get('access') is None: raise exception.ValidationError( attribute='access', target='credential' ) return blob def _assign_unique_id( self, ref, trust_id=None, app_cred_id=None, access_token_id=None ): # Generates an assigns a unique identifier to a credential reference. if ref.get('type', '').lower() == 'ec2': blob = self._validate_blob_json(ref) ref = ref.copy() ref['id'] = hashlib.sha256( blob['access'].encode('utf8') ).hexdigest() # update the blob with the trust_id or app_cred_id, so credentials # created with a trust- or app cred-scoped token will result in # trust- or app cred-scoped tokens when authentication via # ec2tokens happens if trust_id is not None: blob['trust_id'] = trust_id ref['blob'] = jsonutils.dumps(blob) if app_cred_id is not None: blob['app_cred_id'] = app_cred_id ref['blob'] = jsonutils.dumps(blob) if access_token_id is not None: blob['access_token_id'] = access_token_id ref['blob'] = jsonutils.dumps(blob) return ref else: return super()._assign_unique_id(ref) def _list_credentials(self): filters = ['user_id', 'type'] if not self.oslo_context.system_scope: target = {'credential': {'user_id': self.oslo_context.user_id}} else: target = None ENFORCER.enforce_call( action='identity:list_credentials', filters=filters, target_attr=target, ) hints = 
self.build_driver_hints(filters) refs = PROVIDERS.credential_api.list_credentials(hints) # If the request was filtered, make sure to return only the # credentials specific to that user. This makes it so that users with # roles on projects can't see credentials that aren't theirs. filtered_refs = [] for ref in refs: # Check each credential again to make sure the user has access to # it, either by owning it, being a project admin with # enforce_scope=false, being a system user, or having some other # custom policy that allows access. try: cred = PROVIDERS.credential_api.get_credential(ref['id']) ENFORCER.enforce_call( action='identity:get_credential', target_attr={'credential': cred}, ) filtered_refs.append(ref) except exception.Forbidden: pass refs = filtered_refs refs = [self._blob_to_json(r) for r in refs] return self.wrap_collection(refs, hints=hints) def _get_credential(self, credential_id): ENFORCER.enforce_call( action='identity:get_credential', build_target=_build_target_enforcement, ) credential = PROVIDERS.credential_api.get_credential(credential_id) return self.wrap_member(self._blob_to_json(credential)) def get(self, credential_id=None): # Get Credential or List of credentials. if credential_id is None: # No Parameter passed means that we're doing a LIST action. 
return self._list_credentials() else: return self._get_credential(credential_id) def post(self): # Create a new credential credential = self.request_body_json.get('credential', {}) target = {} target['credential'] = credential ENFORCER.enforce_call( action='identity:create_credential', target_attr=target ) validation.lazy_validate(schema.credential_create, credential) trust_id = getattr(self.oslo_context, 'trust_id', None) app_cred_id = getattr( self.auth_context['token'], 'application_credential_id', None ) access_token_id = getattr( self.auth_context['token'], 'access_token_id', None ) ref = self._assign_unique_id( self._normalize_dict(credential), trust_id=trust_id, app_cred_id=app_cred_id, access_token_id=access_token_id, ) ref = PROVIDERS.credential_api.create_credential( ref['id'], ref, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def _validate_blob_update_keys(self, credential, ref): if credential.get('type', '').lower() == 'ec2': new_blob = self._validate_blob_json(ref) old_blob = credential.get('blob') if isinstance(old_blob, str): old_blob = jsonutils.loads(old_blob) # if there was a scope set, prevent changing it or unsetting it for key in [ 'trust_id', 'app_cred_id', 'access_token_id', 'access_id', ]: if old_blob.get(key) != new_blob.get(key): message = _('%s can not be updated for credential') % key raise exception.ValidationError(message=message) def patch(self, credential_id): # Update Credential ENFORCER.enforce_call( action='identity:update_credential', build_target=_build_target_enforcement, ) current = PROVIDERS.credential_api.get_credential(credential_id) credential = self.request_body_json.get('credential', {}) validation.lazy_validate(schema.credential_update, credential) self._validate_blob_update_keys(current.copy(), credential.copy()) self._require_matching_id(credential) # Check that the user hasn't illegally modified the owner or scope target = {'credential': dict(current, **credential)} 
ENFORCER.enforce_call( action='identity:update_credential', target_attr=target ) ref = PROVIDERS.credential_api.update_credential( credential_id, credential ) return self.wrap_member(ref) def delete(self, credential_id): # Delete credentials ENFORCER.enforce_call( action='identity:delete_credential', build_target=_build_target_enforcement, ) return ( PROVIDERS.credential_api.delete_credential( credential_id, initiator=self.audit_initiator ), http.client.NO_CONTENT, ) class CredentialAPI(ks_flask.APIBase): _name = 'credentials' _import_name = __name__ resource_mapping = [] resources = [CredentialResource] APIs = (CredentialAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/discovery.py0000664000175000017500000001027000000000000020456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import flask from flask import request from oslo_serialization import jsonutils from keystone.common import json_home import keystone.conf from keystone.server import flask as ks_flask CONF = keystone.conf.CONF MEDIA_TYPE_JSON = 'application/vnd.openstack.identity-%s+json' _DISCOVERY_BLUEPRINT = flask.Blueprint('Discovery', __name__) def _get_versions_list(identity_url): versions = {} versions['v3'] = { 'id': 'v3.14', 'status': 'stable', 'updated': '2020-04-07T00:00:00Z', 'links': [ { 'rel': 'self', 'href': identity_url, } ], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % 'v3'} ], } return versions class MimeTypes: JSON = 'application/json' JSON_HOME = 'application/json-home' def v3_mime_type_best_match(): if not request.accept_mimetypes: return MimeTypes.JSON return request.accept_mimetypes.best_match( [MimeTypes.JSON, MimeTypes.JSON_HOME] ) @_DISCOVERY_BLUEPRINT.route('/') def get_versions(): if v3_mime_type_best_match() == MimeTypes.JSON_HOME: # RENDER JSON-Home form, we have a clever client who will # understand the JSON-Home document. v3_json_home = json_home.JsonHomeResources.resources() json_home.translate_urls(v3_json_home, '/v3') return flask.Response( response=jsonutils.dumps(v3_json_home), mimetype=MimeTypes.JSON_HOME, ) else: identity_url = '%s/' % ks_flask.base_url() versions = _get_versions_list(identity_url) # Set the preferred version to the latest "stable" version. # TODO(morgan): If we ever have more API versions find the latest # stable version instead of just using the "base_url", for now we # simply have a single version so use it as the preferred location. 
preferred_location = identity_url response = flask.Response( response=jsonutils.dumps( {'versions': {'values': list(versions.values())}} ), mimetype=MimeTypes.JSON, status=http.client.MULTIPLE_CHOICES, ) response.headers['Location'] = preferred_location return response @_DISCOVERY_BLUEPRINT.route('/v3') def get_version_v3(): if v3_mime_type_best_match() == MimeTypes.JSON_HOME: # RENDER JSON-Home form, we have a clever client who will # understand the JSON-Home document. content = json_home.JsonHomeResources.resources() return flask.Response( response=jsonutils.dumps(content), mimetype=MimeTypes.JSON_HOME ) else: identity_url = '%s/' % ks_flask.base_url() versions = _get_versions_list(identity_url) return flask.Response( response=jsonutils.dumps({'version': versions['v3']}), mimetype=MimeTypes.JSON, ) class DiscoveryAPI: # NOTE(morgan): The Discovery Bits are so special they cannot conform to # Flask-RESTful-isms. We are using straight flask Blueprint(s) here so that # we have a lot more control over what the heck is going on. This is just # a stub object to ensure we can load discovery in the same manner we # handle the rest of keystone.api @staticmethod def instantiate_and_register_to_app(flask_app): # This is a lot more magical than the normal setup as the discovery # API does not lean on flask-restful. We're statically building a # single blueprint here. flask_app.register_blueprint(_DISCOVERY_BLUEPRINT) APIs = (DiscoveryAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/domains.py0000664000175000017500000005026400000000000020110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/domains import functools import http.client import flask import flask_restful from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone import exception from keystone.resource import schema from keystone.server import flask as ks_flask CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs def _build_domain_enforcement_target(): target = {} try: target['domain'] = PROVIDERS.resource_api.get_domain( flask.request.view_args.get('domain_id') ) except exception.NotFound: # nosec # Defer existence in the event the domain doesn't exist, we'll # check this later anyway. 
pass return target def _build_enforcement_target(allow_non_existing=False): target = {} if flask.request.view_args: domain_id = flask.request.view_args.get('domain_id', None) if domain_id: target['domain'] = PROVIDERS.resource_api.get_domain(domain_id) role_id = flask.request.view_args.get('role_id', None) if role_id: target['role'] = PROVIDERS.role_api.get_role(role_id) if flask.request.view_args.get('user_id'): try: target['user'] = PROVIDERS.identity_api.get_user( flask.request.view_args['user_id'] ) except exception.UserNotFound: if not allow_non_existing: raise else: try: target['group'] = PROVIDERS.identity_api.get_group( flask.request.view_args.get('group_id') ) except exception.GroupNotFound: if not allow_non_existing: raise return target class DomainResource(ks_flask.ResourceBase): collection_key = 'domains' member_key = 'domain' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='resource_api', method='get_domain' ) def get(self, domain_id=None): """Get domain or list domains. GET/HEAD /v3/domains GET/HEAD /v3/domains/{domain_id} """ if domain_id is not None: return self._get_domain(domain_id) return self._list_domains() def _get_domain(self, domain_id): ENFORCER.enforce_call( action='identity:get_domain', build_target=_build_domain_enforcement_target, ) domain = PROVIDERS.resource_api.get_domain(domain_id) return self.wrap_member(domain) def _list_domains(self): filters = ['name', 'enabled'] target = None if self.oslo_context.domain_id: target = {'domain': {'id': self.oslo_context.domain_id}} ENFORCER.enforce_call( action='identity:list_domains', filters=filters, target_attr=target ) hints = self.build_driver_hints(filters) refs = PROVIDERS.resource_api.list_domains(hints=hints) if self.oslo_context.domain_id: domain_id = self.oslo_context.domain_id filtered_refs = [ref for ref in refs if ref['id'] == domain_id] else: filtered_refs = refs return self.wrap_collection(filtered_refs, hints=hints) def post(self): """Create domain. 
POST /v3/domains """ ENFORCER.enforce_call(action='identity:create_domain') domain = self.request_body_json.get('domain', {}) validation.lazy_validate(schema.domain_create, domain) domain_id = domain.get('explicit_domain_id') if domain_id is None: domain = self._assign_unique_id(domain) else: # Domain ID validation provided by PyCADF try: self._validate_id_format(domain_id) except ValueError: raise exception.DomainIdInvalid domain['id'] = domain_id domain = self._normalize_dict(domain) ref = PROVIDERS.resource_api.create_domain( domain['id'], domain, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def patch(self, domain_id): """Update domain. PATCH /v3/domains/{domain_id} """ ENFORCER.enforce_call(action='identity:update_domain') domain = self.request_body_json.get('domain', {}) validation.lazy_validate(schema.domain_update, domain) PROVIDERS.resource_api.get_domain(domain_id) ref = PROVIDERS.resource_api.update_domain( domain_id, domain, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, domain_id): """Delete domain. DELETE /v3/domains/{domain_id} """ ENFORCER.enforce_call(action='identity:delete_domain') PROVIDERS.resource_api.delete_domain( domain_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class DomainConfigBase(ks_flask.ResourceBase): member_key = 'config' def get(self, domain_id=None, group=None, option=None): """Check if config option exists. GET/HEAD /v3/domains/{domain_id}/config GET/HEAD /v3/domains/{domain_id}/config/{group} GET/HEAD /v3/domains/{domain_id}/config/{group}/{option} """ err = None config = {} try: PROVIDERS.resource_api.get_domain(domain_id) except Exception as e: # nosec # We don't raise out here, we raise out after enforcement, this # ensures we do not leak domain existance. 
err = e finally: if group and group == 'security_compliance': config = self._get_security_compliance_config( domain_id, group, option ) else: config = self._get_config(domain_id, group, option) if err is not None: raise err return {self.member_key: config} def _get_config(self, domain_id, group, option): ENFORCER.enforce_call(action='identity:get_domain_config') return PROVIDERS.domain_config_api.get_config( domain_id, group=group, option=option ) def _get_security_compliance_config(self, domain_id, group, option): ENFORCER.enforce_call( action='identity:get_security_compliance_domain_config' ) return PROVIDERS.domain_config_api.get_security_compliance_config( domain_id, group, option=option ) def patch(self, domain_id=None, group=None, option=None): """Update domain config option. PATCH /v3/domains/{domain_id}/config PATCH /v3/domains/{domain_id}/config/{group} PATCH /v3/domains/{domain_id}/config/{group}/{option} """ ENFORCER.enforce_call(action='identity:update_domain_config') PROVIDERS.resource_api.get_domain(domain_id) config = self.request_body_json.get('config', {}) ref = PROVIDERS.domain_config_api.update_config( domain_id, config, group, option=option ) return {self.member_key: ref} def delete(self, domain_id=None, group=None, option=None): """Delete domain config. DELETE /v3/domains/{domain_id}/config DELETE /v3/domains/{domain_id}/config/{group} DELETE /v3/domains/{domain_id}/config/{group}/{option} """ ENFORCER.enforce_call(action='identity:delete_domain_config') PROVIDERS.resource_api.get_domain(domain_id) PROVIDERS.domain_config_api.delete_config( domain_id, group, option=option ) return None, http.client.NO_CONTENT class DomainConfigResource(DomainConfigBase): """Provides config routing functionality. This class leans on DomainConfigBase to provide the following APIs: GET/HEAD /v3/domains/{domain_id}/config PATCH /v3/domains/{domain_id}/config DELETE /v3/domains/{domain_id}/config """ def put(self, domain_id): """Create domain config. 
PUT /v3/domains/{domain_id}/config """ ENFORCER.enforce_call(action='identity:create_domain_config') PROVIDERS.resource_api.get_domain(domain_id) config = self.request_body_json.get('config', {}) original_config = ( PROVIDERS.domain_config_api.get_config_with_sensitive_info( domain_id ) ) ref = PROVIDERS.domain_config_api.create_config(domain_id, config) if original_config: return {self.member_key: ref} else: return {self.member_key: ref}, http.client.CREATED class DomainConfigGroupResource(DomainConfigBase): """Provides config group routing functionality. This class leans on DomainConfigBase to provide the following APIs: GET/HEAD /v3/domains/{domain_id}/config/{group} PATCH /v3/domains/{domain_id}/config/{group} DELETE /v3/domains/{domain_id}/config/{group} """ class DomainConfigOptionResource(DomainConfigBase): """Provides config option routing functionality. This class leans on DomainConfigBase to provide the following APIs: GET/HEAD /v3/domains/{domain_id}/config/{group}/{option} PATCH /v3/domains/{domain_id}/config/{group}/{option} DELETE /v3/domains/{domain_id}/config/{group}/{option} """ class DefaultConfigResource(flask_restful.Resource): def get(self): """Get default domain config. GET/HEAD /v3/domains/config/default """ ENFORCER.enforce_call(action='identity:get_domain_config_default') ref = PROVIDERS.domain_config_api.get_config_default() return {'config': ref} class DefaultConfigGroupResource(flask_restful.Resource): def get(self, group=None): """Get default domain group config. GET/HEAD /v3/domains/config/{group}/default """ ENFORCER.enforce_call(action='identity:get_domain_config_default') ref = PROVIDERS.domain_config_api.get_config_default(group=group) return {'config': ref} class DefaultConfigOptionResource(flask_restful.Resource): def get(self, group=None, option=None): """Get default domain group option config. 
GET/HEAD /v3/domains/config/{group}/{option}/default """ ENFORCER.enforce_call(action='identity:get_domain_config_default') ref = PROVIDERS.domain_config_api.get_config_default( group=group, option=option ) return {'config': ref} class DomainUserListResource(flask_restful.Resource): def get(self, domain_id=None, user_id=None): """Get user grant. GET/HEAD /v3/domains/{domain_id}/users/{user_id}/roles """ ENFORCER.enforce_call( action='identity:list_grants', build_target=_build_enforcement_target, ) refs = PROVIDERS.assignment_api.list_grants( domain_id=domain_id, user_id=user_id, inherited_to_projects=False ) return ks_flask.ResourceBase.wrap_collection( refs, collection_name='roles' ) class DomainUserResource(ks_flask.ResourceBase): member_key = 'grant' collection_key = 'grants' def get(self, domain_id=None, user_id=None, role_id=None): """Check if a user has a specific role on the domain. GET/HEAD /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:check_grant', build_target=_build_enforcement_target, ) PROVIDERS.assignment_api.get_grant( role_id, domain_id=domain_id, user_id=user_id, inherited_to_projects=False, ) return None, http.client.NO_CONTENT def put(self, domain_id=None, user_id=None, role_id=None): """Create a role to a user on a domain. PUT /v3/domains/{domain_id}/users/{user_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:create_grant', build_target=_build_enforcement_target, ) PROVIDERS.assignment_api.create_grant( role_id, domain_id=domain_id, user_id=user_id, inherited_to_projects=False, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT def delete(self, domain_id=None, user_id=None, role_id=None): """Revoke a role from user on a domain. 
        DELETE /v3/domains/{domain_id}/users/{user_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:revoke_grant',
            # NOTE(review): allow_non_existing presumably lets the grant be
            # revoked even when the referenced entity is gone — confirm
            # against _build_enforcement_target's definition.
            build_target=functools.partial(
                _build_enforcement_target, allow_non_existing=True
            ),
        )
        PROVIDERS.assignment_api.delete_grant(
            role_id,
            domain_id=domain_id,
            user_id=user_id,
            inherited_to_projects=False,
            initiator=self.audit_initiator,
        )
        return None, http.client.NO_CONTENT


class DomainGroupListResource(flask_restful.Resource):
    def get(self, domain_id=None, group_id=None):
        """List all domain grants for a specific group.

        GET/HEAD /v3/domains/{domain_id}/groups/{group_id}/roles
        """
        # Enforce policy before listing so no grant data leaks to callers
        # that are not authorized for this domain/group target.
        ENFORCER.enforce_call(
            action='identity:list_grants',
            build_target=_build_enforcement_target,
        )
        refs = PROVIDERS.assignment_api.list_grants(
            domain_id=domain_id, group_id=group_id, inherited_to_projects=False
        )
        return ks_flask.ResourceBase.wrap_collection(
            refs, collection_name='roles'
        )


class DomainGroupResource(ks_flask.ResourceBase):
    """CRUD for a single group role grant on a domain."""

    member_key = 'grant'
    collection_key = 'grants'

    def get(self, domain_id=None, group_id=None, role_id=None):
        """Check if a group has a specific role on a domain.

        GET/HEAD /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}

        Returns 204 No Content when the grant exists; get_grant raises if
        it does not.
        """
        ENFORCER.enforce_call(
            action='identity:check_grant',
            build_target=_build_enforcement_target,
        )
        PROVIDERS.assignment_api.get_grant(
            role_id,
            domain_id=domain_id,
            group_id=group_id,
            inherited_to_projects=False,
        )
        return None, http.client.NO_CONTENT

    def put(self, domain_id=None, group_id=None, role_id=None):
        """Grant a role to a group on a domain.

        PUT /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:create_grant',
            build_target=_build_enforcement_target,
        )
        PROVIDERS.assignment_api.create_grant(
            role_id,
            domain_id=domain_id,
            group_id=group_id,
            inherited_to_projects=False,
            initiator=self.audit_initiator,
        )
        return None, http.client.NO_CONTENT

    def delete(self, domain_id=None, group_id=None, role_id=None):
        """Revoke a role from a group on a domain.
DELETE /v3/domains/{domain_id}/groups/{group_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( _build_enforcement_target, allow_non_existing=True ), ) PROVIDERS.assignment_api.delete_grant( role_id, domain_id=domain_id, group_id=group_id, inherited_to_projects=False, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT class DomainAPI(ks_flask.APIBase): CONFIG_GROUP = json_home.build_v3_parameter_relation('config_group') CONFIG_OPTION = json_home.build_v3_parameter_relation('config_option') _name = 'domains' _import_name = __name__ resources = [DomainResource] resource_mapping = [ ks_flask.construct_resource_map( resource=DomainConfigResource, url=('/domains//config'), resource_kwargs={}, rel='domain_config', path_vars={'domain_id': json_home.Parameters.DOMAIN_ID}, ), ks_flask.construct_resource_map( resource=DomainConfigGroupResource, url='/domains//config/', resource_kwargs={}, rel='domain_config_group', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': CONFIG_GROUP, }, ), ks_flask.construct_resource_map( resource=DomainConfigOptionResource, url=( '/domains//config/' '/' ), resource_kwargs={}, rel='domain_config_option', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': CONFIG_GROUP, 'option': CONFIG_OPTION, }, ), ks_flask.construct_resource_map( resource=DefaultConfigResource, url=('/domains/config/default'), resource_kwargs={}, rel='domain_config_default', path_vars={}, ), ks_flask.construct_resource_map( resource=DefaultConfigGroupResource, url='/domains/config//default', resource_kwargs={}, rel='domain_config_default_group', path_vars={'group': CONFIG_GROUP}, ), ks_flask.construct_resource_map( resource=DefaultConfigOptionResource, url=('/domains/config///default'), resource_kwargs={}, rel='domain_config_default_option', path_vars={'group': CONFIG_GROUP, 'option': CONFIG_OPTION}, ), ks_flask.construct_resource_map( resource=DomainUserListResource, 
url=('/domains//users//roles'), resource_kwargs={}, rel='domain_user_roles', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=DomainUserResource, url=( '/domains//users' '//roles/' ), resource_kwargs={}, rel='domain_user_role', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=DomainGroupListResource, url=('/domains//groups//roles'), resource_kwargs={}, rel='domain_group_roles', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }, ), ks_flask.construct_resource_map( resource=DomainGroupResource, url=( '/domains//groups' '//roles/' ), resource_kwargs={}, rel='domain_group_role', path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ] APIs = (DomainAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/ec2tokens.py0000664000175000017500000000672100000000000020352 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/ec2tokens import http.client import urllib.parse import flask from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_serialization import jsonutils from keystone.api._shared import EC2_S3_Resource from keystone.api._shared import json_home_relations from keystone.common import render_token from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask CRED_TYPE_EC2 = 'ec2' class EC2TokensResource(EC2_S3_Resource.ResourceBase): @staticmethod def _check_signature(creds_ref, credentials): signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) # NOTE(davechecn): credentials.get('signature') is not guaranteed to # exist, we need to check it explicitly. if credentials.get('signature'): if utils.auth_str_equal(credentials['signature'], signature): return True # NOTE(vish): Some client libraries don't use the port when # signing requests, so try again without the port. elif ':' in credentials['host']: parsed = urllib.parse.urlsplit('//' + credentials['host']) credentials['host'] = parsed.hostname # NOTE(davechen): we need to reinitialize 'signer' to avoid # contaminated status of signature, this is similar with # other programming language libraries, JAVA for example. signer = ec2_utils.Ec2Signer(creds_ref['secret']) signature = signer.generate(credentials) if utils.auth_str_equal(credentials['signature'], signature): return True raise exception.Unauthorized(_('Invalid EC2 signature.')) # Raise the exception when credentials.get('signature') is None else: raise exception.Unauthorized(_('EC2 signature not supplied.')) @ks_flask.unenforced_api def post(self): """Authenticate ec2 token. 
POST /v3/ec2tokens """ token = self.handle_authenticate() token_reference = render_token.render_token_response_from_model(token) resp_body = jsonutils.dumps(token_reference) response = flask.make_response(resp_body, http.client.OK) response.headers['X-Subject-Token'] = token.id response.headers['Content-Type'] = 'application/json' return response class EC2TokensAPI(ks_flask.APIBase): _name = 'ec2tokens' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=EC2TokensResource, url='/ec2tokens', resource_kwargs={}, rel='ec2tokens', resource_relation_func=( json_home_relations.os_ec2_resource_rel_func ), ) ] APIs = (EC2TokensAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/endpoints.py0000664000175000017500000001342700000000000020461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/services import http.client import flask_restful from keystone.api._shared import json_home_relations from keystone.catalog import schema from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import utils from keystone.common import validation from keystone import exception from keystone import notifications from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs _resource_rel_func = json_home_relations.os_endpoint_policy_resource_rel_func def _filter_endpoint(ref): ref.pop('legacy_endpoint_id', None) ref['region'] = ref['region_id'] return ref class EndpointResource(ks_flask.ResourceBase): collection_key = 'endpoints' member_key = 'endpoint' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='catalog_api', method='get_endpoint' ) @staticmethod def _validate_endpoint_region(endpoint): """Ensure the region for the endpoint exists. If 'region_id' is used to specify the region, then we will let the manager/driver take care of this. If, however, 'region' is used, then for backward compatibility, we will auto-create the region. """ if ( endpoint.get('region_id') is None and endpoint.get('region') is not None ): # To maintain backward compatibility with clients that are # using the v3 API in the same way as they used the v2 API, # create the endpoint region, if that region does not exist # in keystone. 
            endpoint['region_id'] = endpoint.pop('region')
            try:
                PROVIDERS.catalog_api.get_region(endpoint['region_id'])
            except exception.RegionNotFound:
                # Auto-create the region for v2-style clients (see the
                # docstring above): build a minimal region ref and record
                # an audit initiator for the notification.
                region = dict(id=endpoint['region_id'])
                PROVIDERS.catalog_api.create_region(
                    region, initiator=notifications.build_audit_initiator()
                )
        return endpoint

    def _get_endpoint(self, endpoint_id):
        """Get a single endpoint.

        GET/HEAD /v3/endpoints/{endpoint_id}
        """
        ENFORCER.enforce_call(action='identity:get_endpoint')
        return self.wrap_member(
            _filter_endpoint(PROVIDERS.catalog_api.get_endpoint(endpoint_id))
        )

    def _list_endpoints(self):
        """List endpoints.

        GET/HEAD /v3/endpoints
        """
        filters = ['interface', 'service_id', 'region_id']
        ENFORCER.enforce_call(
            action='identity:list_endpoints', filters=filters
        )
        hints = self.build_driver_hints(filters)
        refs = PROVIDERS.catalog_api.list_endpoints(hints=hints)
        return self.wrap_collection(
            [_filter_endpoint(r) for r in refs], hints=hints
        )

    def get(self, endpoint_id=None):
        """Dispatch to single-get or list depending on the URL."""
        if endpoint_id is not None:
            return self._get_endpoint(endpoint_id)
        return self._list_endpoints()

    def post(self):
        """Create an endpoint.

        POST /v3/endpoints
        """
        ENFORCER.enforce_call(action='identity:create_endpoint')
        endpoint = self.request_body_json.get('endpoint')
        validation.lazy_validate(schema.endpoint_create, endpoint)
        utils.check_endpoint_url(endpoint['url'])
        endpoint = self._assign_unique_id(self._normalize_dict(endpoint))
        # May auto-create the region for v2-style 'region' input.
        endpoint = self._validate_endpoint_region(endpoint)
        ref = PROVIDERS.catalog_api.create_endpoint(
            endpoint['id'], endpoint, initiator=self.audit_initiator
        )
        return self.wrap_member(_filter_endpoint(ref)), http.client.CREATED

    def patch(self, endpoint_id):
        """Update an endpoint.

        PATCH /v3/endpoints/{endpoint_id}
        """
        ENFORCER.enforce_call(action='identity:update_endpoint')
        endpoint = self.request_body_json.get('endpoint')
        validation.lazy_validate(schema.endpoint_update, endpoint)
        self._require_matching_id(endpoint)
        endpoint = self._validate_endpoint_region(endpoint)
        ref = PROVIDERS.catalog_api.update_endpoint(
            endpoint_id, endpoint, initiator=self.audit_initiator
        )
        return self.wrap_member(_filter_endpoint(ref))

    def delete(self, endpoint_id):
        """Delete an endpoint.

        DELETE /v3/endpoints/{endpoint_id}
        """
        ENFORCER.enforce_call(action='identity:delete_endpoint')
PROVIDERS.catalog_api.delete_endpoint( endpoint_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class EndpointPolicyEndpointResource(flask_restful.Resource): def get(self, endpoint_id): ENFORCER.enforce_call(action='identity:get_policy_for_endpoint') PROVIDERS.catalog_api.get_endpoint(endpoint_id) ref = PROVIDERS.endpoint_policy_api.get_policy_for_endpoint( endpoint_id ) return ks_flask.ResourceBase.wrap_member( ref, collection_name='endpoints', member_name='policy' ) class EndpointAPI(ks_flask.APIBase): _name = 'endpoints' _import_name = __name__ resources = [EndpointResource] resource_mapping = [ ks_flask.construct_resource_map( resource=EndpointPolicyEndpointResource, url='/endpoints//OS-ENDPOINT-POLICY/policy', resource_kwargs={}, rel='endpoint_policy', resource_relation_func=_resource_rel_func, path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID}, ) ] APIs = (EndpointAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/groups.py0000664000175000017500000002124200000000000017767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/groups import functools import http.client import flask import flask_restful from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone import exception from keystone.identity import schema from keystone import notifications from keystone.server import flask as ks_flask CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs def _build_group_target_enforcement(): target = {} try: target['group'] = PROVIDERS.identity_api.get_group( flask.request.view_args.get('group_id') ) except exception.NotFound: # nosec # Defer existance in the event the group doesn't exist, we'll # check this later anyway. pass return target class GroupsResource(ks_flask.ResourceBase): collection_key = 'groups' member_key = 'group' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='identity_api', method='get_group' ) def get(self, group_id=None): if group_id is not None: return self._get_group(group_id) return self._list_groups() def _get_group(self, group_id): """Get a group reference. GET/HEAD /groups/{group_id} """ ENFORCER.enforce_call( action='identity:get_group', build_target=_build_group_target_enforcement, ) return self.wrap_member(PROVIDERS.identity_api.get_group(group_id)) def _list_groups(self): """List groups. 
        GET/HEAD /groups
        """
        filters = ['domain_id', 'name']
        target = None
        # For domain-scoped callers, scope enforcement (and the result set
        # below) to the caller's own domain.
        if self.oslo_context.domain_id:
            target = {'group': {'domain_id': self.oslo_context.domain_id}}
        ENFORCER.enforce_call(
            action='identity:list_groups', filters=filters, target_attr=target
        )
        hints = self.build_driver_hints(filters)
        domain = self._get_domain_id_for_list_request()
        refs = PROVIDERS.identity_api.list_groups(
            domain_scope=domain, hints=hints
        )
        if self.oslo_context.domain_id:
            # Drop any group outside the caller's domain so a domain-scoped
            # token never sees other domains' groups.
            filtered_refs = []
            for ref in refs:
                if ref['domain_id'] == target['group']['domain_id']:
                    filtered_refs.append(ref)
            refs = filtered_refs
        return self.wrap_collection(refs, hints=hints)

    def post(self):
        """Create group.

        POST /groups
        """
        group = self.request_body_json.get('group', {})
        target = {'group': group}
        ENFORCER.enforce_call(
            action='identity:create_group', target_attr=target
        )
        validation.lazy_validate(schema.group_create, group)
        group = self._normalize_dict(group)
        group = self._normalize_domain_id(group)
        ref = PROVIDERS.identity_api.create_group(
            group, initiator=self.audit_initiator
        )
        return self.wrap_member(ref), http.client.CREATED

    def patch(self, group_id):
        """Update group.

        PATCH /groups/{group_id}
        """
        ENFORCER.enforce_call(
            action='identity:update_group',
            build_target=_build_group_target_enforcement,
        )
        group = self.request_body_json.get('group', {})
        validation.lazy_validate(schema.group_update, group)
        self._require_matching_id(group)
        ref = PROVIDERS.identity_api.update_group(
            group_id, group, initiator=self.audit_initiator
        )
        return self.wrap_member(ref)

    def delete(self, group_id):
        """Delete group.

        DELETE /groups/{group_id}
        """
        ENFORCER.enforce_call(action='identity:delete_group')
        PROVIDERS.identity_api.delete_group(
            group_id, initiator=self.audit_initiator
        )
        return None, http.client.NO_CONTENT


class GroupUsersResource(ks_flask.ResourceBase):
    def get(self, group_id):
        """Get list of users in group.
        GET/HEAD /groups/{group_id}/users
        """
        filters = ['domain_id', 'enabled', 'name', 'password_expires_at']
        target = None
        try:
            target = {'group': PROVIDERS.identity_api.get_group(group_id)}
        except exception.GroupNotFound:
            # NOTE(morgan): If we have an issue populating the group
            # data, leave target empty. This is the safest route and does not
            # leak data before enforcement happens.
            pass
        ENFORCER.enforce_call(
            action='identity:list_users_in_group',
            target_attr=target,
            filters=filters,
        )
        hints = ks_flask.ResourceBase.build_driver_hints(filters)
        refs = PROVIDERS.identity_api.list_users_in_group(
            group_id, hints=hints
        )
        if self.oslo_context.domain_id:
            # Domain-scoped callers only see users from their own domain.
            filtered_refs = []
            for ref in refs:
                if ref['domain_id'] == self.oslo_context.domain_id:
                    filtered_refs.append(ref)
            refs = filtered_refs
        return ks_flask.ResourceBase.wrap_collection(
            refs, hints=hints, collection_name='users'
        )


class UserGroupCRUDResource(flask_restful.Resource):
    """Membership operations for a (group, user) pair."""

    @staticmethod
    def _build_enforcement_target_attr(user_id, group_id):
        """Build a policy target with the group and user refs when they exist.

        Missing entities are simply omitted from the target so existence is
        not leaked before enforcement.
        """
        target = {}
        try:
            target['group'] = PROVIDERS.identity_api.get_group(group_id)
        except exception.GroupNotFound:
            # Don't populate group data if group is not found.
            pass
        try:
            target['user'] = PROVIDERS.identity_api.get_user(user_id)
        except exception.UserNotFound:
            # Don't populate user data if user is not found
            pass
        return target

    def get(self, group_id, user_id):
        """Check if a user is in a group.

        GET/HEAD /groups/{group_id}/users/{user_id}

        Returns 204 No Content when the user is a member;
        check_user_in_group raises otherwise.
        """
        ENFORCER.enforce_call(
            action='identity:check_user_in_group',
            build_target=functools.partial(
                self._build_enforcement_target_attr, user_id, group_id
            ),
        )
        PROVIDERS.identity_api.check_user_in_group(user_id, group_id)
        return None, http.client.NO_CONTENT

    def put(self, group_id, user_id):
        """Add user to group.
PUT /groups/{group_id}/users/{user_id} """ ENFORCER.enforce_call( action='identity:add_user_to_group', build_target=functools.partial( self._build_enforcement_target_attr, user_id, group_id ), ) PROVIDERS.identity_api.add_user_to_group( user_id, group_id, initiator=notifications.build_audit_initiator() ) return None, http.client.NO_CONTENT def delete(self, group_id, user_id): """Remove user from group. DELETE /groups/{group_id}/users/{user_id} """ ENFORCER.enforce_call( action='identity:remove_user_from_group', build_target=functools.partial( self._build_enforcement_target_attr, user_id, group_id ), ) PROVIDERS.identity_api.remove_user_from_group( user_id, group_id, initiator=notifications.build_audit_initiator() ) return None, http.client.NO_CONTENT class GroupAPI(ks_flask.APIBase): _name = 'groups' _import_name = __name__ resources = [GroupsResource] resource_mapping = [ ks_flask.construct_resource_map( resource=GroupUsersResource, url='/groups//users', resource_kwargs={}, rel='group_users', path_vars={'group_id': json_home.Parameters.GROUP_ID}, ), ks_flask.construct_resource_map( resource=UserGroupCRUDResource, url='/groups//users/', resource_kwargs={}, rel='group_user', path_vars={ 'group_id': json_home.Parameters.GROUP_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ] APIs = (GroupAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/limits.py0000664000175000017500000001315300000000000017753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/limits import http.client import flask import flask_restful from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone import exception from keystone.limit import schema from keystone.server import flask as ks_flask PROVIDERS = provider_api.ProviderAPIs ENFORCER = rbac_enforcer.RBACEnforcer def _build_limit_enforcement_target(): target = {} try: limit = PROVIDERS.unified_limit_api.get_limit( flask.request.view_args.get('limit_id') ) target['limit'] = limit if limit.get('project_id'): project = PROVIDERS.resource_api.get_project(limit['project_id']) target['limit']['project'] = project elif limit.get('domain_id'): domain = PROVIDERS.resource_api.get_domain(limit['domain_id']) target['limit']['domain'] = domain except exception.NotFound: # nosec # Defer the existence check in the event the limit doesn't exist, this # is checked later anyway. 
        pass
    return target


class LimitsResource(ks_flask.ResourceBase):
    """CRUD resource for /v3/limits (unified limits, experimental)."""

    collection_key = 'limits'
    member_key = 'limit'
    json_home_resource_status = json_home.Status.EXPERIMENTAL
    get_member_from_driver = PROVIDERS.deferred_provider_lookup(
        api='unified_limit_api', method='get_limit'
    )

    def _list_limits(self):
        """List limits visible to the caller's token scope.

        GET/HEAD /v3/limits
        """
        filters = [
            'service_id',
            'region_id',
            'resource_name',
            'project_id',
            'domain_id',
        ]
        ENFORCER.enforce_call(action='identity:list_limits', filters=filters)
        hints = self.build_driver_hints(filters)
        filtered_refs = []
        if self.oslo_context.system_scope:
            # System scope sees everything.
            refs = PROVIDERS.unified_limit_api.list_limits(hints)
            filtered_refs = refs
        elif self.oslo_context.domain_id:
            # Domain scope sees the domain's own limits plus limits of
            # projects inside that domain.
            refs = PROVIDERS.unified_limit_api.list_limits(hints)
            projects = PROVIDERS.resource_api.list_projects_in_domain(
                self.oslo_context.domain_id
            )
            project_ids = [project['id'] for project in projects]
            for limit in refs:
                if limit.get('project_id'):
                    if limit['project_id'] in project_ids:
                        filtered_refs.append(limit)
                elif limit.get('domain_id'):
                    if limit['domain_id'] == self.oslo_context.domain_id:
                        filtered_refs.append(limit)
        elif self.oslo_context.project_id:
            # Project scope: push the project filter into the driver query.
            hints.add_filter('project_id', self.oslo_context.project_id)
            refs = PROVIDERS.unified_limit_api.list_limits(hints)
            filtered_refs = refs
        # NOTE(review): unscoped tokens fall through with an empty list.
        return self.wrap_collection(filtered_refs, hints=hints)

    def _get_limit(self, limit_id):
        """Get a single limit.

        GET/HEAD /v3/limits/{limit_id}
        """
        ENFORCER.enforce_call(
            action='identity:get_limit',
            build_target=_build_limit_enforcement_target,
        )
        ref = PROVIDERS.unified_limit_api.get_limit(limit_id)
        return self.wrap_member(ref)

    def get(self, limit_id=None):
        """Dispatch to single-get or list depending on the URL."""
        if limit_id is not None:
            return self._get_limit(limit_id)
        return self._list_limits()

    def post(self):
        """Create one or more limits in a single request.

        POST /v3/limits
        """
        ENFORCER.enforce_call(action='identity:create_limits')
        limits_b = (flask.request.get_json(silent=True, force=True) or {}).get(
            'limits', {}
        )
        validation.lazy_validate(schema.limit_create, limits_b)
        limits = [
            self._assign_unique_id(self._normalize_dict(limit))
            for limit in limits_b
        ]
        refs = PROVIDERS.unified_limit_api.create_limits(limits)
refs = self.wrap_collection(refs) refs.pop('links') return refs, http.client.CREATED def patch(self, limit_id): ENFORCER.enforce_call(action='identity:update_limit') limit = (flask.request.get_json(silent=True, force=True) or {}).get( 'limit', {} ) validation.lazy_validate(schema.limit_update, limit) self._require_matching_id(limit) ref = PROVIDERS.unified_limit_api.update_limit(limit_id, limit) return self.wrap_member(ref) def delete(self, limit_id): ENFORCER.enforce_call(action='identity:delete_limit') return ( PROVIDERS.unified_limit_api.delete_limit(limit_id), http.client.NO_CONTENT, ) class LimitModelResource(flask_restful.Resource): def get(self): ENFORCER.enforce_call(action='identity:get_limit_model') model = PROVIDERS.unified_limit_api.get_model() return {'model': model} class LimitsAPI(ks_flask.APIBase): _name = 'limits' _import_name = __name__ resources = [LimitsResource] resource_mapping = [ ks_flask.construct_resource_map( resource=LimitModelResource, resource_kwargs={}, url='/limits/model', rel='limit_model', status=json_home.Status.EXPERIMENTAL, ) ] APIs = (LimitsAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_ep_filter.py0000664000175000017500000003303300000000000021123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /OS-EP-FILTER import http.client import flask_restful from keystone.api._shared import json_home_relations from keystone.api import endpoints as _endpoints_api from keystone.catalog import schema from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs _build_resource_relation = json_home_relations.os_ep_filter_resource_rel_func _build_parameter_relation = json_home_relations.os_ep_filter_parameter_rel_func _ENDPOINT_GROUP_PARAMETER_RELATION = _build_parameter_relation( parameter_name='endpoint_group_id' ) # NOTE(morgan): This is shared from keystone.api.endpoint, this is a special # case where cross-api code is used. This pattern should not be replicated. _filter_endpoint = _endpoints_api._filter_endpoint class EndpointGroupsResource(ks_flask.ResourceBase): collection_key = 'endpoint_groups' member_key = 'endpoint_group' api_prefix = '/OS-EP-FILTER' json_home_resource_rel_func = _build_resource_relation json_home_parameter_rel_func = _build_parameter_relation @staticmethod def _require_valid_filter(endpoint_group): valid_filter_keys = ['service_id', 'region_id', 'interface'] filters = endpoint_group.get('filters') for key in filters.keys(): if key not in valid_filter_keys: raise exception.ValidationError( attribute=' or '.join(valid_filter_keys), target='endpoint_group', ) def _get_endpoint_group(self, endpoint_group_id): ENFORCER.enforce_call(action='identity:get_endpoint_group') return self.wrap_member( PROVIDERS.catalog_api.get_endpoint_group(endpoint_group_id) ) def _list_endpoint_groups(self): filters = 'name' ENFORCER.enforce_call( action='identity:list_endpoint_groups', filters=filters ) hints = self.build_driver_hints(filters) 
refs = PROVIDERS.catalog_api.list_endpoint_groups(hints) return self.wrap_collection(refs, hints=hints) def get(self, endpoint_group_id=None): if endpoint_group_id is not None: return self._get_endpoint_group(endpoint_group_id) return self._list_endpoint_groups() def post(self): ENFORCER.enforce_call(action='identity:create_endpoint_group') ep_group = self.request_body_json.get('endpoint_group', {}) validation.lazy_validate(schema.endpoint_group_create, ep_group) if not ep_group.get('filters'): # TODO(morgan): Make this not require substitution. Substitution is # done here due to String Freeze in the Rocky release. msg = _('%s field is required and cannot be empty') % 'filters' raise exception.ValidationError(message=msg) self._require_valid_filter(ep_group) ep_group = self._assign_unique_id(ep_group) return ( self.wrap_member( PROVIDERS.catalog_api.create_endpoint_group( ep_group['id'], ep_group ) ), http.client.CREATED, ) def patch(self, endpoint_group_id): ENFORCER.enforce_call(action='identity:update_endpoint_group') ep_group = self.request_body_json.get('endpoint_group', {}) validation.lazy_validate(schema.endpoint_group_update, ep_group) if 'filters' in ep_group: self._require_valid_filter(ep_group) self._require_matching_id(ep_group) return self.wrap_member( PROVIDERS.catalog_api.update_endpoint_group( endpoint_group_id, ep_group ) ) def delete(self, endpoint_group_id): ENFORCER.enforce_call(action='identity:delete_endpoint_group') return ( PROVIDERS.catalog_api.delete_endpoint_group(endpoint_group_id), http.client.NO_CONTENT, ) class EPFilterEndpointProjectsResource(flask_restful.Resource): def get(self, endpoint_id): """Return a list of projects associated with the endpoint.""" ENFORCER.enforce_call(action='identity:list_projects_for_endpoint') PROVIDERS.catalog_api.get_endpoint(endpoint_id) refs = PROVIDERS.catalog_api.list_projects_for_endpoint(endpoint_id) projects = [ PROVIDERS.resource_api.get_project(ref['project_id']) for ref in refs ] return 
ks_flask.ResourceBase.wrap_collection( projects, collection_name='projects' ) class EPFilterProjectsEndpointsResource(flask_restful.Resource): def get(self, project_id, endpoint_id): ENFORCER.enforce_call(action='identity:check_endpoint_in_project') PROVIDERS.catalog_api.get_endpoint(endpoint_id) PROVIDERS.resource_api.get_project(project_id) PROVIDERS.catalog_api.check_endpoint_in_project( endpoint_id, project_id ) return None, http.client.NO_CONTENT def put(self, project_id, endpoint_id): ENFORCER.enforce_call(action='identity:add_endpoint_to_project') PROVIDERS.catalog_api.get_endpoint(endpoint_id) PROVIDERS.resource_api.get_project(project_id) PROVIDERS.catalog_api.add_endpoint_to_project(endpoint_id, project_id) return None, http.client.NO_CONTENT def delete(self, project_id, endpoint_id): ENFORCER.enforce_call(action='identity:remove_endpoint_from_project') return ( PROVIDERS.catalog_api.remove_endpoint_from_project( endpoint_id, project_id ), http.client.NO_CONTENT, ) class EPFilterProjectEndpointsListResource(flask_restful.Resource): def get(self, project_id): ENFORCER.enforce_call(action='identity:list_endpoints_for_project') PROVIDERS.resource_api.get_project(project_id) filtered_endpoints = PROVIDERS.catalog_api.list_endpoints_for_project( project_id ) return ks_flask.ResourceBase.wrap_collection( [_filter_endpoint(v) for v in filtered_endpoints.values()], collection_name='endpoints', ) class EndpointFilterProjectEndpointGroupsListResource(flask_restful.Resource): def get(self, project_id): ENFORCER.enforce_call( action='identity:list_endpoint_groups_for_project' ) return EndpointGroupsResource.wrap_collection( PROVIDERS.catalog_api.get_endpoint_groups_for_project(project_id) ) class EndpointFilterEPGroupsProjects(flask_restful.Resource): def get(self, endpoint_group_id): ENFORCER.enforce_call( action='identity:list_projects_associated_with_endpoint_group' ) endpoint_group_refs = ( PROVIDERS.catalog_api.list_projects_associated_with_endpoint_group( 
endpoint_group_id ) ) projects = [] for endpoint_group_ref in endpoint_group_refs: project = PROVIDERS.resource_api.get_project( endpoint_group_ref['project_id'] ) if project: projects.append(project) return ks_flask.ResourceBase.wrap_collection( projects, collection_name='projects' ) class EndpointFilterEPGroupsEndpoints(flask_restful.Resource): def get(self, endpoint_group_id): ENFORCER.enforce_call( action='identity:list_endpoints_associated_with_endpoint_group' ) filtered_endpoints = ( PROVIDERS.catalog_api.get_endpoints_filtered_by_endpoint_group( endpoint_group_id ) ) return ks_flask.ResourceBase.wrap_collection( [_filter_endpoint(e) for e in filtered_endpoints], collection_name='endpoints', ) class EPFilterGroupsProjectsResource(ks_flask.ResourceBase): collection_key = 'project_endpoint_groups' member_key = 'project_endpoint_group' @classmethod def _add_self_referential_link(cls, ref, collection_name=None): url = ( '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/projects/%(project_id)s' % { 'endpoint_group_id': ref['endpoint_group_id'], 'project_id': ref['project_id'], } ) ref.setdefault('links', {}) ref['links']['self'] = url def get(self, endpoint_group_id, project_id): ENFORCER.enforce_call(action='identity:get_endpoint_group_in_project') PROVIDERS.resource_api.get_project(project_id) PROVIDERS.catalog_api.get_endpoint_group(endpoint_group_id) ref = PROVIDERS.catalog_api.get_endpoint_group_in_project( endpoint_group_id, project_id ) return self.wrap_member(ref) def put(self, endpoint_group_id, project_id): ENFORCER.enforce_call(action='identity:add_endpoint_group_to_project') PROVIDERS.resource_api.get_project(project_id) PROVIDERS.catalog_api.get_endpoint_group(endpoint_group_id) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group_id, project_id ) return None, http.client.NO_CONTENT def delete(self, endpoint_group_id, project_id): ENFORCER.enforce_call( action='identity:remove_endpoint_group_from_project' ) 
PROVIDERS.resource_api.get_project(project_id) PROVIDERS.catalog_api.get_endpoint_group(endpoint_group_id) PROVIDERS.catalog_api.remove_endpoint_group_from_project( endpoint_group_id, project_id ) return None, http.client.NO_CONTENT class EPFilterAPI(ks_flask.APIBase): _name = 'OS-EP-FILTER' _import_name = __name__ _api_url_prefix = '/OS-EP-FILTER' resources = [EndpointGroupsResource] resource_mapping = [ ks_flask.construct_resource_map( resource=EPFilterEndpointProjectsResource, url='/endpoints//projects', resource_kwargs={}, rel='endpoint_projects', resource_relation_func=_build_resource_relation, path_vars={'endpoint_id': json_home.Parameters.ENDPOINT_ID}, ), ks_flask.construct_resource_map( resource=EPFilterProjectsEndpointsResource, url='/projects//endpoints/', resource_kwargs={}, rel='project_endpoint', resource_relation_func=_build_resource_relation, path_vars={ 'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'project_id': json_home.Parameters.PROJECT_ID, }, ), ks_flask.construct_resource_map( resource=EPFilterProjectEndpointsListResource, url='/projects//endpoints', resource_kwargs={}, rel='project_endpoints', resource_relation_func=_build_resource_relation, path_vars={'project_id': json_home.Parameters.PROJECT_ID}, ), ks_flask.construct_resource_map( resource=EndpointFilterProjectEndpointGroupsListResource, url='/projects//endpoint_groups', resource_kwargs={}, rel='project_endpoint_groups', resource_relation_func=_build_resource_relation, path_vars={'project_id': json_home.Parameters.PROJECT_ID}, ), ks_flask.construct_resource_map( resource=EndpointFilterEPGroupsEndpoints, url='/endpoint_groups//endpoints', resource_kwargs={}, rel='endpoints_in_endpoint_group', resource_relation_func=_build_resource_relation, path_vars={ 'endpoint_group_id': _ENDPOINT_GROUP_PARAMETER_RELATION }, ), ks_flask.construct_resource_map( resource=EndpointFilterEPGroupsProjects, url='/endpoint_groups//projects', resource_kwargs={}, rel='projects_associated_with_endpoint_group', 
resource_relation_func=_build_resource_relation, path_vars={ 'endpoint_group_id': _ENDPOINT_GROUP_PARAMETER_RELATION }, ), ks_flask.construct_resource_map( resource=EPFilterGroupsProjectsResource, url=( '/endpoint_groups//projects/' '' ), resource_kwargs={}, rel='endpoint_group_to_project_association', resource_relation_func=_build_resource_relation, path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'endpoint_group_id': _ENDPOINT_GROUP_PARAMETER_RELATION, }, ), ] APIs = (EPFilterAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_federation.py0000664000175000017500000004733400000000000021303 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/OS-FEDERATION import http.client import flask import flask_restful from oslo_log import log from oslo_serialization import jsonutils from keystone.api._shared import authentication from keystone.api._shared import json_home_relations from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import render_token from keystone.common import validation import keystone.conf from keystone import exception from keystone.federation import schema from keystone.federation import utils from keystone.server import flask as ks_flask LOG = log.getLogger(__name__) CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs _build_param_relation = json_home_relations.os_federation_parameter_rel_func _build_resource_relation = json_home_relations.os_federation_resource_rel_func IDP_ID_PARAMETER_RELATION = _build_param_relation(parameter_name='idp_id') PROTOCOL_ID_PARAMETER_RELATION = _build_param_relation( parameter_name='protocol_id' ) SP_ID_PARAMETER_RELATION = _build_param_relation(parameter_name='sp_id') def _combine_lists_uniquely(a, b): # it's most likely that only one of these will be filled so avoid # the combination if possible. if a and b: return {x['id']: x for x in a + b}.values() else: return a or b class _ResourceBase(ks_flask.ResourceBase): json_home_resource_rel_func = _build_resource_relation json_home_parameter_rel_func = _build_param_relation @classmethod def wrap_member(cls, ref, collection_name=None, member_name=None): cls._add_self_referential_link(ref, collection_name) cls._add_related_links(ref) return {member_name or cls.member_key: cls.filter_params(ref)} @staticmethod def _add_related_links(ref): # Do Nothing, This is in support of child class mechanisms. 
pass class IdentityProvidersResource(_ResourceBase): collection_key = 'identity_providers' member_key = 'identity_provider' api_prefix = '/OS-FEDERATION' _public_parameters = frozenset( [ 'id', 'enabled', 'description', 'remote_ids', 'links', 'domain_id', 'authorization_ttl', ] ) _id_path_param_name_override = 'idp_id' @staticmethod def _add_related_links(ref): """Add URLs for entities related with Identity Provider. Add URLs pointing to: - protocols tied to the Identity Provider """ base_path = ref['links'].get('self') if base_path is None: base_path = '/'.join(ks_flask.base_url(path='/%s' % ref['id'])) for name in ['protocols']: ref['links'][name] = '/'.join([base_path, name]) def get(self, idp_id=None): if idp_id is not None: return self._get_idp(idp_id) return self._list_idps() def _get_idp(self, idp_id): """Get an IDP resource. GET/HEAD /OS-FEDERATION/identity_providers/{idp_id} """ ENFORCER.enforce_call(action='identity:get_identity_provider') ref = PROVIDERS.federation_api.get_idp(idp_id) return self.wrap_member(ref) def _list_idps(self): """List all identity providers. GET/HEAD /OS-FEDERATION/identity_providers """ filters = ['id', 'enabled'] ENFORCER.enforce_call( action='identity:list_identity_providers', filters=filters ) hints = self.build_driver_hints(filters) refs = PROVIDERS.federation_api.list_idps(hints=hints) refs = [self.filter_params(r) for r in refs] collection = self.wrap_collection(refs, hints=hints) for r in collection[self.collection_key]: # Add the related links explicitly self._add_related_links(r) return collection def put(self, idp_id): """Create an idp resource for federated authentication. 
PUT /OS-FEDERATION/identity_providers/{idp_id} """ ENFORCER.enforce_call(action='identity:create_identity_provider') idp = self.request_body_json.get('identity_provider', {}) validation.lazy_validate(schema.identity_provider_create, idp) idp = self._normalize_dict(idp) idp.setdefault('enabled', False) idp_ref = PROVIDERS.federation_api.create_idp(idp_id, idp) return self.wrap_member(idp_ref), http.client.CREATED def patch(self, idp_id): ENFORCER.enforce_call(action='identity:update_identity_provider') idp = self.request_body_json.get('identity_provider', {}) validation.lazy_validate(schema.identity_provider_update, idp) idp = self._normalize_dict(idp) idp_ref = PROVIDERS.federation_api.update_idp(idp_id, idp) return self.wrap_member(idp_ref) def delete(self, idp_id): ENFORCER.enforce_call(action='identity:delete_identity_provider') PROVIDERS.federation_api.delete_idp(idp_id) return None, http.client.NO_CONTENT class _IdentityProvidersProtocolsResourceBase(_ResourceBase): collection_key = 'protocols' member_key = 'protocol' _public_parameters = frozenset(['id', 'mapping_id', 'links']) json_home_additional_parameters = {'idp_id': IDP_ID_PARAMETER_RELATION} json_home_collection_resource_name_override = 'identity_provider_protocols' json_home_member_resource_name_override = 'identity_provider_protocol' @staticmethod def _add_related_links(ref): """Add new entries to the 'links' subdictionary in the response. Adds 'identity_provider' key with URL pointing to related identity provider as a value. :param ref: response dictionary """ ref.setdefault('links', {}) ref['links']['identity_provider'] = ks_flask.base_url( path=ref['idp_id'] ) class IDPProtocolsListResource(_IdentityProvidersProtocolsResourceBase): def get(self, idp_id): """List protocols for an IDP. 
HEAD/GET /OS-FEDERATION/identity_providers/{idp_id}/protocols """ ENFORCER.enforce_call(action='identity:list_protocols') protocol_refs = PROVIDERS.federation_api.list_protocols(idp_id) protocols = list(protocol_refs) collection = self.wrap_collection(protocols) for r in collection[self.collection_key]: # explicitly add related links self._add_related_links(r) return collection class IDPProtocolsCRUDResource(_IdentityProvidersProtocolsResourceBase): def get(self, idp_id, protocol_id): """Get protocols for an IDP. HEAD/GET /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} """ ENFORCER.enforce_call(action='identity:get_protocol') ref = PROVIDERS.federation_api.get_protocol(idp_id, protocol_id) return self.wrap_member(ref) def put(self, idp_id, protocol_id): """Create protocol for an IDP. PUT /OS-Federation/identity_providers/{idp_id}/protocols/{protocol_id} """ ENFORCER.enforce_call(action='identity:create_protocol') protocol = self.request_body_json.get('protocol', {}) validation.lazy_validate(schema.protocol_create, protocol) protocol = self._normalize_dict(protocol) ref = PROVIDERS.federation_api.create_protocol( idp_id, protocol_id, protocol ) return self.wrap_member(ref), http.client.CREATED def patch(self, idp_id, protocol_id): """Update protocol for an IDP. PATCH /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} """ ENFORCER.enforce_call(action='identity:update_protocol') protocol = self.request_body_json.get('protocol', {}) validation.lazy_validate(schema.protocol_update, protocol) ref = PROVIDERS.federation_api.update_protocol( idp_id, protocol_id, protocol ) return self.wrap_member(ref) def delete(self, idp_id, protocol_id): """Delete a protocol from an IDP. 
DELETE /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id} """ ENFORCER.enforce_call(action='identity:delete_protocol') PROVIDERS.federation_api.delete_protocol(idp_id, protocol_id) return None, http.client.NO_CONTENT class MappingResource(_ResourceBase): collection_key = 'mappings' member_key = 'mapping' api_prefix = '/OS-FEDERATION' def get(self, mapping_id=None): if mapping_id is not None: return self._get_mapping(mapping_id) return self._list_mappings() def _get_mapping(self, mapping_id): """Get a mapping. HEAD/GET /OS-FEDERATION/mappings/{mapping_id} """ ENFORCER.enforce_call(action='identity:get_mapping') return self.wrap_member( PROVIDERS.federation_api.get_mapping(mapping_id) ) def _list_mappings(self): """List mappings. HEAD/GET /OS-FEDERATION/mappings """ ENFORCER.enforce_call(action='identity:list_mappings') return self.wrap_collection(PROVIDERS.federation_api.list_mappings()) def _internal_normalize_and_validate_attribute_mapping( self, action_executed_message="created" ): mapping = self.request_body_json.get('mapping', {}) mapping = self._normalize_dict(mapping) if not mapping.get('schema_version'): default_schema_version = ( utils.get_default_attribute_mapping_schema_version() ) LOG.debug( "A mapping [%s] was %s without providing a " "'schema_version'; therefore, we need to set one. The " "current default is [%s]. We will use this value for " "the attribute mapping being registered. It is " "recommended that one does not rely on this default " "value, as it can change, and the already persisted " "attribute mappings will remain with the previous " "default values.", mapping, action_executed_message, default_schema_version, ) mapping['schema_version'] = default_schema_version utils.validate_mapping_structure(mapping) return mapping def put(self, mapping_id): """Create a mapping. 
PUT /OS-FEDERATION/mappings/{mapping_id} """ ENFORCER.enforce_call(action='identity:create_mapping') am = self._internal_normalize_and_validate_attribute_mapping( "registered" ) mapping_ref = PROVIDERS.federation_api.create_mapping(mapping_id, am) return self.wrap_member(mapping_ref), http.client.CREATED def patch(self, mapping_id): """Update an attribute mapping for identity federation. PATCH /OS-FEDERATION/mappings/{mapping_id} """ ENFORCER.enforce_call(action='identity:update_mapping') am = self._internal_normalize_and_validate_attribute_mapping("updated") mapping_ref = PROVIDERS.federation_api.update_mapping(mapping_id, am) return self.wrap_member(mapping_ref) def delete(self, mapping_id): """Delete a mapping. DELETE /OS-FEDERATION/mappings/{mapping_id} """ ENFORCER.enforce_call(action='identity:delete_mapping') PROVIDERS.federation_api.delete_mapping(mapping_id) return None, http.client.NO_CONTENT class ServiceProvidersResource(_ResourceBase): collection_key = 'service_providers' member_key = 'service_provider' _public_parameters = frozenset( [ 'auth_url', 'id', 'enabled', 'description', 'links', 'relay_state_prefix', 'sp_url', ] ) _id_path_param_name_override = 'sp_id' api_prefix = '/OS-FEDERATION' def get(self, sp_id=None): if sp_id is not None: return self._get_service_provider(sp_id) return self._list_service_providers() def _get_service_provider(self, sp_id): """Get a service provider. GET/HEAD /OS-FEDERATION/service_providers/{sp_id} """ ENFORCER.enforce_call(action='identity:get_service_provider') return self.wrap_member(PROVIDERS.federation_api.get_sp(sp_id)) def _list_service_providers(self): """List service providers. 
GET/HEAD /OS-FEDERATION/service_providers """ filters = ['id', 'enabled'] ENFORCER.enforce_call( action='identity:list_service_providers', filters=filters ) hints = self.build_driver_hints(filters) refs = [ self.filter_params(r) for r in PROVIDERS.federation_api.list_sps(hints=hints) ] return self.wrap_collection(refs, hints=hints) def put(self, sp_id): """Create a service provider. PUT /OS-FEDERATION/service_providers/{sp_id} """ ENFORCER.enforce_call(action='identity:create_service_provider') sp = self.request_body_json.get('service_provider', {}) validation.lazy_validate(schema.service_provider_create, sp) sp = self._normalize_dict(sp) sp.setdefault('enabled', False) sp.setdefault('relay_state_prefix', CONF.saml.relay_state_prefix) sp_ref = PROVIDERS.federation_api.create_sp(sp_id, sp) return self.wrap_member(sp_ref), http.client.CREATED def patch(self, sp_id): """Update a service provider. PATCH /OS-FEDERATION/service_providers/{sp_id} """ ENFORCER.enforce_call(action='identity:update_service_provider') sp = self.request_body_json.get('service_provider', {}) validation.lazy_validate(schema.service_provider_update, sp) sp = self._normalize_dict(sp) sp_ref = PROVIDERS.federation_api.update_sp(sp_id, sp) return self.wrap_member(sp_ref) def delete(self, sp_id): """Delete a service provider. DELETE /OS-FEDERATION/service_providers/{sp_id} """ ENFORCER.enforce_call(action='identity:delete_service_provider') PROVIDERS.federation_api.delete_sp(sp_id) return None, http.client.NO_CONTENT class SAML2MetadataResource(flask_restful.Resource): @ks_flask.unenforced_api def get(self): """Get SAML2 metadata. GET/HEAD /OS-FEDERATION/saml2/metadata """ metadata_path = CONF.saml.idp_metadata_path try: with open(metadata_path) as metadata_handler: metadata = metadata_handler.read() except OSError as e: # Raise HTTP 500 in case Metadata file cannot be read. 
raise exception.MetadataFileError(reason=e) resp = flask.make_response(metadata, http.client.OK) resp.headers['Content-Type'] = 'text/xml' return resp class OSFederationAuthResource(flask_restful.Resource): @ks_flask.unenforced_api def get(self, idp_id, protocol_id): """Authenticate from dedicated uri endpoint. GET/HEAD /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id}/auth """ return self._auth(idp_id, protocol_id) @ks_flask.unenforced_api def post(self, idp_id, protocol_id): """Authenticate from dedicated uri endpoint. POST /OS-FEDERATION/identity_providers/ {idp_id}/protocols/{protocol_id}/auth """ return self._auth(idp_id, protocol_id) def _auth(self, idp_id, protocol_id): """Build and pass auth data to authentication code. Build HTTP request body for federated authentication and inject it into the ``authenticate_for_token`` function. """ auth = { 'identity': { 'methods': [protocol_id], protocol_id: { 'identity_provider': idp_id, 'protocol': protocol_id, }, } } token = authentication.authenticate_for_token(auth) token_data = render_token.render_token_response_from_model(token) resp_data = jsonutils.dumps(token_data) flask_resp = flask.make_response(resp_data, http.client.CREATED) flask_resp.headers['X-Subject-Token'] = token.id flask_resp.headers['Content-Type'] = 'application/json' return flask_resp class OSFederationAPI(ks_flask.APIBase): _name = 'OS-FEDERATION' _import_name = __name__ _api_url_prefix = '/OS-FEDERATION' resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=SAML2MetadataResource, url='/saml2/metadata', resource_kwargs={}, rel='metadata', resource_relation_func=_build_resource_relation, ), ks_flask.construct_resource_map( resource=OSFederationAuthResource, url=( '/identity_providers//protocols/' '/auth' ), resource_kwargs={}, rel='identity_provider_protocol_auth', resource_relation_func=_build_resource_relation, path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': 
PROTOCOL_ID_PARAMETER_RELATION, }, ), ] class OSFederationIdentityProvidersAPI(ks_flask.APIBase): _name = 'identity_providers' _import_name = __name__ _api_url_prefix = '/OS-FEDERATION' resources = [IdentityProvidersResource] resource_mapping = [] class OSFederationIdentityProvidersProtocolsAPI(ks_flask.APIBase): _name = 'protocols' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=IDPProtocolsCRUDResource, url=( '/OS-FEDERATION/identity_providers//protocols/' '' ), resource_kwargs={}, rel='identity_provider_protocol', resource_relation_func=_build_resource_relation, path_vars={ 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAMETER_RELATION, }, ), ks_flask.construct_resource_map( resource=IDPProtocolsListResource, url='/OS-FEDERATION/identity_providers//protocols', resource_kwargs={}, rel='identity_provider_protocols', resource_relation_func=_build_resource_relation, path_vars={'idp_id': IDP_ID_PARAMETER_RELATION}, ), ] class OSFederationMappingsAPI(ks_flask.APIBase): _name = 'mappings' _import_name = __name__ _api_url_prefix = '/OS-FEDERATION' resources = [MappingResource] resource_mapping = [] class OSFederationServiceProvidersAPI(ks_flask.APIBase): _name = 'service_providers' _import_name = __name__ _api_url_prefix = '/OS-FEDERATION' resources = [ServiceProvidersResource] resource_mapping = [] APIs = ( OSFederationAPI, OSFederationIdentityProvidersAPI, OSFederationIdentityProvidersProtocolsAPI, OSFederationMappingsAPI, OSFederationServiceProvidersAPI, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_inherit.py0000664000175000017500000004567400000000000020632 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/OS-INHERIT import functools import http.client import flask_restful from oslo_log import log from keystone.api._shared import json_home_relations from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone import exception from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs LOG = log.getLogger(__name__) _build_resource_relation = json_home_relations.os_inherit_resource_rel_func def _build_enforcement_target_attr( role_id=None, user_id=None, group_id=None, project_id=None, domain_id=None, allow_non_existing=False, ): """Check protection for role grant APIs. The policy rule might want to inspect attributes of any of the entities involved in the grant. So we get these and pass them to the check_protection() handler in the controller. """ # !!!!!!!!!! WARNING: Security Concern !!!!!!!!!! # # NOTE(morgan): This function must handle all expected exceptions, # including NOT FOUNDs otherwise the exception will be raised up to the # end user before enforcement, resulting in the exception being returned # instead of an appropriate 403. In each case, it is logged that a value # was not found and the target is explicitly set to empty. This allows for # the enforcement rule to decide what to do (most of the time raise an # appropriate 403). 
# # ############################################### target = {} if role_id: try: target['role'] = PROVIDERS.role_api.get_role(role_id) except exception.RoleNotFound: LOG.info( 'Role (%(role_id)s) not found, Enforcement target of ' '`role` remaind empty', {'role_id': role_id}, ) target['role'] = {} if user_id: try: target['user'] = PROVIDERS.identity_api.get_user(user_id) except exception.UserNotFound: if not allow_non_existing: LOG.info( 'User (%(user_id)s) was not found. Enforcement target' ' of `user` remains empty.', {'user_id': user_id}, ) target['user'] = {} else: try: target['group'] = PROVIDERS.identity_api.get_group(group_id) except exception.GroupNotFound: if not allow_non_existing: LOG.info( 'Group (%(group_id)s) was not found. Enforcement ' 'target of `group` remains empty.', {'group_id': group_id}, ) target['group'] = {} # NOTE(lbragstad): This if/else check will need to be expanded in the # future to handle system hierarchies if that is implemented. if domain_id: try: target['domain'] = PROVIDERS.resource_api.get_domain(domain_id) except exception.DomainNotFound: LOG.info( 'Domain (%(domain_id)s) was not found. Enforcement ' 'target of `domain` remains empty.', {'domain_id': domain_id}, ) target['domain'] = {} elif project_id: try: target['project'] = PROVIDERS.resource_api.get_project(project_id) except exception.ProjectNotFound: LOG.info( 'Project (%(project_id)s) was not found. Enforcement ' 'target of `project` remains empty.', {'project_id': project_id}, ) target['project'] = {} return target class OSInheritDomainGroupRolesResource(flask_restful.Resource): def get(self, domain_id, group_id, role_id): """Check for an inherited grant for a group on a domain. 
GET/HEAD /OS-INHERIT/domains/{domain_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.get_grant( domain_id=domain_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def put(self, domain_id, group_id, role_id): """Create an inherited grant for a group on a domain. PUT /OS-INHERIT/domains/{domain_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.create_grant( domain_id=domain_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def delete(self, domain_id, group_id, role_id): """Revoke an inherited grant for a group on a domain. DELETE /OS-INHERIT/domains/{domain_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.delete_grant( domain_id=domain_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT class OSInheritDomainGroupRolesListResource(flask_restful.Resource): def get(self, domain_id, group_id): """List roles (inherited) for a group on a domain. 
GET/HEAD /OS-INHERIT/domains/{domain_id}/groups/{group_id} /roles/inherited_to_projects """ ENFORCER.enforce_call( action='identity:list_grants', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, group_id=group_id, ), ) refs = PROVIDERS.assignment_api.list_grants( domain_id=domain_id, group_id=group_id, inherited_to_projects=True ) return ks_flask.ResourceBase.wrap_collection( refs, collection_name='roles' ) class OSInheritDomainUserRolesResource(flask_restful.Resource): def get(self, domain_id, user_id, role_id): """Check for an inherited grant for a user on a domain. GET/HEAD /OS-INHERIT/domains/{domain_id}/users/{user_id}/roles /{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.get_grant( domain_id=domain_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def put(self, domain_id, user_id, role_id): """Create an inherited grant for a user on a domain. PUT /OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/{role_id} /inherited_to_projects """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.create_grant( domain_id=domain_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def delete(self, domain_id, user_id, role_id): """Revoke a grant from a user on a domain. 
DELETE /OS-INHERIT/domains/{domain_id}/users/{user_id}/roles /{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.delete_grant( domain_id=domain_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT class OSInheritDomainUserRolesListResource(flask_restful.Resource): def get(self, domain_id, user_id): """List roles (inherited) for a user on a domain. GET/HEAD /OS-INHERIT/domains/{domain_id}/users/{user_id} /roles/inherited_to_projects """ ENFORCER.enforce_call( action='identity:list_grants', build_target=functools.partial( _build_enforcement_target_attr, domain_id=domain_id, user_id=user_id, ), ) refs = PROVIDERS.assignment_api.list_grants( domain_id=domain_id, user_id=user_id, inherited_to_projects=True ) return ks_flask.ResourceBase.wrap_collection( refs, collection_name='roles' ) class OSInheritProjectUserResource(flask_restful.Resource): def get(self, project_id, user_id, role_id): """Check for an inherited grant for a user on a project. GET/HEAD /OS-INHERIT/projects/{project_id}/users/{user_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.get_grant( project_id=project_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def put(self, project_id, user_id, role_id): """Create an inherited grant for a user on a project. 
PUT /OS-INHERIT/projects/{project_id}/users/{user_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.create_grant( project_id=project_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def delete(self, project_id, user_id, role_id): """Revoke an inherited grant for a user on a project. DELETE /OS-INHERIT/projects/{project_id}/users/{user_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, user_id=user_id, role_id=role_id, ), ) PROVIDERS.assignment_api.delete_grant( project_id=project_id, user_id=user_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT class OSInheritProjectGroupResource(flask_restful.Resource): def get(self, project_id, group_id, role_id): """Check for an inherited grant for a group on a project. GET/HEAD /OS-INHERIT/projects/{project_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.get_grant( project_id=project_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def put(self, project_id, group_id, role_id): """Create an inherited grant for a group on a project. 
PUT /OS-INHERIT/projects/{project_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.create_grant( project_id=project_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT def delete(self, project_id, group_id, role_id): """Revoke an inherited grant for a group on a project. DELETE /OS-INHERIT/projects/{project_id}/groups/{group_id} /roles/{role_id}/inherited_to_projects """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( _build_enforcement_target_attr, project_id=project_id, group_id=group_id, role_id=role_id, ), ) PROVIDERS.assignment_api.delete_grant( project_id=project_id, group_id=group_id, role_id=role_id, inherited_to_projects=True, ) return None, http.client.NO_CONTENT class OSInheritAPI(ks_flask.APIBase): _name = "OS-INHERIT" _import_name = __name__ _api_url_prefix = '/OS-INHERIT' resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=OSInheritDomainGroupRolesResource, url=( '/domains//groups//roles' '//inherited_to_projects' ), resource_kwargs={}, rel='domain_group_role_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=OSInheritDomainGroupRolesListResource, url=( '/domains//groups//roles' '/inherited_to_projects' ), resource_kwargs={}, rel='domain_group_roles_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }, ), ks_flask.construct_resource_map( resource=OSInheritDomainUserRolesResource, url=( 
'/domains//users//roles' '//inherited_to_projects' ), resource_kwargs={}, rel='domain_user_role_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=OSInheritDomainUserRolesListResource, url=( '/domains//users//roles' '/inherited_to_projects' ), resource_kwargs={}, rel='domain_user_roles_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=OSInheritProjectUserResource, url=( 'projects//users//roles' '//inherited_to_projects' ), resource_kwargs={}, rel='project_user_role_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=OSInheritProjectGroupResource, url=( 'projects//groups//roles' '//inherited_to_projects' ), resource_kwargs={}, rel='project_group_role_inherited_to_projects', resource_relation_func=_build_resource_relation, path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ] APIs = (OSInheritAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_oauth1.py0000664000175000017500000003572700000000000020367 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/OS-OAUTH1/ import http.client from urllib import parse as urlparse import flask import flask_restful from oslo_log import log from oslo_utils import timeutils from werkzeug import exceptions from keystone.api._shared import json_home_relations from keystone.common import authorization from keystone.common import context from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.oauth1 import core as oauth1 from keystone.oauth1 import schema from keystone.oauth1 import validator from keystone.server import flask as ks_flask LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs ENFORCER = rbac_enforcer.RBACEnforcer CONF = keystone.conf.CONF _build_resource_relation = json_home_relations.os_oauth1_resource_rel_func _build_parameter_relation = json_home_relations.os_oauth1_parameter_rel_func _ACCESS_TOKEN_ID_PARAMETER_RELATION = _build_parameter_relation( parameter_name='access_token_id' ) def _normalize_role_list(authorize_roles): roles = set() for role in authorize_roles: if role.get('id'): roles.add(role['id']) else: roles.add( PROVIDERS.role_api.get_unique_role_by_name(role['name'])['id'] ) return roles def _update_url_scheme(): """Update request url scheme with base url scheme.""" url = ks_flask.base_url() url_scheme = list(urlparse.urlparse(url))[0] req_url_list = list(urlparse.urlparse(flask.request.url)) 
req_url_list[0] = url_scheme req_url = urlparse.urlunparse(req_url_list) return req_url class _OAuth1ResourceBase(flask_restful.Resource): def get(self): # GET is not allowed, however flask restful doesn't handle "GET" not # being allowed cleanly. Here we explicitly mark is as not allowed. All # other methods not defined would raise a method NotAllowed error and # this would not be needed. raise exceptions.MethodNotAllowed(valid_methods=['POST']) class ConsumerResource(ks_flask.ResourceBase): collection_key = 'consumers' member_key = 'consumer' api_prefix = '/OS-OAUTH1' json_home_resource_rel_func = _build_resource_relation json_home_parameter_rel_func = _build_parameter_relation def _list_consumers(self): ENFORCER.enforce_call(action='identity:list_consumers') return self.wrap_collection(PROVIDERS.oauth_api.list_consumers()) def _get_consumer(self, consumer_id): ENFORCER.enforce_call(action='identity:get_consumer') return self.wrap_member(PROVIDERS.oauth_api.get_consumer(consumer_id)) def get(self, consumer_id=None): if consumer_id is None: return self._list_consumers() return self._get_consumer(consumer_id) def post(self): ENFORCER.enforce_call(action='identity:create_consumer') consumer = (flask.request.get_json(force=True, silent=True) or {}).get( 'consumer', {} ) consumer = self._normalize_dict(consumer) validation.lazy_validate(schema.consumer_create, consumer) consumer = self._assign_unique_id(consumer) ref = PROVIDERS.oauth_api.create_consumer( consumer, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def delete(self, consumer_id): ENFORCER.enforce_call(action='identity:delete_consumer') reason = ( 'Invalidating token cache because consumer %(consumer_id)s has ' 'been deleted. Authorization for users with OAuth tokens will be ' 'recalculated and enforced accordingly the next time they ' 'authenticate or validate a token.' 
% {'consumer_id': consumer_id} ) notifications.invalidate_token_cache_notification(reason) PROVIDERS.oauth_api.delete_consumer( consumer_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT def patch(self, consumer_id): ENFORCER.enforce_call(action='identity:update_consumer') consumer = (flask.request.get_json(force=True, silent=True) or {}).get( 'consumer', {} ) validation.lazy_validate(schema.consumer_update, consumer) consumer = self._normalize_dict(consumer) self._require_matching_id(consumer) ref = PROVIDERS.oauth_api.update_consumer( consumer_id, consumer, initiator=self.audit_initiator ) return self.wrap_member(ref) class RequestTokenResource(_OAuth1ResourceBase): @ks_flask.unenforced_api def post(self): oauth_headers = oauth1.get_oauth_headers(flask.request.headers) consumer_id = oauth_headers.get('oauth_consumer_key') requested_project_id = flask.request.headers.get( 'Requested-Project-Id' ) if not consumer_id: raise exception.ValidationError( attribute='oauth_consumer_key', target='request' ) if not requested_project_id: raise exception.ValidationError( attribute='Requested-Project-Id', target='request' ) # NOTE(stevemar): Ensure consumer and requested project exist PROVIDERS.resource_api.get_project(requested_project_id) PROVIDERS.oauth_api.get_consumer(consumer_id) url = _update_url_scheme() req_headers = {'Requested-Project-Id': requested_project_id} req_headers.update(flask.request.headers) request_verifier = oauth1.RequestTokenEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth1.token_generator, ) h, b, s = request_verifier.create_request_token_response( url, http_method='POST', body=flask.request.args, headers=req_headers, ) if not b: msg = _('Invalid signature') raise exception.Unauthorized(message=msg) # show the details of the failure. 
oauth1.validate_oauth_params(b) request_token_duration = CONF.oauth1.request_token_duration token_ref = PROVIDERS.oauth_api.create_request_token( consumer_id, requested_project_id, request_token_duration, initiator=notifications.build_audit_initiator(), ) result = 'oauth_token={key}&oauth_token_secret={secret}'.format( key=token_ref['id'], secret=token_ref['request_secret'], ) if CONF.oauth1.request_token_duration > 0: expiry_bit = '&oauth_expires_at=%s' % token_ref['expires_at'] result += expiry_bit resp = flask.make_response(result, http.client.CREATED) resp.headers['Content-Type'] = 'application/x-www-form-urlencoded' return resp class AccessTokenResource(_OAuth1ResourceBase): @ks_flask.unenforced_api def post(self): oauth_headers = oauth1.get_oauth_headers(flask.request.headers) consumer_id = oauth_headers.get('oauth_consumer_key') request_token_id = oauth_headers.get('oauth_token') oauth_verifier = oauth_headers.get('oauth_verifier') if not consumer_id: raise exception.ValidationError( attribute='oauth_consumer_key', target='request' ) if not request_token_id: raise exception.ValidationError( attribute='oauth_token', target='request' ) if not oauth_verifier: raise exception.ValidationError( attribute='oauth_verifier', target='request' ) req_token = PROVIDERS.oauth_api.get_request_token(request_token_id) expires_at = req_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at) ) if now > expires: raise exception.Unauthorized(_('Request token is expired')) url = _update_url_scheme() access_verifier = oauth1.AccessTokenEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth1.token_generator, ) try: h, b, s = access_verifier.create_access_token_response( url, http_method='POST', body=flask.request.args, headers=dict(flask.request.headers), ) except NotImplementedError: # Client key or request token validation failed, since keystone # does not yet support dummy client 
or dummy request token, # so we will raise unauthorized exception instead. try: PROVIDERS.oauth_api.get_consumer(consumer_id) except exception.NotFound: msg = _('Provided consumer does not exist.') LOG.warning('Provided consumer does not exist.') raise exception.Unauthorized(message=msg) if req_token['consumer_id'] != consumer_id: msg = ( 'Provided consumer key does not match stored consumer ' 'key.' ) tr_msg = _( 'Provided consumer key does not match stored ' 'consumer key.' ) LOG.warning(msg) raise exception.Unauthorized(message=tr_msg) # The response body is empty since either one of the following reasons if not b: if req_token['verifier'] != oauth_verifier: msg = 'Provided verifier does not match stored verifier' tr_msg = _('Provided verifier does not match stored verifier') else: msg = 'Invalid signature' tr_msg = _('Invalid signature') LOG.warning(msg) raise exception.Unauthorized(message=tr_msg) # show the details of the failure oauth1.validate_oauth_params(b) if not req_token.get('authorizing_user_id'): msg = _('Request Token does not have an authorizing user id.') LOG.warning('Request Token does not have an authorizing user id.') raise exception.Unauthorized(message=msg) access_token_duration = CONF.oauth1.access_token_duration token_ref = PROVIDERS.oauth_api.create_access_token( request_token_id, access_token_duration, initiator=notifications.build_audit_initiator(), ) result = 'oauth_token={key}&oauth_token_secret={secret}'.format( key=token_ref['id'], secret=token_ref['access_secret'], ) if CONF.oauth1.access_token_duration > 0: expiry_bit = '&oauth_expires_at=%s' % (token_ref['expires_at']) result += expiry_bit resp = flask.make_response(result, http.client.CREATED) resp.headers['Content-Type'] = 'application/x-www-form-urlencoded' return resp class AuthorizeResource(_OAuth1ResourceBase): def put(self, request_token_id): ENFORCER.enforce_call(action='identity:authorize_request_token') roles = (flask.request.get_json(force=True, silent=True) or {}).get( 
'roles', [] ) validation.lazy_validate(schema.request_token_authorize, roles) ctx = flask.request.environ[context.REQUEST_CONTEXT_ENV] if ctx.is_delegated_auth: raise exception.Forbidden( _( 'Cannot authorize a request token with a token issued via ' 'delegation.' ) ) req_token = PROVIDERS.oauth_api.get_request_token(request_token_id) expires_at = req_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at) ) if now > expires: raise exception.Unauthorized(_('Request token is expired')) authed_roles = _normalize_role_list(roles) # verify the authorizing user has the roles try: auth_context = flask.request.environ[ authorization.AUTH_CONTEXT_ENV ] user_token_ref = auth_context['token'] except KeyError: LOG.warning("Couldn't find the auth context.") raise exception.Unauthorized() user_id = user_token_ref.user_id project_id = req_token['requested_project_id'] user_roles = PROVIDERS.assignment_api.get_roles_for_user_and_project( user_id, project_id ) cred_set = set(user_roles) if not cred_set.issuperset(authed_roles): msg = _('authorizing user does not have role required') raise exception.Unauthorized(message=msg) # create least of just the id's for the backend role_ids = list(authed_roles) # finally authorize the token authed_token = PROVIDERS.oauth_api.authorize_request_token( request_token_id, user_id, role_ids ) to_return = {'token': {'oauth_verifier': authed_token['verifier']}} return to_return class OSAuth1API(ks_flask.APIBase): _name = 'OS-OAUTH1' _import_name = __name__ _api_url_prefix = '/OS-OAUTH1' resources = [ConsumerResource] resource_mapping = [ ks_flask.construct_resource_map( resource=RequestTokenResource, url='/request_token', resource_kwargs={}, rel='request_tokens', resource_relation_func=_build_resource_relation, ), ks_flask.construct_resource_map( resource=AccessTokenResource, url='/access_token', rel='access_tokens', resource_kwargs={}, 
resource_relation_func=_build_resource_relation, ), ks_flask.construct_resource_map( resource=AuthorizeResource, url='/authorize/', resource_kwargs={}, rel='authorize_request_token', resource_relation_func=_build_resource_relation, path_vars={ 'request_token_id': _build_parameter_relation( parameter_name='request_token_id' ) }, ), ] APIs = (OSAuth1API,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_oauth2.py0000664000175000017500000004122000000000000020351 0ustar00zuulzuul00000000000000# Copyright 2022 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import flask from flask import make_response from oslo_log import log from oslo_serialization import jsonutils from keystone.api._shared import authentication from keystone.api._shared import json_home_relations from keystone.common import provider_api from keystone.common import utils from keystone.conf import CONF from keystone import exception from keystone.federation import utils as federation_utils from keystone.i18n import _ from keystone.server import flask as ks_flask LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs _build_resource_relation = json_home_relations.os_oauth2_resource_rel_func class AccessTokenResource(ks_flask.ResourceBase): def _method_not_allowed(self): """Raise a method not allowed error.""" raise exception.OAuth2OtherError( int(http.client.METHOD_NOT_ALLOWED), http.client.responses[http.client.METHOD_NOT_ALLOWED], _('The method is not allowed for the requested URL.'), ) @ks_flask.unenforced_api def get(self): """The method is not allowed.""" self._method_not_allowed() @ks_flask.unenforced_api def head(self): """The method is not allowed.""" self._method_not_allowed() @ks_flask.unenforced_api def put(self): """The method is not allowed.""" self._method_not_allowed() @ks_flask.unenforced_api def patch(self): """The method is not allowed.""" self._method_not_allowed() @ks_flask.unenforced_api def delete(self): """The method is not allowed.""" self._method_not_allowed() @ks_flask.unenforced_api def post(self): """Get an OAuth2.0 Access Token. 
POST /v3/OS-OAUTH2/token """ grant_type = flask.request.form.get('grant_type') if grant_type is None: error = exception.OAuth2InvalidRequest( int(http.client.BAD_REQUEST), http.client.responses[http.client.BAD_REQUEST], _('The parameter grant_type is required.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' f'{error.message_format}' ) raise error if grant_type != 'client_credentials': error = exception.OAuth2UnsupportedGrantType( int(http.client.BAD_REQUEST), http.client.responses[http.client.BAD_REQUEST], _('The parameter grant_type %s is not supported.') % grant_type, ) LOG.info( 'Get OAuth2.0 Access Token API: ' f'{error.message_format}' ) raise error auth_method = '' client_id = flask.request.form.get('client_id') client_secret = flask.request.form.get('client_secret') client_cert = flask.request.environ.get("SSL_CLIENT_CERT") client_auth = flask.request.authorization if not client_cert and client_auth and client_auth.type == 'basic': client_id = client_auth.username client_secret = client_auth.password if not client_id: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'failed to get a client_id from the request.' ) raise error if client_cert: auth_method = 'tls_client_auth' elif client_secret: auth_method = 'client_secret_basic' if auth_method in CONF.oauth2.oauth2_authn_methods: if auth_method == 'tls_client_auth': return self._tls_client_auth(client_id, client_cert) if auth_method == 'client_secret_basic': return self._client_secret_basic(client_id, client_secret) error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'failed to get client credentials from the request.' 
) raise error def _client_secret_basic(self, client_id, client_secret): """Get an OAuth2.0 basic Access Token.""" auth_data = { 'identity': { 'methods': ['application_credential'], 'application_credential': { 'id': client_id, 'secret': client_secret, }, } } try: token = authentication.authenticate_for_token(auth_data) except exception.Error as error: if error.code == 401: error = exception.OAuth2InvalidClient( error.code, error.title, str(error) ) elif error.code == 400: error = exception.OAuth2InvalidRequest( error.code, error.title, str(error) ) else: error = exception.OAuth2OtherError( error.code, error.title, 'An unknown error occurred and failed to get an OAuth2.0 ' 'access token.', ) LOG.exception(error) raise error except Exception as error: error = exception.OAuth2OtherError( int(http.client.INTERNAL_SERVER_ERROR), http.client.responses[http.client.INTERNAL_SERVER_ERROR], str(error), ) LOG.exception(error) raise error resp = make_response( { 'access_token': token.id, 'token_type': 'Bearer', 'expires_in': CONF.token.expiration, } ) resp.status = '200 OK' return resp def _check_mapped_properties(self, cert_dn, user, user_domain): mapping_id = CONF.oauth2.get('oauth2_cert_dn_mapping_id') try: mapping = PROVIDERS.federation_api.get_mapping(mapping_id) except exception.MappingNotFound: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'mapping id %s is not found. 
', mapping_id, ) raise error rule_processor = federation_utils.RuleProcessor( mapping.get('id'), mapping.get('rules') ) try: mapped_properties = rule_processor.process(cert_dn) except exception.Error as error: LOG.exception(error) error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'mapping rule process failed. ' 'mapping_id: %s, rules: %s, data: %s.', mapping_id, mapping.get('rules'), jsonutils.dumps(cert_dn), ) raise error except Exception as error: LOG.exception(error) error = exception.OAuth2OtherError( int(http.client.INTERNAL_SERVER_ERROR), http.client.responses[http.client.INTERNAL_SERVER_ERROR], str(error), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'mapping rule process failed. ' 'mapping_id: %s, rules: %s, data: %s.', mapping_id, mapping.get('rules'), jsonutils.dumps(cert_dn), ) raise error mapping_user = mapped_properties.get('user', {}) mapping_user_name = mapping_user.get('name') mapping_user_id = mapping_user.get('id') mapping_user_email = mapping_user.get('email') mapping_domain = mapping_user.get('domain', {}) mapping_user_domain_id = mapping_domain.get('id') mapping_user_domain_name = mapping_domain.get('name') if mapping_user_name and mapping_user_name != user.get('name'): err = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.', 'user name', mapping_user_name, user.get('name'), ) raise err if mapping_user_id and mapping_user_id != user.get('id'): err = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: %s check failed. 
' 'DN value: %s, DB value: %s.', 'user id', mapping_user_id, user.get('id'), ) raise err if mapping_user_email and mapping_user_email != user.get('email'): err = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.', 'user email', mapping_user_email, user.get('email'), ) raise err if ( mapping_user_domain_id and mapping_user_domain_id != user_domain.get('id') ): err = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.', 'user domain id', mapping_user_domain_id, user_domain.get('id'), ) raise err if ( mapping_user_domain_name and mapping_user_domain_name != user_domain.get('name') ): err = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.', 'user domain name', mapping_user_domain_name, user_domain.get('name'), ) raise err def _tls_client_auth(self, client_id, client_cert): """Get an OAuth2.0 certificate-bound Access Token.""" try: cert_subject_dn = utils.get_certificate_subject_dn(client_cert) except exception.ValidationError: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'failed to get the subject DN from the certificate.' 
) raise error try: cert_issuer_dn = utils.get_certificate_issuer_dn(client_cert) except exception.ValidationError: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'failed to get the issuer DN from the certificate.' ) raise error client_cert_dn = {} for key in cert_subject_dn: client_cert_dn['SSL_CLIENT_SUBJECT_DN_%s' % key.upper()] = ( cert_subject_dn.get(key) ) for key in cert_issuer_dn: client_cert_dn['SSL_CLIENT_ISSUER_DN_%s' % key.upper()] = ( cert_issuer_dn.get(key) ) try: user = PROVIDERS.identity_api.get_user(client_id) except exception.UserNotFound: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'the user does not exist. user id: %s.', client_id, ) raise error project_id = user.get('default_project_id') if not project_id: error = exception.OAuth2InvalidClient( int(http.client.UNAUTHORIZED), http.client.responses[http.client.UNAUTHORIZED], _('Client authentication failed.'), ) LOG.info( 'Get OAuth2.0 Access Token API: ' 'the user does not have default project. 
user id: %s.', client_id, ) raise error user_domain = PROVIDERS.resource_api.get_domain(user.get('domain_id')) self._check_mapped_properties(client_cert_dn, user, user_domain) thumbprint = utils.get_certificate_thumbprint(client_cert) LOG.debug(f'The mTLS certificate thumbprint: {thumbprint}') try: token = PROVIDERS.token_provider_api.issue_token( user_id=client_id, method_names=['oauth2_credential'], project_id=project_id, thumbprint=thumbprint, ) except exception.Error as error: if error.code == 401: error = exception.OAuth2InvalidClient( error.code, error.title, str(error) ) elif error.code == 400: error = exception.OAuth2InvalidRequest( error.code, error.title, str(error) ) else: error = exception.OAuth2OtherError( error.code, error.title, 'An unknown error occurred and failed to get an OAuth2.0 ' 'access token.', ) LOG.exception(error) raise error except Exception as error: error = exception.OAuth2OtherError( int(http.client.INTERNAL_SERVER_ERROR), http.client.responses[http.client.INTERNAL_SERVER_ERROR], str(error), ) LOG.exception(error) raise error resp = make_response( { 'access_token': token.id, 'token_type': 'Bearer', 'expires_in': CONF.token.expiration, } ) resp.status = '200 OK' return resp class OSAuth2API(ks_flask.APIBase): _name = 'OS-OAUTH2' _import_name = __name__ _api_url_prefix = '/OS-OAUTH2' resource_mapping = [ ks_flask.construct_resource_map( resource=AccessTokenResource, url='/token', rel='token', resource_kwargs={}, resource_relation_func=_build_resource_relation, ) ] APIs = (OSAuth2API,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_revoke.py0000664000175000017500000000556200000000000020453 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/OS-REVOKE/events import flask import flask_restful from oslo_utils import timeutils from keystone.api._shared import json_home_relations from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask PROVIDERS = provider_api.ProviderAPIs ENFORCER = rbac_enforcer.RBACEnforcer _build_resource_relation = json_home_relations.os_revoke_resource_rel_func class OSRevokeResource(flask_restful.Resource): def get(self): ENFORCER.enforce_call(action='identity:list_revoke_events') since = flask.request.args.get('since') last_fetch = None if since: try: last_fetch = timeutils.normalize_time( timeutils.parse_isotime(since) ) except ValueError: raise exception.ValidationError( message=_('invalidate date format %s') % since ) # FIXME(notmorgan): The revocation events cannot have resource options # added to them or lazy-loaded relationships as long as to_dict # is called outside of an active session context. This API is unused # and should be deprecated in the near future. Fix this before adding # resource_options or any lazy-loaded relationships to the revocation # events themselves. 
events = PROVIDERS.revoke_api.list_events(last_fetch=last_fetch) # Build the links by hand as the standard controller calls require ids response = { 'events': [event.to_dict() for event in events], 'links': { 'next': None, 'self': ks_flask.base_url(path='/OS-REVOKE/events'), 'previous': None, }, } return response class OSRevokeAPI(ks_flask.APIBase): _name = 'events' _import_name = __name__ _api_url_prefix = '/OS-REVOKE' resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=OSRevokeResource, url='/events', resource_kwargs={}, rel='events', resource_relation_func=_build_resource_relation, ) ] APIs = (OSRevokeAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/os_simple_cert.py0000664000175000017500000000431100000000000021455 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/OS-SIMPLE-CERT import flask_restful from keystone.api._shared import json_home_relations import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask CONF = keystone.conf.CONF _build_resource_relation = json_home_relations.os_simple_cert_resource_rel_func class SimpleCertCAResource(flask_restful.Resource): @ks_flask.unenforced_api def get(self): raise exception.Gone( message=_( 'This API is no longer available due to the removal ' 'of support for PKI tokens.' ) ) class SimpleCertListResource(flask_restful.Resource): @ks_flask.unenforced_api def get(self): raise exception.Gone( message=_( 'This API is no longer available due to the removal ' 'of support for PKI tokens.' ) ) class SimpleCertAPI(ks_flask.APIBase): _name = 'OS-SIMPLE-CERT' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=SimpleCertCAResource, url='/OS-SIMPLE-CERT/ca', resource_kwargs={}, rel='ca_certificate', resource_relation_func=_build_resource_relation, ), ks_flask.construct_resource_map( resource=SimpleCertListResource, url='/OS-SIMPLE-CERT/certificates', resource_kwargs={}, rel='certificates', resource_relation_func=_build_resource_relation, ), ] APIs = (SimpleCertAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/policy.py0000664000175000017500000002523700000000000017757 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /policy import http.client import flask_restful from oslo_log import versionutils from keystone.api._shared import json_home_relations from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone.policy import schema from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs _resource_rel_func = json_home_relations.os_endpoint_policy_resource_rel_func class PolicyResource(ks_flask.ResourceBase): collection_key = 'policies' member_key = 'policy' def get(self, policy_id=None): if policy_id: return self._get_policy(policy_id) return self._list_policies() @versionutils.deprecated( as_of=versionutils.deprecated.QUEENS, what='identity:get_policy of the v3 Policy APIs', ) def _get_policy(self, policy_id): ENFORCER.enforce_call(action='identity:get_policy') ref = PROVIDERS.policy_api.get_policy(policy_id) return self.wrap_member(ref) @versionutils.deprecated( as_of=versionutils.deprecated.QUEENS, what='identity:list_policies of the v3 Policy APIs', ) def _list_policies(self): ENFORCER.enforce_call(action='identity:list_policies') filters = ['type'] hints = self.build_driver_hints(filters) refs = PROVIDERS.policy_api.list_policies(hints=hints) return self.wrap_collection(refs, hints=hints) @versionutils.deprecated( as_of=versionutils.deprecated.QUEENS, what='identity:create_policy of the v3 Policy APIs', ) def post(self): ENFORCER.enforce_call(action='identity:create_policy') policy_body = self.request_body_json.get('policy', {}) validation.lazy_validate(schema.policy_create, policy_body) policy = self._assign_unique_id(self._normalize_dict(policy_body)) ref = PROVIDERS.policy_api.create_policy( policy['id'], policy, initiator=self.audit_initiator ) 
return self.wrap_member(ref), http.client.CREATED @versionutils.deprecated( as_of=versionutils.deprecated.QUEENS, what='identity:update_policy of the v3 Policy APIs', ) def patch(self, policy_id): ENFORCER.enforce_call(action='identity:update_policy') policy_body = self.request_body_json.get('policy', {}) validation.lazy_validate(schema.policy_update, policy_body) ref = PROVIDERS.policy_api.update_policy( policy_id, policy_body, initiator=self.audit_initiator ) return self.wrap_member(ref) @versionutils.deprecated( as_of=versionutils.deprecated.QUEENS, what='identity:delete_policy of the v3 Policy APIs', ) def delete(self, policy_id): ENFORCER.enforce_call(action='identity:delete_policy') res = PROVIDERS.policy_api.delete_policy( policy_id, initiator=self.audit_initiator ) return (res, http.client.NO_CONTENT) class EndpointPolicyResource(flask_restful.Resource): def get(self, policy_id): ENFORCER.enforce_call(action='identity:list_endpoints_for_policy') PROVIDERS.policy_api.get_policy(policy_id) endpoints = PROVIDERS.endpoint_policy_api.list_endpoints_for_policy( policy_id ) self._remove_legacy_ids(endpoints) return ks_flask.ResourceBase.wrap_collection( endpoints, collection_name='endpoints' ) def _remove_legacy_ids(self, endpoints): for endpoint in endpoints: endpoint.pop('legacy_endpoint_id', None) class EndpointPolicyAssociations(flask_restful.Resource): def get(self, policy_id, endpoint_id): action = 'identity:check_policy_association_for_endpoint' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_endpoint(endpoint_id) PROVIDERS.endpoint_policy_api.check_policy_association( policy_id, endpoint_id=endpoint_id ) return None, http.client.NO_CONTENT def put(self, policy_id, endpoint_id): action = 'identity:create_policy_association_for_endpoint' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_endpoint(endpoint_id) 
PROVIDERS.endpoint_policy_api.create_policy_association( policy_id, endpoint_id=endpoint_id ) return None, http.client.NO_CONTENT def delete(self, policy_id, endpoint_id): action = 'identity:delete_policy_association_for_endpoint' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_endpoint(endpoint_id) PROVIDERS.endpoint_policy_api.delete_policy_association( policy_id, endpoint_id=endpoint_id ) return None, http.client.NO_CONTENT class ServicePolicyAssociations(flask_restful.Resource): def get(self, policy_id, service_id): action = 'identity:check_policy_association_for_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.endpoint_policy_api.check_policy_association( policy_id, service_id=service_id ) return None, http.client.NO_CONTENT def put(self, policy_id, service_id): action = 'identity:create_policy_association_for_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id ) return None, http.client.NO_CONTENT def delete(self, policy_id, service_id): action = 'identity:delete_policy_association_for_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id ) return None, http.client.NO_CONTENT class ServiceRegionPolicyAssociations(flask_restful.Resource): def get(self, policy_id, service_id, region_id): action = 'identity:check_policy_association_for_region_and_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.catalog_api.get_region(region_id) 
PROVIDERS.endpoint_policy_api.check_policy_association( policy_id, service_id=service_id, region_id=region_id ) return None, http.client.NO_CONTENT def put(self, policy_id, service_id, region_id): action = 'identity:create_policy_association_for_region_and_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.catalog_api.get_region(region_id) PROVIDERS.endpoint_policy_api.create_policy_association( policy_id, service_id=service_id, region_id=region_id ) return None, http.client.NO_CONTENT def delete(self, policy_id, service_id, region_id): action = 'identity:delete_policy_association_for_region_and_service' ENFORCER.enforce_call(action=action) PROVIDERS.policy_api.get_policy(policy_id) PROVIDERS.catalog_api.get_service(service_id) PROVIDERS.catalog_api.get_region(region_id) PROVIDERS.endpoint_policy_api.delete_policy_association( policy_id, service_id=service_id, region_id=region_id ) return None, http.client.NO_CONTENT class PolicyAPI(ks_flask.APIBase): _name = 'policy' _import_name = __name__ resources = [PolicyResource] resource_mapping = [ ks_flask.construct_resource_map( resource=EndpointPolicyResource, url='/policies//OS-ENDPOINT-POLICY/endpoints', resource_kwargs={}, rel='policy_endpoints', path_vars={'policy_id': json_home.Parameters.POLICY_ID}, resource_relation_func=_resource_rel_func, ), ks_flask.construct_resource_map( resource=EndpointPolicyAssociations, url=( '/policies//OS-ENDPOINT-POLICY/' 'endpoints/' ), resource_kwargs={}, rel='endpoint_policy_association', path_vars={ 'policy_id': json_home.Parameters.POLICY_ID, 'endpoint_id': json_home.Parameters.ENDPOINT_ID, }, resource_relation_func=_resource_rel_func, ), ks_flask.construct_resource_map( resource=ServicePolicyAssociations, url=( '/policies//OS-ENDPOINT-POLICY/' 'services/' ), resource_kwargs={}, rel='service_policy_association', path_vars={ 'policy_id': json_home.Parameters.POLICY_ID, 'service_id': 
json_home.Parameters.SERVICE_ID, }, resource_relation_func=_resource_rel_func, ), ks_flask.construct_resource_map( resource=ServiceRegionPolicyAssociations, url=( '/policies//OS-ENDPOINT-POLICY/' 'services//regions/' ), resource_kwargs={}, rel='region_and_service_policy_association', path_vars={ 'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, 'region_id': json_home.Parameters.REGION_ID, }, resource_relation_func=_resource_rel_func, ), ] APIs = (PolicyAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/projects.py0000664000175000017500000005352300000000000020310 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/projects import functools import http.client import flask from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.resource import schema from keystone.server import flask as ks_flask CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs def _build_project_target_enforcement(): target = {} try: target['project'] = PROVIDERS.resource_api.get_project( flask.request.view_args.get('project_id') ) except exception.NotFound: # nosec # Defer existence in the event the project doesn't exist, we'll # check this later anyway. pass return target class ProjectResource(ks_flask.ResourceBase): collection_key = 'projects' member_key = 'project' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='resource_api', method='get_project' ) def _expand_project_ref(self, ref): parents_as_list = self.query_filter_is_true('parents_as_list') parents_as_ids = self.query_filter_is_true('parents_as_ids') subtree_as_list = self.query_filter_is_true('subtree_as_list') subtree_as_ids = self.query_filter_is_true('subtree_as_ids') include_limits = self.query_filter_is_true('include_limits') # parents_as_list and parents_as_ids are mutually exclusive if parents_as_list and parents_as_ids: msg = _( 'Cannot use parents_as_list and parents_as_ids query ' 'params at the same time.' ) raise exception.ValidationError(msg) # subtree_as_list and subtree_as_ids are mutually exclusive if subtree_as_list and subtree_as_ids: msg = _( 'Cannot use subtree_as_list and subtree_as_ids query ' 'params at the same time.' 
) raise exception.ValidationError(msg) if parents_as_list: parents = PROVIDERS.resource_api.list_project_parents( ref['id'], self.oslo_context.user_id, include_limits ) ref['parents'] = [self.wrap_member(p) for p in parents] elif parents_as_ids: ref['parents'] = PROVIDERS.resource_api.get_project_parents_as_ids( ref ) if subtree_as_list: subtree = PROVIDERS.resource_api.list_projects_in_subtree( ref['id'], self.oslo_context.user_id, include_limits ) ref['subtree'] = [self.wrap_member(p) for p in subtree] elif subtree_as_ids: ref['subtree'] = ( PROVIDERS.resource_api.get_projects_in_subtree_as_ids( ref['id'] ) ) def _get_project(self, project_id): """Get project. GET/HEAD /v3/projects/{project_id} """ ENFORCER.enforce_call( action='identity:get_project', build_target=_build_project_target_enforcement, ) project = PROVIDERS.resource_api.get_project(project_id) self._expand_project_ref(project) return self.wrap_member(project) def _list_projects(self): """List projects. GET/HEAD /v3/projects """ filters = ('domain_id', 'enabled', 'name', 'parent_id', 'is_domain') target = None if self.oslo_context.domain_id: target = {'domain_id': self.oslo_context.domain_id} ENFORCER.enforce_call( action='identity:list_projects', filters=filters, target_attr=target, ) hints = self.build_driver_hints(filters) # If 'is_domain' has not been included as a query, we default it to # False (which in query terms means '0') if 'is_domain' not in flask.request.args: hints.add_filter('is_domain', '0') tag_params = ['tags', 'tags-any', 'not-tags', 'not-tags-any'] for t in tag_params: if t in flask.request.args: hints.add_filter(t, flask.request.args[t]) refs = PROVIDERS.resource_api.list_projects(hints=hints) if self.oslo_context.domain_id: domain_id = self.oslo_context.domain_id filtered_refs = [ ref for ref in refs if ref['domain_id'] == domain_id ] else: filtered_refs = refs return self.wrap_collection(filtered_refs, hints=hints) def get(self, project_id=None): """Get project or list 
projects. GET/HEAD /v3/projects GET/HEAD /v3/projects/{project_id} """ if project_id is not None: return self._get_project(project_id) else: return self._list_projects() def post(self): """Create project. POST /v3/projects """ project = self.request_body_json.get('project', {}) target = {'project': project} ENFORCER.enforce_call( action='identity:create_project', target_attr=target ) validation.lazy_validate(schema.project_create, project) project = self._assign_unique_id(project) if not project.get('is_domain'): project = self._normalize_domain_id(project) # Our API requires that you specify the location in the hierarchy # unambiguously. This could be by parent_id or, if it is a top # level project, just by providing a domain_id. if not project.get('parent_id'): project['parent_id'] = project.get('domain_id') project = self._normalize_dict(project) try: ref = PROVIDERS.resource_api.create_project( project['id'], project, initiator=self.audit_initiator ) except (exception.DomainNotFound, exception.ProjectNotFound) as e: raise exception.ValidationError(e) return self.wrap_member(ref), http.client.CREATED def patch(self, project_id): """Update project. PATCH /v3/projects/{project_id} """ ENFORCER.enforce_call( action='identity:update_project', build_target=_build_project_target_enforcement, ) project = self.request_body_json.get('project', {}) validation.lazy_validate(schema.project_update, project) self._require_matching_id(project) ref = PROVIDERS.resource_api.update_project( project_id, project, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, project_id): """Delete project. 
DELETE /v3/projects/{project_id} """ ENFORCER.enforce_call( action='identity:delete_project', build_target=_build_project_target_enforcement, ) PROVIDERS.resource_api.delete_project( project_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class _ProjectTagResourceBase(ks_flask.ResourceBase): collection_key = 'projects' member_key = 'tags' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='resource_api', method='get_project_tag' ) @classmethod def wrap_member(cls, ref, collection_name=None, member_name=None): member_name = member_name or cls.member_key # NOTE(gagehugo): Overriding this due to how the common controller # expects the ref to have an id, which for tags it does not. new_ref = {'links': {'self': ks_flask.full_url()}} new_ref[member_name] = ref or [] return new_ref class ProjectTagsResource(_ProjectTagResourceBase): def get(self, project_id): """List tags associated with a given project. GET /v3/projects/{project_id}/tags """ ENFORCER.enforce_call( action='identity:list_project_tags', build_target=_build_project_target_enforcement, ) ref = PROVIDERS.resource_api.list_project_tags(project_id) return self.wrap_member(ref) def put(self, project_id): """Update all tags associated with a given project. PUT /v3/projects/{project_id}/tags """ ENFORCER.enforce_call( action='identity:update_project_tags', build_target=_build_project_target_enforcement, ) tags = self.request_body_json.get('tags', {}) validation.lazy_validate(schema.project_tags_update, tags) ref = PROVIDERS.resource_api.update_project_tags( project_id, tags, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, project_id): """Delete all tags associated with a given project. 
DELETE /v3/projects/{project_id}/tags """ ENFORCER.enforce_call( action='identity:delete_project_tags', build_target=_build_project_target_enforcement, ) PROVIDERS.resource_api.update_project_tags(project_id, []) return None, http.client.NO_CONTENT class ProjectTagResource(_ProjectTagResourceBase): def get(self, project_id, value): """Get information for a single tag associated with a given project. GET /v3/projects/{project_id}/tags/{value} """ ENFORCER.enforce_call( action='identity:get_project_tag', build_target=_build_project_target_enforcement, ) PROVIDERS.resource_api.get_project_tag(project_id, value) return None, http.client.NO_CONTENT def put(self, project_id, value): """Add a single tag to a project. PUT /v3/projects/{project_id}/tags/{value} """ ENFORCER.enforce_call( action='identity:create_project_tag', build_target=_build_project_target_enforcement, ) validation.lazy_validate(schema.project_tag_create, value) # Check if we will exceed the max number of tags on this project tags = PROVIDERS.resource_api.list_project_tags(project_id) tags.append(value) validation.lazy_validate(schema.project_tags_update, tags) PROVIDERS.resource_api.create_project_tag( project_id, value, initiator=self.audit_initiator ) url = '/'.join((ks_flask.base_url(), project_id, 'tags', value)) response = flask.make_response('', http.client.CREATED) response.headers['Location'] = url return response def delete(self, project_id, value): """Delete a single tag from a project. 
/v3/projects/{project_id}/tags/{value} """ ENFORCER.enforce_call( action='identity:delete_project_tag', build_target=_build_project_target_enforcement, ) PROVIDERS.resource_api.delete_project_tag(project_id, value) return None, http.client.NO_CONTENT class _ProjectGrantResourceBase(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='role_api', method='get_role' ) @staticmethod def _check_if_inherited(): return flask.request.path.endswith('/inherited_to_projects') @staticmethod def _build_enforcement_target_attr( role_id=None, user_id=None, group_id=None, domain_id=None, project_id=None, allow_non_existing=False, ): ref = {} if role_id: ref['role'] = PROVIDERS.role_api.get_role(role_id) try: if user_id: ref['user'] = PROVIDERS.identity_api.get_user(user_id) else: ref['group'] = PROVIDERS.identity_api.get_group(group_id) except (exception.UserNotFound, exception.GroupNotFound): if not allow_non_existing: raise # NOTE(lbragstad): This if/else check will need to be expanded in the # future to handle system hierarchies if that is implemented. if domain_id: ref['domain'] = PROVIDERS.resource_api.get_domain(domain_id) elif project_id: ref['project'] = PROVIDERS.resource_api.get_project(project_id) return ref class ProjectUserGrantResource(_ProjectGrantResourceBase): def get(self, project_id, user_id, role_id): """Check grant for project, user, role. GET/HEAD /v3/projects/{project_id/users/{user_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, project_id=project_id, user_id=user_id, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.get_grant( role_id=role_id, user_id=user_id, project_id=project_id, inherited_to_projects=inherited, ) return None, http.client.NO_CONTENT def put(self, project_id, user_id, role_id): """Grant role for user on project. 
PUT /v3/projects/{project_id}/users/{user_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, project_id=project_id, user_id=user_id, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.create_grant( role_id=role_id, user_id=user_id, project_id=project_id, inherited_to_projects=inherited, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT def delete(self, project_id, user_id, role_id): """Delete grant of role for user on project. DELETE /v3/projects/{project_id}/users/{user_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, user_id=user_id, project_id=project_id, allow_non_existing=True, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.delete_grant( role_id=role_id, user_id=user_id, project_id=project_id, inherited_to_projects=inherited, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT class ProjectUserListGrantResource(_ProjectGrantResourceBase): def get(self, project_id, user_id): """List grants for user on project. GET/HEAD /v3/projects/{project_id}/users/{user_id} """ ENFORCER.enforce_call( action='identity:list_grants', build_target=functools.partial( self._build_enforcement_target_attr, project_id=project_id, user_id=user_id, ), ) inherited = self._check_if_inherited() refs = PROVIDERS.assignment_api.list_grants( user_id=user_id, project_id=project_id, inherited_to_projects=inherited, ) return self.wrap_collection(refs) class ProjectGroupGrantResource(_ProjectGrantResourceBase): def get(self, project_id, group_id, role_id): """Check grant for project, group, role. 
GET/HEAD /v3/projects/{project_id/groups/{group_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:check_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, project_id=project_id, group_id=group_id, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.get_grant( role_id=role_id, group_id=group_id, project_id=project_id, inherited_to_projects=inherited, ) return None, http.client.NO_CONTENT def put(self, project_id, group_id, role_id): """Grant role for group on project. PUT /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:create_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, project_id=project_id, group_id=group_id, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.create_grant( role_id=role_id, group_id=group_id, project_id=project_id, inherited_to_projects=inherited, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT def delete(self, project_id, group_id, role_id): """Delete grant of role for group on project. DELETE /v3/projects/{project_id}/groups/{group_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:revoke_grant', build_target=functools.partial( self._build_enforcement_target_attr, role_id=role_id, group_id=group_id, project_id=project_id, allow_non_existing=True, ), ) inherited = self._check_if_inherited() PROVIDERS.assignment_api.delete_grant( role_id=role_id, group_id=group_id, project_id=project_id, inherited_to_projects=inherited, initiator=self.audit_initiator, ) return None, http.client.NO_CONTENT class ProjectGroupListGrantResource(_ProjectGrantResourceBase): def get(self, project_id, group_id): """List grants for group on project. 
GET/HEAD /v3/projects/{project_id}/groups/{group_id} """ ENFORCER.enforce_call( action='identity:list_grants', build_target=functools.partial( self._build_enforcement_target_attr, project_id=project_id, group_id=group_id, ), ) inherited = self._check_if_inherited() refs = PROVIDERS.assignment_api.list_grants( group_id=group_id, project_id=project_id, inherited_to_projects=inherited, ) return self.wrap_collection(refs) class ProjectAPI(ks_flask.APIBase): _name = 'projects' _import_name = __name__ resources = [ProjectResource] resource_mapping = [ ks_flask.construct_resource_map( resource=ProjectTagsResource, url='/projects//tags', resource_kwargs={}, rel='project_tags', path_vars={'project_id': json_home.Parameters.PROJECT_ID}, ), ks_flask.construct_resource_map( resource=ProjectTagResource, url='/projects//tags/', resource_kwargs={}, rel='project_tags', path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'value': json_home.Parameters.TAG_VALUE, }, ), ks_flask.construct_resource_map( resource=ProjectUserGrantResource, url=( '/projects//users//' 'roles/' ), resource_kwargs={}, rel='project_user_role', path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=ProjectUserListGrantResource, url='/projects//users//roles', resource_kwargs={}, rel='project_user_roles', path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=ProjectGroupGrantResource, url=( '/projects//groups//' 'roles/' ), resource_kwargs={}, rel='project_group_role', path_vars={ 'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, ), ks_flask.construct_resource_map( resource=ProjectGroupListGrantResource, url='/projects//groups//roles', resource_kwargs={}, rel='project_group_roles', path_vars={ 
'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, }, ), ] APIs = (ProjectAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/regions.py0000664000175000017500000001001700000000000020114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/regions import http.client from keystone.catalog import schema from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class RegionResource(ks_flask.ResourceBase): collection_key = 'regions' member_key = 'region' def _get_region(self, region_id): ENFORCER.enforce_call(action='identity:get_region') return self.wrap_member(PROVIDERS.catalog_api.get_region(region_id)) def _list_regions(self): filters = ['parent_region_id'] ENFORCER.enforce_call(action='identity:list_regions', filters=filters) hints = self.build_driver_hints(filters) refs = PROVIDERS.catalog_api.list_regions(hints) return self.wrap_collection(refs, hints=hints) def get(self, region_id=None): if region_id is not None: return self._get_region(region_id) return self._list_regions() def post(self): 
ENFORCER.enforce_call(action='identity:create_region') region = self.request_body_json.get('region') validation.lazy_validate(schema.region_create, region) region = self._normalize_dict(region) if not region.get('id'): # NOTE(morgan): even though we officially only support 'id' setting # via the PUT mechanism, this is historical and we need to support # both ways. region = self._assign_unique_id(region) ref = PROVIDERS.catalog_api.create_region( region, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def put(self, region_id): ENFORCER.enforce_call(action='identity:create_region') region = self.request_body_json.get('region') validation.lazy_validate(schema.region_create, region) region = self._normalize_dict(region) if 'id' not in region: region['id'] = region_id elif region_id != region.get('id'): raise exception.ValidationError( _( 'Conflicting region IDs specified: ' '"%(url_id)s" != "%(ref_id)s"' ) % {'url_id': region_id, 'ref_id': region['id']} ) ref = PROVIDERS.catalog_api.create_region( region, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def patch(self, region_id): ENFORCER.enforce_call(action='identity:update_region') region = self.request_body_json.get('region') validation.lazy_validate(schema.region_update, region) self._require_matching_id(region) return self.wrap_member( PROVIDERS.catalog_api.update_region( region_id, region, initiator=self.audit_initiator ) ) def delete(self, region_id): ENFORCER.enforce_call(action='identity:delete_region') return ( PROVIDERS.catalog_api.delete_region( region_id, initiator=self.audit_initiator ), http.client.NO_CONTENT, ) class RegionAPI(ks_flask.APIBase): _name = 'regions' _import_name = __name__ resources = [RegionResource] resource_mapping = [] APIs = (RegionAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/api/registered_limits.py0000664000175000017500000000727200000000000022175 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/registered_limits import http.client import flask from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone.limit import schema from keystone.server import flask as ks_flask PROVIDERS = provider_api.ProviderAPIs ENFORCER = rbac_enforcer.RBACEnforcer class RegisteredLimitResource(ks_flask.ResourceBase): collection_key = 'registered_limits' member_key = 'registered_limit' json_home_resource_status = json_home.Status.EXPERIMENTAL def _get_registered_limit(self, registered_limit_id): ENFORCER.enforce_call(action='identity:get_registered_limit') ref = PROVIDERS.unified_limit_api.get_registered_limit( registered_limit_id ) return self.wrap_member(ref) def _list_registered_limits(self): filters = ['service_id', 'region_id', 'resource_name'] ENFORCER.enforce_call( action='identity:list_registered_limits', filters=filters ) hints = self.build_driver_hints(filters) refs = PROVIDERS.unified_limit_api.list_registered_limits(hints) return self.wrap_collection(refs, hints=hints) def get(self, registered_limit_id=None): if registered_limit_id is not None: return self._get_registered_limit(registered_limit_id) return self._list_registered_limits() def 
post(self): ENFORCER.enforce_call(action='identity:create_registered_limits') reg_limits = ( flask.request.get_json(silent=True, force=True) or {} ).get('registered_limits', {}) validation.lazy_validate(schema.registered_limit_create, reg_limits) registered_limits = [ self._assign_unique_id(self._normalize_dict(r)) for r in reg_limits ] refs = PROVIDERS.unified_limit_api.create_registered_limits( registered_limits ) refs = self.wrap_collection(refs) refs.pop('links') return refs, http.client.CREATED def patch(self, registered_limit_id): ENFORCER.enforce_call(action='identity:update_registered_limit') registered_limit = ( flask.request.get_json(silent=True, force=True) or {} ).get('registered_limit', {}) validation.lazy_validate( schema.registered_limit_update, registered_limit ) self._require_matching_id(registered_limit) ref = PROVIDERS.unified_limit_api.update_registered_limit( registered_limit_id, registered_limit ) return self.wrap_member(ref) def delete(self, registered_limit_id): ENFORCER.enforce_call(action='identity:delete_registered_limit') return ( PROVIDERS.unified_limit_api.delete_registered_limit( registered_limit_id ), http.client.NO_CONTENT, ) class RegisteredLimitsAPI(ks_flask.APIBase): _name = 'registered_limit' _import_name = __name__ resources = [RegisteredLimitResource] resource_mapping = [] APIs = (RegisteredLimitsAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/role_assignments.py0000664000175000017500000004067000000000000022032 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/role_assignments import flask from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class RoleAssignmentsResource(ks_flask.ResourceBase): # TODO(henry-nash): The current implementation does not provide a full # first class entity for role-assignment. There is no role_assignment_id # and only the list_role_assignment call is supported. Further, since it # is not a first class entity, the links for the individual entities # reference the individual role grant APIs. collection_key = 'role_assignments' member_key = 'role_assignment' def get(self): """List all role assignments. GET/HEAD /v3/role_assignments """ if self.query_filter_is_true('include_subtree'): return self._list_role_assignments_for_tree() return self._list_role_assignments() def _list_role_assignments(self): filters = [ 'group.id', 'role.id', 'scope.domain.id', 'scope.project.id', 'scope.OS-INHERIT:inherited_to', 'user.id', 'scope.system', ] target = None if self.oslo_context.domain_id: # NOTE(dmendiza): Normally we want the target dict to contain # information about the target entity, not information from the # context. In this case, however, we are going to filter the # response to only contain information about the domain in scope # so we reflect the domain_id from the context into the target # to validate domain-scoped tokens. 
target = {'domain_id': self.oslo_context.domain_id} ENFORCER.enforce_call( action='identity:list_role_assignments', filters=filters, target_attr=target, ) assignments = self._build_role_assignments_list() if self.oslo_context.domain_id: domain_assignments = [] for assignment in assignments['role_assignments']: domain_id = assignment['scope'].get('domain', {}).get('id') project_id = assignment['scope'].get('project', {}).get('id') if domain_id == self.oslo_context.domain_id: domain_assignments.append(assignment) continue elif project_id: project = PROVIDERS.resource_api.get_project(project_id) if project.get('domain_id') == self.oslo_context.domain_id: domain_assignments.append(assignment) assignments['role_assignments'] = domain_assignments return assignments def _list_role_assignments_for_tree(self): filters = [ 'group.id', 'role.id', 'scope.domain.id', 'scope.project.id', 'scope.OS-INHERIT:inherited_to', 'user.id', ] project_id = flask.request.args.get('scope.project.id') target = None if project_id: target = { 'project': PROVIDERS.resource_api.get_project(project_id) } # Add target.domain_id to validate domain-scoped tokens target['domain_id'] = target['project']['domain_id'] ENFORCER.enforce_call( action='identity:list_role_assignments_for_tree', filters=filters, target_attr=target, ) if not project_id: msg = _( 'scope.project.id must be specified if include_subtree ' 'is also specified' ) raise exception.ValidationError(message=msg) return self._build_role_assignments_list(include_subtree=True) def _build_role_assignments_list(self, include_subtree=False): """List role assignments to user and groups on domains and projects. Return a list of all existing role assignments in the system, filtered by assignments attributes, if provided. 
If effective option is used and OS-INHERIT extension is enabled, the following functions will be applied: 1) For any group role assignment on a target, replace it by a set of role assignments containing one for each user of that group on that target; 2) For any inherited role assignment for an actor on a target, replace it by a set of role assignments for that actor on every project under that target. It means that, if effective mode is used, no group or domain inherited assignments will be present in the resultant list. Thus, combining effective with them is invalid. As a role assignment contains only one actor and one target, providing both user and group ids or domain and project ids is invalid as well. """ params = flask.request.args include_names = self.query_filter_is_true('include_names') self._assert_domain_nand_project() self._assert_system_nand_domain() self._assert_system_nand_project() self._assert_user_nand_group() self._assert_effective_filters_if_needed() refs = PROVIDERS.assignment_api.list_role_assignments( role_id=params.get('role.id'), user_id=params.get('user.id'), group_id=params.get('group.id'), system=params.get('scope.system'), domain_id=params.get('scope.domain.id'), project_id=params.get('scope.project.id'), include_subtree=include_subtree, inherited=self._inherited, effective=self._effective, include_names=include_names, ) formatted_refs = [self._format_entity(ref) for ref in refs] return self.wrap_collection(formatted_refs) def _assert_domain_nand_project(self): if flask.request.args.get( 'scope.domain.id' ) and flask.request.args.get('scope.project.id'): msg = _('Specify a domain or project, not both') raise exception.ValidationError(msg) def _assert_system_nand_domain(self): if flask.request.args.get( 'scope.domain.id' ) and flask.request.args.get('scope.system'): msg = _('Specify system or domain, not both') raise exception.ValidationError(msg) def _assert_system_nand_project(self): if flask.request.args.get( 'scope.project.id' ) and 
flask.request.args.get('scope.system'): msg = _('Specify system or project, not both') raise exception.ValidationError(msg) def _assert_user_nand_group(self): if flask.request.args.get('user.id') and flask.request.args.get( 'group.id' ): msg = _('Specify a user or group, not both') raise exception.ValidationError(msg) def _assert_effective_filters_if_needed(self): """Assert that useless filter combinations are avoided. In effective mode, the following filter combinations are useless, since they would always return an empty list of role assignments: - group id, since no group assignment is returned in effective mode; - domain id and inherited, since no domain inherited assignment is returned in effective mode. """ if self._effective: if flask.request.args.get('group.id'): msg = _( 'Combining effective and group filter will always ' 'result in an empty list.' ) raise exception.ValidationError(msg) if self._inherited and flask.request.args.get('scope.domain.id'): msg = _( 'Combining effective, domain and inherited filters will ' 'always result in an empty list.' ) raise exception.ValidationError(msg) @property def _inherited(self): inherited = None req_args = flask.request.args if 'scope.OS-INHERIT:inherited_to' in req_args: inherited = req_args['scope.OS-INHERIT:inherited_to'] == 'projects' return inherited @classmethod def _add_self_referential_link(cls, ref, collection_name=None): # NOTE(henry-nash): Since we are not yet a true collection, we override # the wrapper as have already included the links in the entities pass @property def _effective(self): return self.query_filter_is_true('effective') def _format_entity(self, entity): """Format an assignment entity for API response. The driver layer returns entities as dicts containing the ids of the actor (e.g. user or group), target (e.g. domain or project) and role. If it is an inherited role, then this is also indicated. 
Examples: For a non-inherited expanded assignment from group membership: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'group_id': group_id}} or, for a project inherited role: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'project_id': parent_id}} or, for a role that was implied by a prior role: {'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect': {'role_id': prior role_id}} It is possible to deduce if a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in the 'indirect' subdict, as well as it is possible to deduce if it has come from inheritance if it contains both a 'project_id' in the main body of the dict and 'parent_id' in the 'indirect' subdict. This function maps this into the format to be returned via the API, e.g. for the second example above: { 'user': { {'id': user_id} }, 'scope': { 'project': { {'id': project_id} }, 'OS-INHERIT:inherited_to': 'projects' }, 'role': { {'id': role_id} }, 'links': { 'assignment': '/OS-INHERIT/projects/parent_id/users/user_id/' 'roles/role_id/inherited_to_projects' } } """ formatted_link = '' formatted_entity = {'links': {}} inherited_assignment = entity.get('inherited_to_projects') if 'project_id' in entity: if 'project_name' in entity: formatted_entity['scope'] = { 'project': { 'id': entity['project_id'], 'name': entity['project_name'], 'domain': { 'id': entity['project_domain_id'], 'name': entity['project_domain_name'], }, } } else: formatted_entity['scope'] = { 'project': {'id': entity['project_id']} } if 'domain_id' in entity.get('indirect', {}): inherited_assignment = True formatted_link = ( '/domains/%s' % entity['indirect']['domain_id'] ) elif 'project_id' in entity.get('indirect', {}): inherited_assignment = True formatted_link = ( '/projects/%s' % entity['indirect']['project_id'] ) else: formatted_link = '/projects/%s' % entity['project_id'] elif 'domain_id' in 
entity: if 'domain_name' in entity: formatted_entity['scope'] = { 'domain': { 'id': entity['domain_id'], 'name': entity['domain_name'], } } else: formatted_entity['scope'] = { 'domain': {'id': entity['domain_id']} } formatted_link = '/domains/%s' % entity['domain_id'] elif 'system' in entity: formatted_link = '/system' formatted_entity['scope'] = {'system': entity['system']} if 'user_id' in entity: if 'user_name' in entity: formatted_entity['user'] = { 'id': entity['user_id'], 'name': entity['user_name'], 'domain': { 'id': entity['user_domain_id'], 'name': entity['user_domain_name'], }, } else: formatted_entity['user'] = {'id': entity['user_id']} if 'group_id' in entity.get('indirect', {}): membership_url = ks_flask.base_url( path='/groups/%s/users/%s' % (entity['indirect']['group_id'], entity['user_id']) ) formatted_entity['links']['membership'] = membership_url formatted_link += '/groups/%s' % entity['indirect']['group_id'] else: formatted_link += '/users/%s' % entity['user_id'] elif 'group_id' in entity: if 'group_name' in entity: formatted_entity['group'] = { 'id': entity['group_id'], 'name': entity['group_name'], 'domain': { 'id': entity['group_domain_id'], 'name': entity['group_domain_name'], }, } else: formatted_entity['group'] = {'id': entity['group_id']} formatted_link += '/groups/%s' % entity['group_id'] if 'role_name' in entity: formatted_entity['role'] = { 'id': entity['role_id'], 'name': entity['role_name'], } if 'role_domain_id' in entity and 'role_domain_name' in entity: formatted_entity['role'].update( { 'domain': { 'id': entity['role_domain_id'], 'name': entity['role_domain_name'], } } ) else: formatted_entity['role'] = {'id': entity['role_id']} prior_role_link = '' if 'role_id' in entity.get('indirect', {}): formatted_link += '/roles/%s' % entity['indirect']['role_id'] prior_role_link = '/prior_role/{prior}/implies/{implied}'.format( prior=entity['role_id'], implied=entity['indirect']['role_id'], ) else: formatted_link += '/roles/%s' % 
entity['role_id'] if inherited_assignment: formatted_entity['scope']['OS-INHERIT:inherited_to'] = 'projects' formatted_link = ( '/OS-INHERIT%s/inherited_to_projects' % formatted_link ) formatted_entity['links']['assignment'] = ks_flask.base_url( path=formatted_link ) if prior_role_link: formatted_entity['links']['prior_role'] = ks_flask.base_url( path=prior_role_link ) return formatted_entity class RoleAssignmentsAPI(ks_flask.APIBase): _name = 'role_assignments' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=RoleAssignmentsResource, url='/role_assignments', resource_kwargs={}, rel='role_assignments', ) ] APIs = (RoleAssignmentsAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/role_inferences.py0000664000175000017500000000502600000000000021614 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/role_inferences import flask_restful from keystone.api._shared import implied_roles as shared from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class RoleInferencesResource(flask_restful.Resource): def get(self): """List role inference rules. 
GET/HEAD /v3/role_inferences """ ENFORCER.enforce_call(action='identity:list_role_inference_rules') refs = PROVIDERS.role_api.list_role_inference_rules() role_dict = { role_ref['id']: role_ref for role_ref in PROVIDERS.role_api.list_roles() } rules = dict() for ref in refs: implied_role_id = ref['implied_role_id'] prior_role_id = ref['prior_role_id'] implied = rules.get(prior_role_id, []) implied.append( shared.build_implied_role_response_data( role_dict[implied_role_id] ) ) rules[prior_role_id] = implied inferences = [] for ( prior_id, implied, ) in rules.items(): prior_response = shared.build_prior_role_response_data( prior_id, role_dict[prior_id]['name'] ) inferences.append( {'prior_role': prior_response, 'implies': implied} ) results = {'role_inferences': inferences} return results class RoleInferencesAPI(ks_flask.APIBase): _name = 'role_inferences' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=RoleInferencesResource, url='/role_inferences', resource_kwargs={}, rel='role_inferences', ) ] APIs = (RoleInferencesAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/roles.py0000664000175000017500000003032000000000000017571 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/roles import http.client import flask import flask_restful from keystone.api._shared import implied_roles as shared from keystone.assignment import schema from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation import keystone.conf from keystone.server import flask as ks_flask CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class RoleResource(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='role_api', method='get_role' ) def _is_domain_role(self, role): return bool(role.get('domain_id')) def get(self, role_id=None): """Get role or list roles. GET/HEAD /v3/roles GET/HEAD /v3/roles/{role_id} """ if role_id is not None: return self._get_role(role_id) return self._list_roles() def _get_role(self, role_id): err = None role = {} try: role = PROVIDERS.role_api.get_role(role_id) except Exception as e: # nosec # We don't raise out here, we raise out after enforcement, this # ensures we do not leak role existence. Do nothing yet, process # enforcement before raising out an error. err = e finally: # NOTE(morgan): There are a couple of cases to be aware of here # if there is an exception (e is not None), then we are enforcing # on "get_role" to be safe. If the role is not a "domain_role", # we are enforcing on "get_role". If the role is "domain_role" we # are inforcing on "get_domain_role" if err is not None or not self._is_domain_role(role): ENFORCER.enforce_call(action='identity:get_role') if err: # reraise the error after enforcement if needed. 
raise err else: ENFORCER.enforce_call( action='identity:get_domain_role', member_target_type='role', member_target=role, ) return self.wrap_member(role) def _list_roles(self): filters = ['name', 'domain_id'] domain_filter = flask.request.args.get('domain_id') if domain_filter: ENFORCER.enforce_call( action='identity:list_domain_roles', filters=filters ) else: ENFORCER.enforce_call( action='identity:list_roles', filters=filters ) hints = self.build_driver_hints(filters) if not domain_filter: # NOTE(jamielennox): To handle the default case of not domain_id # defined the role_assignment backend does some hackery to # distinguish between global and domain scoped roles. This backend # behaviour relies upon a value of domain_id being set (not just # defaulting to None). Manually set the filter if its not # provided. hints.add_filter('domain_id', None) refs = PROVIDERS.role_api.list_roles(hints=hints) return self.wrap_collection(refs, hints=hints) def post(self): """Create role. POST /v3/roles """ role = self.request_body_json.get('role', {}) if self._is_domain_role(role): target = {'role': role} ENFORCER.enforce_call( action='identity:create_domain_role', target_attr=target ) else: ENFORCER.enforce_call(action='identity:create_role') validation.lazy_validate(schema.role_create, role) role = self._assign_unique_id(role) role = self._normalize_dict(role) ref = PROVIDERS.role_api.create_role( role['id'], role, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def patch(self, role_id): """Update role. PATCH /v3/roles/{role_id} """ err = None role = {} try: role = PROVIDERS.role_api.get_role(role_id) except Exception as e: # nosec # We don't raise out here, we raise out after enforcement, this # ensures we do not leak role existence. Do nothing yet, process # enforcement before raising out an error. 
err = e finally: if err is not None or not self._is_domain_role(role): ENFORCER.enforce_call(action='identity:update_role') if err: raise err else: ENFORCER.enforce_call( action='identity:update_domain_role', member_target_type='role', member_target=role, ) request_body_role = self.request_body_json.get('role', {}) validation.lazy_validate(schema.role_update, request_body_role) self._require_matching_id(request_body_role) ref = PROVIDERS.role_api.update_role( role_id, request_body_role, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, role_id): """Delete role. DELETE /v3/roles/{role_id} """ err = None role = {} try: role = PROVIDERS.role_api.get_role(role_id) except Exception as e: # nosec # We don't raise out here, we raise out after enforcement, this # ensures we do not leak role existence. Do nothing yet, process # enforcement before raising out an error. err = e finally: if err is not None or not self._is_domain_role(role): ENFORCER.enforce_call(action='identity:delete_role') if err: raise err else: ENFORCER.enforce_call( action='identity:delete_domain_role', member_target_type='role', member_target=role, ) PROVIDERS.role_api.delete_role(role_id, initiator=self.audit_initiator) return None, http.client.NO_CONTENT def _build_enforcement_target_ref(): ref = {} if flask.request.view_args: ref['prior_role'] = PROVIDERS.role_api.get_role( flask.request.view_args.get('prior_role_id') ) if flask.request.view_args.get('implied_role_id'): ref['implied_role'] = PROVIDERS.role_api.get_role( flask.request.view_args['implied_role_id'] ) return ref class RoleImplicationListResource(flask_restful.Resource): def get(self, prior_role_id): """List Implied Roles. 
GET/HEAD /v3/roles/{prior_role_id}/implies """ ENFORCER.enforce_call( action='identity:list_implied_roles', build_target=_build_enforcement_target_ref, ) ref = PROVIDERS.role_api.list_implied_roles(prior_role_id) implied_ids = [r['implied_role_id'] for r in ref] response_json = shared.role_inference_response(prior_role_id) response_json['role_inference']['implies'] = [] for implied_id in implied_ids: implied_role = PROVIDERS.role_api.get_role(implied_id) response_json['role_inference']['implies'].append( shared.build_implied_role_response_data(implied_role) ) response_json['links'] = { 'self': ks_flask.base_url(path='/roles/%s/implies' % prior_role_id) } return response_json class RoleImplicationResource(flask_restful.Resource): def head(self, prior_role_id, implied_role_id=None): # TODO(morgan): deprecate "check_implied_role" policy, as a user must # have both check_implied_role and get_implied_role to use the head # action. This enforcement of HEAD is historical for # consistent policy enforcement behavior even if it is superfluous. # Alternatively we can keep check_implied_role and reference # ._get_implied_role instead. ENFORCER.enforce_call( action='identity:check_implied_role', build_target=_build_enforcement_target_ref, ) self.get(prior_role_id, implied_role_id) # NOTE(morgan): Our API here breaks HTTP Spec. This should be evaluated # for a future fix. This should just return the above "get" however, # we document and implment this as a NO_CONTENT response. NO_CONTENT # here is incorrect. It is maintained as is for API contract reasons. return None, http.client.NO_CONTENT def get(self, prior_role_id, implied_role_id): """Get implied role. 
GET/HEAD /v3/roles/{prior_role_id}/implies/{implied_role_id} """ ENFORCER.enforce_call( action='identity:get_implied_role', build_target=_build_enforcement_target_ref, ) return self._get_implied_role(prior_role_id, implied_role_id) def _get_implied_role(self, prior_role_id, implied_role_id): # Isolate this logic so it can be re-used without added enforcement PROVIDERS.role_api.get_implied_role(prior_role_id, implied_role_id) implied_role_ref = PROVIDERS.role_api.get_role(implied_role_id) response_json = shared.role_inference_response(prior_role_id) response_json['role_inference']['implies'] = ( shared.build_implied_role_response_data(implied_role_ref) ) response_json['links'] = { 'self': ks_flask.base_url( path='/roles/%(prior)s/implies/%(implies)s' % {'prior': prior_role_id, 'implies': implied_role_id} ) } return response_json def put(self, prior_role_id, implied_role_id): """Create implied role. PUT /v3/roles/{prior_role_id}/implies/{implied_role_id} """ ENFORCER.enforce_call( action='identity:create_implied_role', build_target=_build_enforcement_target_ref, ) PROVIDERS.role_api.create_implied_role(prior_role_id, implied_role_id) response_json = self._get_implied_role(prior_role_id, implied_role_id) return response_json, http.client.CREATED def delete(self, prior_role_id, implied_role_id): """Delete implied role. 
DELETE /v3/roles/{prior_role_id}/implies/{implied_role_id} """ ENFORCER.enforce_call( action='identity:delete_implied_role', build_target=_build_enforcement_target_ref, ) PROVIDERS.role_api.delete_implied_role(prior_role_id, implied_role_id) return None, http.client.NO_CONTENT class RoleAPI(ks_flask.APIBase): _name = 'roles' _import_name = __name__ resources = [RoleResource] resource_mapping = [ ks_flask.construct_resource_map( resource=RoleImplicationListResource, url='/roles//implies', resource_kwargs={}, rel='implied_roles', path_vars={'prior_role_id': json_home.Parameters.ROLE_ID}, ), ks_flask.construct_resource_map( resource=RoleImplicationResource, resource_kwargs={}, url=( '/roles//' 'implies/' ), rel='implied_role', path_vars={ 'prior_role_id': json_home.Parameters.ROLE_ID, 'implied_role_id': json_home.Parameters.ROLE_ID, }, ), ] APIs = (RoleAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/s3tokens.py0000664000175000017500000001046600000000000020227 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/s3tokens import base64 import hashlib import hmac import http.client import flask from oslo_serialization import jsonutils from keystone.api._shared import EC2_S3_Resource from keystone.api._shared import json_home_relations from keystone.common import render_token from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask def _calculate_signature_v1(string_to_sign, secret_key): """Calculate a v1 signature. :param bytes string_to_sign: String that contains request params and is used for calculate signature of request :param text secret_key: Second auth key of EC2 account that is used to sign requests """ key = str(secret_key).encode('utf-8') b64_encode = base64.encodebytes signed = ( b64_encode(hmac.new(key, string_to_sign, hashlib.sha1).digest()) .decode('utf-8') .strip() ) return signed def _calculate_signature_v4(string_to_sign, secret_key): """Calculate a v4 signature. 
:param bytes string_to_sign: String that contains request params and is used for calculate signature of request :param text secret_key: Second auth key of EC2 account that is used to sign requests """ parts = string_to_sign.split(b'\n') if len(parts) != 4 or parts[0] != b'AWS4-HMAC-SHA256': raise exception.Unauthorized(message=_('Invalid EC2 signature.')) scope = parts[2].split(b'/') if len(scope) != 4 or scope[3] != b'aws4_request': raise exception.Unauthorized(message=_('Invalid EC2 signature.')) allowed_services = [b's3', b'iam', b'sts'] if scope[2] not in allowed_services: raise exception.Unauthorized(message=_('Invalid EC2 signature.')) def _sign(key, msg): return hmac.new(key, msg, hashlib.sha256).digest() signed = _sign(('AWS4' + secret_key).encode('utf-8'), scope[0]) signed = _sign(signed, scope[1]) signed = _sign(signed, scope[2]) signed = _sign(signed, b'aws4_request') signature = hmac.new(signed, string_to_sign, hashlib.sha256) return signature.hexdigest() class S3Resource(EC2_S3_Resource.ResourceBase): @staticmethod def _check_signature(creds_ref, credentials): string_to_sign = base64.urlsafe_b64decode(str(credentials['token'])) if string_to_sign[0:4] != b'AWS4': signature = _calculate_signature_v1( string_to_sign, creds_ref['secret'] ) else: signature = _calculate_signature_v4( string_to_sign, creds_ref['secret'] ) if not utils.auth_str_equal(credentials['signature'], signature): raise exception.Unauthorized( message=_('Credential signature mismatch') ) @ks_flask.unenforced_api def post(self): """Authenticate s3token. 
POST /v3/s3tokens """ token = self.handle_authenticate() token_reference = render_token.render_token_response_from_model(token) resp_body = jsonutils.dumps(token_reference) response = flask.make_response(resp_body, http.client.OK) response.headers['Content-Type'] = 'application/json' return response class S3Api(ks_flask.APIBase): _name = 's3tokens' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=S3Resource, url='/s3tokens', resource_kwargs={}, rel='s3tokens', resource_relation_func=( json_home_relations.s3_token_resource_rel_func ), ) ] APIs = (S3Api,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/services.py0000664000175000017500000000575000000000000020301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This file handles all flask-restful resources for /v3/services import http.client from keystone.catalog import schema from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import validation from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class ServicesResource(ks_flask.ResourceBase): collection_key = 'services' member_key = 'service' def _get_service(self, service_id): ENFORCER.enforce_call(action='identity:get_service') return self.wrap_member(PROVIDERS.catalog_api.get_service(service_id)) def _list_service(self): filters = ['type', 'name'] ENFORCER.enforce_call(action='identity:list_services', filters=filters) hints = self.build_driver_hints(filters) refs = PROVIDERS.catalog_api.list_services(hints=hints) return self.wrap_collection(refs, hints=hints) def get(self, service_id=None): if service_id is not None: return self._get_service(service_id) return self._list_service() def post(self): ENFORCER.enforce_call(action='identity:create_service') service = self.request_body_json.get('service') validation.lazy_validate(schema.service_create, service) service = self._assign_unique_id(self._normalize_dict(service)) ref = PROVIDERS.catalog_api.create_service( service['id'], service, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def patch(self, service_id): ENFORCER.enforce_call(action='identity:update_service') service = self.request_body_json.get('service') validation.lazy_validate(schema.service_update, service) self._require_matching_id(service) ref = PROVIDERS.catalog_api.update_service( service_id, service, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, service_id): ENFORCER.enforce_call(action='identity:delete_service') return ( PROVIDERS.catalog_api.delete_service( service_id, initiator=self.audit_initiator ), http.client.NO_CONTENT, ) class ServiceAPI(ks_flask.APIBase): 
_name = 'services' _import_name = __name__ resources = [ServicesResource] resource_mapping = [] APIs = (ServiceAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/system.py0000664000175000017500000001646600000000000020010 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/system import functools import http.client import flask import flask_restful from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone import exception from keystone.server import flask as ks_flask ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs def _build_enforcement_target(allow_non_existing=False): target = {} if flask.request.view_args: if flask.request.view_args.get('role_id'): target['role'] = PROVIDERS.role_api.get_role( flask.request.view_args['role_id'] ) if flask.request.view_args.get('user_id'): try: target['user'] = PROVIDERS.identity_api.get_user( flask.request.view_args['user_id'] ) except exception.UserNotFound: if not allow_non_existing: raise else: try: target['group'] = PROVIDERS.identity_api.get_group( flask.request.view_args.get('group_id') ) except exception.GroupNotFound: if not allow_non_existing: raise return target class SystemUsersListResource(flask_restful.Resource): def get(self, user_id): """List all system 
        grants for a specific user.

        GET/HEAD /system/users/{user_id}/roles
        """
        ENFORCER.enforce_call(
            action='identity:list_system_grants_for_user',
            build_target=_build_enforcement_target,
        )
        refs = PROVIDERS.assignment_api.list_system_grants_for_user(user_id)
        # wrap_collection is called via the class because this resource
        # subclasses flask_restful.Resource, not ks_flask.ResourceBase.
        return ks_flask.ResourceBase.wrap_collection(
            refs, collection_name='roles'
        )


class SystemUsersResource(flask_restful.Resource):
    """Check / grant / revoke a single system role for a user."""

    def get(self, user_id, role_id):
        """Check if a user has a specific role on the system.

        GET/HEAD /system/users/{user_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:check_system_grant_for_user',
            build_target=_build_enforcement_target,
        )
        # Raises if the grant does not exist; success means 204.
        PROVIDERS.assignment_api.check_system_grant_for_user(user_id, role_id)
        return None, http.client.NO_CONTENT

    def put(self, user_id, role_id):
        """Grant a role to a user on the system.

        PUT /system/users/{user_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:create_system_grant_for_user',
            build_target=_build_enforcement_target,
        )
        PROVIDERS.assignment_api.create_system_grant_for_user(user_id, role_id)
        return None, http.client.NO_CONTENT

    def delete(self, user_id, role_id):
        """Revoke a role from a user on the system.

        DELETE /system/users/{user_id}/roles/{role_id}
        """
        # allow_non_existing=True: revocation must still be enforceable
        # (and auditable) when the user has already been deleted.
        ENFORCER.enforce_call(
            action='identity:revoke_system_grant_for_user',
            build_target=functools.partial(
                _build_enforcement_target, allow_non_existing=True
            ),
        )
        PROVIDERS.assignment_api.delete_system_grant_for_user(user_id, role_id)
        return None, http.client.NO_CONTENT


class SystemGroupsRolesListResource(flask_restful.Resource):
    def get(self, group_id):
        """List all system grants for a specific group.
        GET/HEAD /system/groups/{group_id}/roles
        """
        ENFORCER.enforce_call(
            action='identity:list_system_grants_for_group',
            build_target=_build_enforcement_target,
        )
        refs = PROVIDERS.assignment_api.list_system_grants_for_group(group_id)
        # wrap_collection is called via the class because this resource
        # subclasses flask_restful.Resource, not ks_flask.ResourceBase.
        return ks_flask.ResourceBase.wrap_collection(
            refs, collection_name='roles'
        )


# NOTE(review): the class name misspells "Roles" as "Rolest".  It is kept
# as-is because SystemAPI.resource_mapping references this exact name;
# renaming would need a coordinated change there.
class SystemGroupsRolestResource(flask_restful.Resource):
    """Check / grant / revoke a single system role for a group."""

    def get(self, group_id, role_id):
        """Check if a group has a specific role on the system.

        GET/HEAD /system/groups/{group_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:check_system_grant_for_group',
            build_target=_build_enforcement_target,
        )
        # Raises if the grant does not exist; success means 204.
        PROVIDERS.assignment_api.check_system_grant_for_group(
            group_id, role_id
        )
        return None, http.client.NO_CONTENT

    def put(self, group_id, role_id):
        """Grant a role to a group on the system.

        PUT /system/groups/{group_id}/roles/{role_id}
        """
        ENFORCER.enforce_call(
            action='identity:create_system_grant_for_group',
            build_target=_build_enforcement_target,
        )
        PROVIDERS.assignment_api.create_system_grant_for_group(
            group_id, role_id
        )
        return None, http.client.NO_CONTENT

    def delete(self, group_id, role_id):
        """Revoke a role from the group on the system.
DELETE /system/groups/{group_id}/roles/{role_id} """ ENFORCER.enforce_call( action='identity:revoke_system_grant_for_group', build_target=functools.partial( _build_enforcement_target, allow_non_existing=True ), ) PROVIDERS.assignment_api.delete_system_grant_for_group( group_id, role_id ) return None, http.client.NO_CONTENT class SystemAPI(ks_flask.APIBase): _name = 'system' _import_name = __name__ resources = [] resource_mapping = [ ks_flask.construct_resource_map( resource=SystemUsersListResource, url='/system/users//roles', resource_kwargs={}, rel='system_user_roles', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=SystemUsersResource, url='/system/users//roles/', resource_kwargs={}, rel='system_user_role', path_vars={ 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=SystemGroupsRolesListResource, url='/system/groups//roles', resource_kwargs={}, rel='system_group_roles', path_vars={'group_id': json_home.Parameters.GROUP_ID}, ), ks_flask.construct_resource_map( resource=SystemGroupsRolestResource, url='/system/groups//roles/', resource_kwargs={}, rel='system_group_role', path_vars={ 'role_id': json_home.Parameters.ROLE_ID, 'group_id': json_home.Parameters.GROUP_ID, }, ), ] APIs = (SystemAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/trusts.py0000664000175000017500000005057300000000000020025 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/OS-TRUST # TODO(morgan): Deprecate /v3/OS-TRUST/trusts path in favour of /v3/trusts. # /v3/OS-TRUST should remain indefinitely. import http.client import flask import flask_restful from oslo_log import log from oslo_policy import _checks as op_checks from keystone.api._shared import json_home_relations from keystone.common import context from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common.rbac_enforcer import policy from keystone.common import utils from keystone.common import validation from keystone import exception from keystone.i18n import _ from keystone.server import flask as ks_flask from keystone.trust import schema LOG = log.getLogger(__name__) ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs _build_resource_relation = json_home_relations.os_trust_resource_rel_func _build_parameter_relation = json_home_relations.os_trust_parameter_rel_func TRUST_ID_PARAMETER_RELATION = _build_parameter_relation( parameter_name='trust_id' ) def _build_trust_target_enforcement(): target = {} # NOTE(cmurphy) unlike other APIs, in the event the trust doesn't exist or # has 0 remaining uses, we actually do expect it to return a 404 and not a # 403, so don't catch NotFound here (lp#1840288) target['trust'] = PROVIDERS.trust_api.get_trust( flask.request.view_args.get('trust_id') ) return target def _trustor_trustee_only(trust): user_id = flask.request.environ.get(context.REQUEST_CONTEXT_ENV).user_id if user_id 
not in [ trust.get('trustee_user_id'), trust.get('trustor_user_id'), ]: raise exception.ForbiddenAction( action=_('Requested user has no relation to this trust') ) def _normalize_trust_expires_at(trust): # correct isotime if trust.get('expires_at') is not None: trust['expires_at'] = utils.isotime( trust['expires_at'], subsecond=True ) def _normalize_trust_roles(trust): # fill in role data trust_full_roles = [] for trust_role in trust.get('roles', []): trust_role = trust_role['id'] try: matching_role = PROVIDERS.role_api.get_role(trust_role) full_role = ks_flask.ResourceBase.wrap_member( matching_role, collection_name='roles', member_name='role' ) trust_full_roles.append(full_role['role']) except exception.RoleNotFound: pass trust['roles'] = trust_full_roles trust['roles_links'] = { 'self': ks_flask.base_url(path='/%s/roles' % trust['id']), 'next': None, 'previous': None, } class TrustResource(ks_flask.ResourceBase): collection_key = 'trusts' member_key = 'trust' api_prefix = '/OS-TRUST' json_home_resource_rel_func = _build_resource_relation json_home_parameter_rel_func = _build_parameter_relation def _check_unrestricted(self): if self.oslo_context.is_admin: return token = self.auth_context['token'] if 'application_credential' in token.methods: if not token.application_credential['unrestricted']: action = _( "Using method 'application_credential' is not " "allowed for managing trusts." 
                )
                raise exception.ForbiddenAction(action=action)

    def _find_redelegated_trust(self):
        """Return the source trust when this request is itself trust-scoped.

        Redelegation is only permitted when the delegated auth came from a
        trust; otherwise ForbiddenAction is raised.  Returns None when the
        request is not delegated at all.
        """
        # Check if delegated via trust
        redelegated_trust = None
        if self.oslo_context.is_delegated_auth:
            src_trust_id = self.oslo_context.trust_id
            if not src_trust_id:
                action = _('Redelegation allowed for delegated by trust only')
                raise exception.ForbiddenAction(action=action)
            redelegated_trust = PROVIDERS.trust_api.get_trust(src_trust_id)
        return redelegated_trust

    @staticmethod
    def _parse_expiration_date(expiration_date):
        # Returns the parsed expiry, or None when no expiry was supplied.
        if expiration_date is not None:
            return utils.parse_expiration_date(expiration_date)
        return None

    def _require_trustor_has_role_in_project(self, trust):
        """Raise RoleNotFound unless the trustor holds every requested role."""
        trustor_roles = self._get_trustor_roles(trust)
        for trust_role in trust['roles']:
            matching_roles = [
                x for x in trustor_roles if x == trust_role['id']
            ]
            if not matching_roles:
                raise exception.RoleNotFound(role_id=trust_role['id'])

    def _get_trustor_roles(self, trust):
        """Collect the original trustor's effective role ids on the project."""
        original_trust = trust.copy()
        # Walk the redelegation chain back to the original (root) trust so
        # the role check is made against the original trustor.
        while original_trust.get('redelegated_trust_id'):
            original_trust = PROVIDERS.trust_api.get_trust(
                original_trust['redelegated_trust_id']
            )
        if (trust.get('project_id')) not in [None, '']:
            # Check project exists.
PROVIDERS.resource_api.get_project(trust['project_id']) # Get a list of roles including any domain specific roles assignment_list = PROVIDERS.assignment_api.list_role_assignments( user_id=original_trust['trustor_user_id'], project_id=original_trust['project_id'], effective=True, strip_domain_roles=False, ) return list({x['role_id'] for x in assignment_list}) else: return [] def _normalize_role_list(self, trust_roles): roles = [] for role in trust_roles: if role.get('id'): roles.append({'id': role['id']}) else: roles.append( PROVIDERS.role_api.get_unique_role_by_name(role['name']) ) return roles def _get_trust(self, trust_id): ENFORCER.enforce_call( action='identity:get_trust', build_target=_build_trust_target_enforcement, ) # NOTE(cmurphy) look up trust before doing is_admin authorization - to # maintain the API contract, we expect a missing trust to raise a 404 # before we get to enforcement (lp#1840288) trust = PROVIDERS.trust_api.get_trust(trust_id) if self.oslo_context.is_admin: # policies are not loaded for the is_admin context, so need to # block access here raise exception.ForbiddenAction( action=_('Requested user has no relation to this trust') ) # NOTE(cmurphy) As of Train, the default policies enforce the # identity:get_trust rule. However, in case the # identity:get_trust rule has been locally overridden by the # default that would have been produced by the sample config, we need # to enforce it again and warn that the behavior is changing. rules = policy._ENFORCER._enforcer.rules.get('identity:get_trust') # rule check_str is "" if isinstance(rules, op_checks.TrueCheck): LOG.warning( "The policy check string for rule \"identity:get_trust\" " "has been overridden to \"always true\". In the next release, " "this will cause the" "\"identity:get_trust\" action to " "be fully permissive as hardcoded enforcement will be " "removed. 
To correct this issue, either stop overriding the " "\"identity:get_trust\" rule in config to accept the " "defaults, or explicitly set a rule that is not empty." ) _trustor_trustee_only(trust) _normalize_trust_expires_at(trust) _normalize_trust_roles(trust) return self.wrap_member(trust) def _list_trusts(self): trustor_user_id = flask.request.args.get('trustor_user_id') trustee_user_id = flask.request.args.get('trustee_user_id') if trustor_user_id: target = {'trust': {'trustor_user_id': trustor_user_id}} ENFORCER.enforce_call( action='identity:list_trusts_for_trustor', target_attr=target ) elif trustee_user_id: target = {'trust': {'trustee_user_id': trustee_user_id}} ENFORCER.enforce_call( action='identity:list_trusts_for_trustee', target_attr=target ) else: ENFORCER.enforce_call(action='identity:list_trusts') trusts = [] # NOTE(cmurphy) As of Train, the default policies enforce the # identity:list_trusts rule and there are new policies in-code to # enforce identity:list_trusts_for_trustor and # identity:list_trusts_for_trustee. However, in case the # identity:list_trusts rule has been locally overridden by the default # that would have been produced by the sample config, we need to # enforce it again and warn that the behavior is changing. rules = policy._ENFORCER._enforcer.rules.get('identity:list_trusts') # rule check_str is "" if isinstance(rules, op_checks.TrueCheck): LOG.warning( "The policy check string for rule \"identity:list_trusts\" " "has been overridden to \"always true\". In the next release, " "this will cause the \"identity:list_trusts\" action to be " "fully permissive as hardcoded enforcement will be removed. " "To correct this issue, either stop overriding the " "\"identity:list_trusts\" rule in config to accept the " "defaults, or explicitly set a rule that is not empty." ) if not flask.request.args: # NOTE(morgan): Admin can list all trusts. 
ENFORCER.enforce_call(action='admin_required') if not flask.request.args: trusts += PROVIDERS.trust_api.list_trusts() elif trustor_user_id: trusts += PROVIDERS.trust_api.list_trusts_for_trustor( trustor_user_id ) elif trustee_user_id: trusts += PROVIDERS.trust_api.list_trusts_for_trustee( trustee_user_id ) for trust in trusts: # get_trust returns roles, list_trusts does not # It seems in some circumstances, roles does not # exist in the query response, so check first if 'roles' in trust: del trust['roles'] if trust.get('expires_at') is not None: trust['expires_at'] = utils.isotime( trust['expires_at'], subsecond=True ) return self.wrap_collection(trusts) def get(self, trust_id=None): """Dispatch for GET/HEAD or LIST trusts.""" if trust_id is not None: return self._get_trust(trust_id=trust_id) else: return self._list_trusts() def post(self): """Create a new trust. The User creating the trust must be the trustor. """ ENFORCER.enforce_call(action='identity:create_trust') trust = self.request_body_json.get('trust', {}) validation.lazy_validate(schema.trust_create, trust) self._check_unrestricted() if trust.get('project_id') and not trust.get('roles'): action = _('At least one role should be specified') raise exception.ForbiddenAction(action=action) if self.oslo_context.user_id != trust.get('trustor_user_id'): action = _("The authenticated user should match the trustor") raise exception.ForbiddenAction(action=action) # Ensure the trustee exists PROVIDERS.identity_api.get_user(trust['trustee_user_id']) # Normalize roles trust['roles'] = self._normalize_role_list(trust.get('roles', [])) self._require_trustor_has_role_in_project(trust) trust['expires_at'] = self._parse_expiration_date( trust.get('expires_at') ) trust = self._assign_unique_id(trust) redelegated_trust = self._find_redelegated_trust() return_trust = PROVIDERS.trust_api.create_trust( trust_id=trust['id'], trust=trust, roles=trust['roles'], redelegated_trust=redelegated_trust, initiator=self.audit_initiator, ) 
_normalize_trust_expires_at(return_trust) _normalize_trust_roles(return_trust) return self.wrap_member(return_trust), http.client.CREATED def delete(self, trust_id): ENFORCER.enforce_call( action='identity:delete_trust', build_target=_build_trust_target_enforcement, ) self._check_unrestricted() # NOTE(cmurphy) As of Train, the default policies enforce the # identity:delete_trust rule. However, in case the # identity:delete_trust rule has been locally overridden by the # default that would have been produced by the sample config, we need # to enforce it again and warn that the behavior is changing. rules = policy._ENFORCER._enforcer.rules.get('identity:delete_trust') # rule check_str is "" if isinstance(rules, op_checks.TrueCheck): LOG.warning( "The policy check string for rule \"identity:delete_trust\" " "has been overridden to \"always true\". In the next release, " "this will cause the" "\"identity:delete_trust\" action to " "be fully permissive as hardcoded enforcement will be " "removed. To correct this issue, either stop overriding the " "\"identity:delete_trust\" rule in config to accept the " "defaults, or explicitly set a rule that is not empty." 
) trust = PROVIDERS.trust_api.get_trust(trust_id) if ( self.oslo_context.user_id != trust.get('trustor_user_id') and not self.oslo_context.is_admin ): action = _('Only admin or trustor can delete a trust') raise exception.ForbiddenAction(action=action) PROVIDERS.trust_api.delete_trust( trust_id, initiator=self.audit_initiator ) return '', http.client.NO_CONTENT # NOTE(morgan): Since this Resource is not being used with the automatic # URL additions and does not have a collection key/member_key, we use # the flask-restful Resource, not the keystone ResourceBase class RolesForTrustListResource(flask_restful.Resource): @property def oslo_context(self): return flask.request.environ.get(context.REQUEST_CONTEXT_ENV, None) def get(self, trust_id): ENFORCER.enforce_call( action='identity:list_roles_for_trust', build_target=_build_trust_target_enforcement, ) # NOTE(morgan): This duplicates a little of the .get_trust from the # main resource, as it needs some of the same logic. However, due to # how flask-restful works, this should be fully encapsulated if self.oslo_context.is_admin: # policies are not loaded for the is_admin context, so need to # block access here raise exception.ForbiddenAction( action=_('Requested user has no relation to this trust') ) trust = PROVIDERS.trust_api.get_trust(trust_id) # NOTE(cmurphy) As of Train, the default policies enforce the # identity:list_roles_for_trust rule. However, in case the # identity:list_roles_for_trust rule has been locally overridden by the # default that would have been produced by the sample config, we need # to enforce it again and warn that the behavior is changing. rules = policy._ENFORCER._enforcer.rules.get( 'identity:list_roles_for_trust' ) # rule check_str is "" if isinstance(rules, op_checks.TrueCheck): LOG.warning( "The policy check string for rule " "\"identity:list_roles_for_trust\" has been overridden to " "\"always true\". 
In the next release, this will cause the " "\"identity:list_roles_for_trust\" action to be fully " "permissive as hardcoded enforcement will be removed. To " "correct this issue, either stop overriding the " "\"identity:get_trust\" rule in config to accept the " "defaults, or explicitly set a rule that is not empty." ) _trustor_trustee_only(trust) _normalize_trust_expires_at(trust) _normalize_trust_roles(trust) return {'roles': trust['roles'], 'links': trust['roles_links']} # NOTE(morgan): Since this Resource is not being used with the automatic # URL additions and does not have a collection key/member_key, we use # the flask-restful Resource, not the keystone ResourceBase class RoleForTrustResource(flask_restful.Resource): @property def oslo_context(self): return flask.request.environ.get(context.REQUEST_CONTEXT_ENV, None) def get(self, trust_id, role_id): """Get a role that has been assigned to a trust.""" ENFORCER.enforce_call( action='identity:get_role_for_trust', build_target=_build_trust_target_enforcement, ) if self.oslo_context.is_admin: # policies are not loaded for the is_admin context, so need to # block access here raise exception.ForbiddenAction( action=_('Requested user has no relation to this trust') ) trust = PROVIDERS.trust_api.get_trust(trust_id) # NOTE(cmurphy) As of Train, the default policies enforce the # identity:get_role_for_trust rule. However, in case the # identity:get_role_for_trust rule has been locally overridden by the # default that would have been produced by the sample config, we need # to enforce it again and warn that the behavior is changing. rules = policy._ENFORCER._enforcer.rules.get( 'identity:get_role_for_trust' ) # rule check_str is "" if isinstance(rules, op_checks.TrueCheck): LOG.warning( "The policy check string for rule " "\"identity:get_role_for_trust\" has been overridden to " "\"always true\". 
In the next release, this will cause the " "\"identity:get_role_for_trust\" action to be fully " "permissive as hardcoded enforcement will be removed. To " "correct this issue, either stop overriding the " "\"identity:get_role_for_trust\" rule in config to accept the " "defaults, or explicitly set a rule that is not empty." ) _trustor_trustee_only(trust) if not any(role['id'] == role_id for role in trust['roles']): raise exception.RoleNotFound(role_id=role_id) role = PROVIDERS.role_api.get_role(role_id) return ks_flask.ResourceBase.wrap_member( role, collection_name='roles', member_name='role' ) class TrustAPI(ks_flask.APIBase): _name = 'trusts' _import_name = __name__ resources = [TrustResource] resource_mapping = [ ks_flask.construct_resource_map( resource=RolesForTrustListResource, url='/trusts//roles', resource_kwargs={}, rel='trust_roles', path_vars={'trust_id': TRUST_ID_PARAMETER_RELATION}, resource_relation_func=_build_resource_relation, ), ks_flask.construct_resource_map( resource=RoleForTrustResource, url='/trusts//roles/', resource_kwargs={}, rel='trust_role', path_vars={ 'trust_id': TRUST_ID_PARAMETER_RELATION, 'role_id': json_home.Parameters.ROLE_ID, }, resource_relation_func=_build_resource_relation, ), ] _api_url_prefix = '/OS-TRUST' APIs = (TrustAPI,) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/api/users.py0000664000175000017500000011101600000000000017610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This file handles all flask-restful resources for /v3/users import base64 import http.client import secrets import uuid import flask from oslo_serialization import jsonutils from werkzeug import exceptions from keystone.api._shared import json_home_relations from keystone.application_credential import schema as app_cred_schema from keystone.common import json_home from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone.common import utils from keystone.common import validation import keystone.conf from keystone import exception as ks_exception from keystone.i18n import _ from keystone.identity import schema from keystone import notifications from keystone.server import flask as ks_flask CRED_TYPE_EC2 = 'ec2' CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs ACCESS_TOKEN_ID_PARAMETER_RELATION = ( json_home_relations.os_oauth1_parameter_rel_func( parameter_name='access_token_id' ) ) def _convert_v3_to_ec2_credential(credential): # Prior to bug #1259584 fix, blob was stored unserialized # but it should be stored as a json string for compatibility # with the v3 credentials API. 
Fall back to the old behavior # for backwards compatibility with existing DB contents try: blob = jsonutils.loads(credential['blob']) except TypeError: blob = credential['blob'] return { 'user_id': credential.get('user_id'), 'tenant_id': credential.get('project_id'), 'access': blob.get('access'), 'secret': blob.get('secret'), 'trust_id': blob.get('trust_id'), } def _format_token_entity(entity): formatted_entity = entity.copy() access_token_id = formatted_entity['id'] user_id = formatted_entity.get('authorizing_user_id', '') if 'role_ids' in entity: formatted_entity.pop('role_ids') if 'access_secret' in entity: formatted_entity.pop('access_secret') url = ( '/users/%(user_id)s/OS-OAUTH1/access_tokens/%(access_token_id)s' '/roles' % {'user_id': user_id, 'access_token_id': access_token_id} ) formatted_entity.setdefault('links', {}) formatted_entity['links']['roles'] = ks_flask.base_url(url) return formatted_entity def _check_unrestricted_application_credential(token): if 'application_credential' in token.methods: if not token.application_credential['unrestricted']: action = _( "Using method 'application_credential' is not " "allowed for managing additional application " "credentials." ) raise ks_exception.ForbiddenAction(action=action) def _build_user_target_enforcement(): target = {} try: target['user'] = PROVIDERS.identity_api.get_user( flask.request.view_args.get('user_id') ) if flask.request.view_args.get('group_id'): target['group'] = PROVIDERS.identity_api.get_group( flask.request.view_args.get('group_id') ) except ks_exception.NotFound: # nosec # Defer existence in the event the user doesn't exist, we'll # check this later anyway. 
pass return target def _build_enforcer_target_data_owner_and_user_id_match(): ref = {} if flask.request.view_args: credential_id = flask.request.view_args.get('credential_id') if credential_id is not None: hashed_id = utils.hash_access_key(credential_id) ref['credential'] = PROVIDERS.credential_api.get_credential( hashed_id ) return ref def _update_request_user_id_attribute(): # This method handles a special case in policy enforcement. The application # credential API is underneath the user path (e.g., # /v3/users/{user_id}/application_credentials/{application_credential_id}). # The RBAC enforcer thinks the user to evaluate for application credential # ownership comes from the path, but it should come from the actual # application credential reference. By ensuring we pull the user ID from # the application credential, we close a loop hole where users could # effectively bypass authorization to view or delete any application # credential in the system, assuming the attacker knows the application # credential ID of another user. So long as the attacker matches the user # ID in the request path to the user in the token of the request, they can # pass the `rule:owner` policy check. This method protects against that by # ensuring we use the application credential user ID and not something # determined from the client. try: app_cred = ( PROVIDERS.application_credential_api.get_application_credential( flask.request.view_args.get('application_credential_id') ) ) flask.request.view_args['user_id'] = app_cred['user_id'] # This target isn't really used in the default policy for application # credentials, but we return it since we're using this method as a hook # to update the flask request variables, which are used later in the # keystone RBAC enforcer to populate the policy_dict, which ultimately # turns into target attributes. 
return {'user_id': app_cred['user_id']} except ks_exception.NotFound: # nosec # Defer existance in the event the application credential doesn't # exist, we'll check this later anyway. pass def _format_role_entity(role_id): role = PROVIDERS.role_api.get_role(role_id) formatted_entity = role.copy() if 'description' in role: formatted_entity.pop('description') if 'enabled' in role: formatted_entity.pop('enabled') return formatted_entity class UserResource(ks_flask.ResourceBase): collection_key = 'users' member_key = 'user' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='identity_api', method='get_user' ) def get(self, user_id=None): """Get a user resource or list users. GET/HEAD /v3/users GET/HEAD /v3/users/{user_id} """ if user_id is not None: return self._get_user(user_id) return self._list_users() def _get_user(self, user_id): """Get a user resource. GET/HEAD /v3/users/{user_id} """ ENFORCER.enforce_call( action='identity:get_user', build_target=_build_user_target_enforcement, ) ref = PROVIDERS.identity_api.get_user(user_id) return self.wrap_member(ref) def _list_users(self): """List users. GET/HEAD /v3/users """ filters = ( 'domain_id', 'enabled', 'idp_id', 'name', 'protocol_id', 'unique_id', 'password_expires_at', ) target = None if self.oslo_context.domain_id: target = {'domain_id': self.oslo_context.domain_id} hints = self.build_driver_hints(filters) ENFORCER.enforce_call( action='identity:list_users', filters=filters, target_attr=target ) domain = self._get_domain_id_for_list_request() if domain is None and self.oslo_context.domain_id: domain = self.oslo_context.domain_id refs = PROVIDERS.identity_api.list_users( domain_scope=domain, hints=hints ) # If the user making the request used a domain-scoped token, let's make # sure we filter out users that are not in that domain. Otherwise, we'd # be exposing users in other domains. 
This if statement is needed in # case _get_domain_id_for_list_request() short-circuits due to # configuration and protects against information from other domains # leaking to people who shouldn't see it. if self.oslo_context.domain_id: domain_id = self.oslo_context.domain_id users = [user for user in refs if user['domain_id'] == domain_id] else: users = refs return self.wrap_collection(users, hints=hints) def post(self): """Create a user. POST /v3/users """ user_data = self.request_body_json.get('user', {}) target = {'user': user_data} ENFORCER.enforce_call( action='identity:create_user', target_attr=target ) validation.lazy_validate(schema.user_create, user_data) user_data = self._normalize_dict(user_data) user_data = self._normalize_domain_id(user_data) ref = PROVIDERS.identity_api.create_user( user_data, initiator=self.audit_initiator ) return self.wrap_member(ref), http.client.CREATED def patch(self, user_id): """Update a user. PATCH /v3/users/{user_id} """ ENFORCER.enforce_call( action='identity:update_user', build_target=_build_user_target_enforcement, ) PROVIDERS.identity_api.get_user(user_id) user_data = self.request_body_json.get('user', {}) validation.lazy_validate(schema.user_update, user_data) self._require_matching_id(user_data) ref = PROVIDERS.identity_api.update_user( user_id, user_data, initiator=self.audit_initiator ) return self.wrap_member(ref) def delete(self, user_id): """Delete a user. DELETE /v3/users/{user_id} """ ENFORCER.enforce_call( action='identity:delete_user', build_target=_build_user_target_enforcement, ) PROVIDERS.identity_api.delete_user( user_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class UserChangePasswordResource(ks_flask.ResourceBase): @ks_flask.unenforced_api def get(self, user_id): # Special case, GET is not allowed. 
raise exceptions.MethodNotAllowed(valid_methods=['POST']) @ks_flask.unenforced_api def post(self, user_id): user_data = self.request_body_json.get('user', {}) validation.lazy_validate(schema.password_change, user_data) try: PROVIDERS.identity_api.change_password( user_id=user_id, original_password=user_data['original_password'], new_password=user_data['password'], initiator=self.audit_initiator, ) except AssertionError as e: raise ks_exception.Unauthorized( _('Error when changing user password: %s') % e ) return None, http.client.NO_CONTENT class UserProjectsResource(ks_flask.ResourceBase): collection_key = 'projects' member_key = 'project' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='resource_api', method='get_project' ) def get(self, user_id): filters = ('domain_id', 'enabled', 'name') ENFORCER.enforce_call( action='identity:list_user_projects', filters=filters, build_target=_build_user_target_enforcement, ) hints = self.build_driver_hints(filters) refs = PROVIDERS.assignment_api.list_projects_for_user(user_id) return self.wrap_collection(refs, hints=hints) class UserGroupsResource(ks_flask.ResourceBase): collection_key = 'groups' member_key = 'group' get_member_from_driver = PROVIDERS.deferred_provider_lookup( api='identity_api', method='get_group' ) def get(self, user_id): """Get groups for a user. 
GET/HEAD /v3/users/{user_id}/groups """ filters = ('name',) hints = self.build_driver_hints(filters) ENFORCER.enforce_call( action='identity:list_groups_for_user', build_target=_build_user_target_enforcement, filters=filters, ) refs = PROVIDERS.identity_api.list_groups_for_user( user_id=user_id, hints=hints ) if self.oslo_context.domain_id: filtered_refs = [] for ref in refs: if ref['domain_id'] == self.oslo_context.domain_id: filtered_refs.append(ref) refs = filtered_refs return self.wrap_collection(refs, hints=hints) class _UserOSEC2CredBaseResource(ks_flask.ResourceBase): collection_key = 'credentials' member_key = 'credential' @classmethod def _add_self_referential_link(cls, ref, collection_name=None): # NOTE(morgan): This should be refactored to have an EC2 Cred API with # a sane prefix instead of overloading the "_add_self_referential_link" # method. This was chosen as it more closely mirrors the pre-flask # code (for transition). path = '/users/%(user_id)s/credentials/OS-EC2/%(credential_id)s' url = ks_flask.base_url(path) % { 'user_id': ref['user_id'], 'credential_id': ref['access'], } ref.setdefault('links', {}) ref['links']['self'] = url class UserOSEC2CredentialsResourceListCreate(_UserOSEC2CredBaseResource): def get(self, user_id): """List EC2 Credentials for user. GET/HEAD /v3/users/{user_id}/credentials/OS-EC2 """ ENFORCER.enforce_call(action='identity:ec2_list_credentials') PROVIDERS.identity_api.get_user(user_id) credential_refs = PROVIDERS.credential_api.list_credentials_for_user( user_id, type=CRED_TYPE_EC2 ) collection_refs = [ _convert_v3_to_ec2_credential(cred) for cred in credential_refs ] return self.wrap_collection(collection_refs) def post(self, user_id): """Create EC2 Credential for user. 
POST /v3/users/{user_id}/credentials/OS-EC2 """ target = {} target['credential'] = {'user_id': user_id} ENFORCER.enforce_call( action='identity:ec2_create_credential', target_attr=target ) PROVIDERS.identity_api.get_user(user_id) tenant_id = self.request_body_json.get('tenant_id') PROVIDERS.resource_api.get_project(tenant_id) blob = dict( access=uuid.uuid4().hex, secret=uuid.uuid4().hex, trust_id=self.oslo_context.trust_id, ) credential_id = utils.hash_access_key(blob['access']) cred_data = dict( user_id=user_id, project_id=tenant_id, blob=jsonutils.dumps(blob), id=credential_id, type=CRED_TYPE_EC2, ) PROVIDERS.credential_api.create_credential(credential_id, cred_data) ref = _convert_v3_to_ec2_credential(cred_data) return self.wrap_member(ref), http.client.CREATED class UserOSEC2CredentialsResourceGetDelete(_UserOSEC2CredBaseResource): @staticmethod def _get_cred_data(credential_id): cred = PROVIDERS.credential_api.get_credential(credential_id) if not cred or cred['type'] != CRED_TYPE_EC2: raise ks_exception.Unauthorized( message=_('EC2 access key not found.') ) return _convert_v3_to_ec2_credential(cred) def get(self, user_id, credential_id): """Get a specific EC2 credential. GET/HEAD /users/{user_id}/credentials/OS-EC2/{credential_id} """ func = _build_enforcer_target_data_owner_and_user_id_match ENFORCER.enforce_call( action='identity:ec2_get_credential', build_target=func ) PROVIDERS.identity_api.get_user(user_id) ec2_cred_id = utils.hash_access_key(credential_id) cred_data = self._get_cred_data(ec2_cred_id) return self.wrap_member(cred_data) def delete(self, user_id, credential_id): """Delete a specific EC2 credential. 
DELETE /users/{user_id}/credentials/OS-EC2/{credential_id} """ func = _build_enforcer_target_data_owner_and_user_id_match ENFORCER.enforce_call( action='identity:ec2_delete_credential', build_target=func ) PROVIDERS.identity_api.get_user(user_id) ec2_cred_id = utils.hash_access_key(credential_id) self._get_cred_data(ec2_cred_id) PROVIDERS.credential_api.delete_credential(ec2_cred_id) return None, http.client.NO_CONTENT class _OAuth1ResourceBase(ks_flask.ResourceBase): collection_key = 'access_tokens' member_key = 'access_token' @classmethod def _add_self_referential_link(cls, ref, collection_name=None): # NOTE(morgan): This should be refactored to have an OAuth1 API with # a sane prefix instead of overloading the "_add_self_referential_link" # method. This was chosen as it more closely mirrors the pre-flask # code (for transition). ref.setdefault('links', {}) path = '/users/{user_id}/OS-OAUTH1/access_tokens'.format( user_id=ref.get('authorizing_user_id', '') ) ref['links']['self'] = ks_flask.base_url(path) + '/' + ref['id'] class OAuth1ListAccessTokensResource(_OAuth1ResourceBase): def get(self, user_id): """List OAuth1 Access Tokens for user. GET /v3/users/{user_id}/OS-OAUTH1/access_tokens """ ENFORCER.enforce_call(action='identity:list_access_tokens') if self.oslo_context.is_delegated_auth: raise ks_exception.Forbidden( _( 'Cannot list request tokens with a token ' 'issued via delegation.' ) ) refs = PROVIDERS.oauth_api.list_access_tokens(user_id) formatted_refs = [_format_token_entity(x) for x in refs] return self.wrap_collection(formatted_refs) class OAuth1AccessTokenCRUDResource(_OAuth1ResourceBase): def get(self, user_id, access_token_id): """Get specific access token. 
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} """ ENFORCER.enforce_call(action='identity:get_access_token') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.NotFound() access_token = _format_token_entity(access_token) return self.wrap_member(access_token) def delete(self, user_id, access_token_id): """Delete specific access token. DELETE /v3/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id} """ ENFORCER.enforce_call( action='identity:ec2_delete_credential', build_target=_build_enforcer_target_data_owner_and_user_id_match, ) access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) reason = ( 'Invalidating the token cache because an access token for ' 'consumer %(consumer_id)s has been deleted. Authorization for ' 'users with OAuth tokens will be recalculated and enforced ' 'accordingly the next time they authenticate or validate a ' 'token.' % {'consumer_id': access_token['consumer_id']} ) notifications.invalidate_token_cache_notification(reason) PROVIDERS.oauth_api.delete_access_token( user_id, access_token_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class OAuth1AccessTokenRoleListResource(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' def get(self, user_id, access_token_id): """List roles for a user access token. 
GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/ {access_token_id}/roles """ ENFORCER.enforce_call(action='identity:list_access_token_roles') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.NotFound() authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) refs = [_format_role_entity(x) for x in authed_role_ids] return self.wrap_collection(refs) class OAuth1AccessTokenRoleResource(ks_flask.ResourceBase): collection_key = 'roles' member_key = 'role' def get(self, user_id, access_token_id, role_id): """Get role for access token. GET/HEAD /v3/users/{user_id}/OS-OAUTH1/access_tokens/ {access_token_id}/roles/{role_id} """ ENFORCER.enforce_call(action='identity:get_access_token_role') access_token = PROVIDERS.oauth_api.get_access_token(access_token_id) if access_token['authorizing_user_id'] != user_id: raise ks_exception.Unauthorized(_('User IDs do not match')) authed_role_ids = access_token['role_ids'] authed_role_ids = jsonutils.loads(authed_role_ids) for authed_role_id in authed_role_ids: if authed_role_id == role_id: role = _format_role_entity(role_id) return self.wrap_member(role) raise ks_exception.RoleNotFound(role_id=role_id) class UserAppCredListCreateResource(ks_flask.ResourceBase): collection_key = 'application_credentials' member_key = 'application_credential' _public_parameters = frozenset( [ 'id', 'name', 'description', 'expires_at', 'project_id', 'roles', # secret is only exposed after create, it is not stored 'secret', 'links', 'unrestricted', 'access_rules', ] ) @staticmethod def _generate_secret(): length = 64 secret = secrets.token_bytes(length) secret = base64.urlsafe_b64encode(secret) secret = secret.rstrip(b'=') secret = secret.decode('utf-8') return secret @staticmethod def _normalize_role_list(app_cred_roles): roles = [] for role in app_cred_roles: if role.get('id'): roles.append(role) else: roles.append( 
PROVIDERS.role_api.get_unique_role_by_name(role['name']) ) return roles def _get_roles(self, app_cred_data, token): if app_cred_data.get('roles'): roles = self._normalize_role_list(app_cred_data['roles']) # When "roles" passed into the application credentials creation # we need to ensure also all implied roles are included similarly # to how it behaves when no roles are passed and current user roles # are being used. # So loop over all roles implied by the current role and add it # explicitly if not already there for role in roles: for implied_role in PROVIDERS.role_api.list_implied_roles( role['id'] ): imp_role_obj = PROVIDERS.role_api.get_role( implied_role['implied_role_id'] ) if imp_role_obj['id'] not in [x['id'] for x in roles]: roles.append(imp_role_obj) # NOTE(cmurphy): The user is not allowed to add a role that is not # in their token. This is to prevent trustees or application # credential users from escallating their privileges to include # additional roles that the trustor or application credential # creator has assigned on the project. token_roles = [r['id'] for r in token.roles] for role in roles: if role['id'] not in token_roles: detail = _( 'Cannot create an application credential with ' 'unassigned role' ) raise ks_exception.ApplicationCredentialValidationError( detail=detail ) else: roles = token.roles return roles def get(self, user_id): """List application credentials for user. GET/HEAD /v3/users/{user_id}/application_credentials """ filters = ('name',) ENFORCER.enforce_call( action='identity:list_application_credentials', filters=filters ) app_cred_api = PROVIDERS.application_credential_api hints = self.build_driver_hints(filters) refs = app_cred_api.list_application_credentials(user_id, hints=hints) return self.wrap_collection(refs, hints=hints) def post(self, user_id): """Create application credential. 
POST /v3/users/{user_id}/application_credentials """ ENFORCER.enforce_call(action='identity:create_application_credential') app_cred_data = self.request_body_json.get( 'application_credential', {} ) validation.lazy_validate( app_cred_schema.application_credential_create, app_cred_data ) token = self.auth_context['token'] _check_unrestricted_application_credential(token) if self.oslo_context.user_id != user_id: action = _( 'Cannot create an application credential for another user.' ) raise ks_exception.ForbiddenAction(action=action) project_id = self.oslo_context.project_id app_cred_data = self._assign_unique_id(app_cred_data) if not app_cred_data.get('secret'): app_cred_data['secret'] = self._generate_secret() app_cred_data['user_id'] = user_id app_cred_data['project_id'] = project_id app_cred_data['roles'] = self._get_roles(app_cred_data, token) if app_cred_data.get('expires_at'): app_cred_data['expires_at'] = utils.parse_expiration_date( app_cred_data['expires_at'] ) if app_cred_data.get('access_rules'): for access_rule in app_cred_data['access_rules']: # If user provides an access rule by ID, it will be looked up # by ID. If user provides an access rule that is identical to # an existing one, the ID generated here will be ignored and # the pre-existing access rule will be used. 
if 'id' not in access_rule: # Generate directly, rather than using _assign_unique_id, # so that there is no deep copy made access_rule['id'] = uuid.uuid4().hex app_cred_data = self._normalize_dict(app_cred_data) app_cred_api = PROVIDERS.application_credential_api try: ref = app_cred_api.create_application_credential( app_cred_data, initiator=self.audit_initiator ) except ks_exception.RoleAssignmentNotFound as e: # Raise a Bad Request, not a Not Found, in accordance with the # API-SIG recommendations: # https://specs.openstack.org/openstack/api-wg/guidelines/http.html#failure-code-clarifications raise ks_exception.ApplicationCredentialValidationError( detail=str(e) ) return self.wrap_member(ref), http.client.CREATED class UserAppCredGetDeleteResource(ks_flask.ResourceBase): collection_key = 'application_credentials' member_key = 'application_credential' def get(self, user_id, application_credential_id): """Get application credential resource. GET/HEAD /v3/users/{user_id}/application_credentials/ {application_credential_id} """ target = _update_request_user_id_attribute() ENFORCER.enforce_call( action='identity:get_application_credential', target_attr=target, ) ref = PROVIDERS.application_credential_api.get_application_credential( application_credential_id ) return self.wrap_member(ref) def delete(self, user_id, application_credential_id): """Delete application credential resource. 
DELETE /v3/users/{user_id}/application_credentials/ {application_credential_id} """ target = _update_request_user_id_attribute() ENFORCER.enforce_call( action='identity:delete_application_credential', target_attr=target ) token = self.auth_context['token'] _check_unrestricted_application_credential(token) PROVIDERS.application_credential_api.delete_application_credential( application_credential_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class UserAccessRuleListResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' def get(self, user_id): """List access rules for user. GET/HEAD /v3/users/{user_id}/access_rules """ filters = ( 'service', 'path', 'method', ) ENFORCER.enforce_call( action='identity:list_access_rules', filters=filters, build_target=_build_user_target_enforcement, ) app_cred_api = PROVIDERS.application_credential_api hints = self.build_driver_hints(filters) refs = app_cred_api.list_access_rules_for_user(user_id, hints=hints) hints = self.build_driver_hints(filters) return self.wrap_collection(refs, hints=hints) class UserAccessRuleGetDeleteResource(ks_flask.ResourceBase): collection_key = 'access_rules' member_key = 'access_rule' def get(self, user_id, access_rule_id): """Get access rule resource. GET/HEAD /v3/users/{user_id}/access_rules/{access_rule_id} """ ENFORCER.enforce_call( action='identity:get_access_rule', build_target=_build_user_target_enforcement, ) ref = PROVIDERS.application_credential_api.get_access_rule( access_rule_id ) return self.wrap_member(ref) def delete(self, user_id, access_rule_id): """Delete access rule resource. 
DELETE /v3/users/{user_id}/access_rules/{access_rule_id} """ ENFORCER.enforce_call( action='identity:delete_access_rule', build_target=_build_user_target_enforcement, ) PROVIDERS.application_credential_api.delete_access_rule( access_rule_id, initiator=self.audit_initiator ) return None, http.client.NO_CONTENT class UserAPI(ks_flask.APIBase): _name = 'users' _import_name = __name__ resources = [UserResource] resource_mapping = [ ks_flask.construct_resource_map( resource=UserChangePasswordResource, url='/users//password', resource_kwargs={}, rel='user_change_password', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserGroupsResource, url='/users//groups', resource_kwargs={}, rel='user_groups', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserProjectsResource, url='/users//projects', resource_kwargs={}, rel='user_projects', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserOSEC2CredentialsResourceListCreate, url='/users//credentials/OS-EC2', resource_kwargs={}, rel='user_credentials', resource_relation_func=( json_home_relations.os_ec2_resource_rel_func ), path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserOSEC2CredentialsResourceGetDelete, url=( '/users//credentials/OS-EC2/' '' ), resource_kwargs={}, rel='user_credential', resource_relation_func=( json_home_relations.os_ec2_resource_rel_func ), path_vars={ 'credential_id': json_home.build_v3_parameter_relation( 'credential_id' ), 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=OAuth1ListAccessTokensResource, url='/users//OS-OAUTH1/access_tokens', resource_kwargs={}, rel='user_access_tokens', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func ), path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( 
resource=OAuth1AccessTokenCRUDResource, url=( '/users//OS-OAUTH1/' 'access_tokens/' ), resource_kwargs={}, rel='user_access_token', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func ), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=OAuth1AccessTokenRoleListResource, url=( '/users//OS-OAUTH1/access_tokens/' '/roles' ), resource_kwargs={}, rel='user_access_token_roles', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func ), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=OAuth1AccessTokenRoleResource, url=( '/users//OS-OAUTH1/access_tokens/' '/roles/' ), resource_kwargs={}, rel='user_access_token_role', resource_relation_func=( json_home_relations.os_oauth1_resource_rel_func ), path_vars={ 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, ), ks_flask.construct_resource_map( resource=UserAppCredListCreateResource, url='/users//application_credentials', resource_kwargs={}, rel='application_credentials', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserAppCredGetDeleteResource, url=( '/users//application_credentials/' '' ), resource_kwargs={}, rel='application_credential', path_vars={ 'user_id': json_home.Parameters.USER_ID, 'application_credential_id': json_home.Parameters.APPLICATION_CRED_ID, }, ), ks_flask.construct_resource_map( resource=UserAccessRuleListResource, url='/users//access_rules', resource_kwargs={}, rel='access_rules', path_vars={'user_id': json_home.Parameters.USER_ID}, ), ks_flask.construct_resource_map( resource=UserAccessRuleGetDeleteResource, url=( '/users//access_rules/' '' ), resource_kwargs={}, rel='access_rule', path_vars={ 'user_id': 
json_home.Parameters.USER_ID, 'access_rule_id': json_home.Parameters.ACCESS_RULE_ID, }, ), ] APIs = (UserAPI,) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/application_credential/0000775000175000017500000000000000000000000022021 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/__init__.py0000664000175000017500000000113500000000000024132 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.application_credential.core import * # noqa ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/application_credential/backends/0000775000175000017500000000000000000000000023573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/backends/__init__.py0000664000175000017500000000000000000000000025672 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/backends/base.py0000664000175000017500000001071200000000000025060 0ustar00zuulzuul00000000000000# Copyright 2018 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class ApplicationCredentialDriverBase(metaclass=abc.ABCMeta): @abc.abstractmethod def authenticate(self, application_credential_id, secret): """Validate an application credential. :param str application_credential_id: Application Credential ID :param str secret: Secret :raises AssertionError: If id or secret is invalid. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_application_credential(self, application_credential, roles): """Create a new application credential. 
:param dict application_credential: Application Credential data :param list roles: A list of roles that apply to the application_credential. :returns: a new application credential """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_application_credential(self, application_credential_id): """Get an application credential by the credential id. :param str application_credential_id: Application Credential ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_application_credentials_for_user(self, user_id, hints): """List application credentials for a user. :param str user_id: User ID :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_application_credential(self, application_credential_id): """Delete a single application credential. :param str application_credential_id: ID of the application credential to delete. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_application_credentials_for_user(self, user_id): """Delete all application credentials for a user. :param user_id: ID of a user to whose application credentials should be deleted. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_application_credentials_for_user_on_project( self, user_id, project_id ): """Delete all application credentials for a user on a given project. :param str user_id: ID of a user to whose application credentials should be deleted. :param str project_id: ID of a project on which to filter application credentials. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_access_rule(self, access_rule_id): """Get an access rule by its ID. 
:param str access_rule_id: Access Rule ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_access_rules_for_user(self, user_id): """List the access rules that a user has created. Access rules are only created as attributes of application credentials, they cannot be created independently. :param str user_id: User ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_access_rule(self, access_rule_id): """Delete one access rule. :param str access_rule_id: Access Rule ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_access_rules_for_user(self, user_id): """Delete all access rules for user. This is called when the user itself is deleted. :param str user_id: User ID """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/backends/sql.py0000664000175000017500000002755100000000000024756 0ustar00zuulzuul00000000000000# Copyright 2018 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_utils import timeutils import sqlalchemy from keystone.application_credential.backends import base from keystone.common import password_hashing from keystone.common import sql from keystone import exception from keystone.i18n import _ class ApplicationCredentialModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'application_credential' attributes = [ 'internal_id', 'id', 'name', 'secret_hash', 'description', 'user_id', 'project_id', 'system', 'expires_at', 'unrestricted', ] internal_id = sql.Column(sql.Integer, primary_key=True, nullable=False) id = sql.Column(sql.String(64), nullable=False) name = sql.Column(sql.String(255), nullable=False) secret_hash = sql.Column(sql.String(255), nullable=False) description = sql.Column(sql.Text()) user_id = sql.Column(sql.String(64), nullable=False) project_id = sql.Column(sql.String(64), nullable=True) system = sql.Column(sql.String(64), nullable=True) expires_at = sql.Column(sql.DateTimeInt()) unrestricted = sql.Column(sql.Boolean) __table_args__ = ( sql.UniqueConstraint( 'name', 'user_id', name='duplicate_app_cred_constraint' ), ) roles = sqlalchemy.orm.relationship( 'ApplicationCredentialRoleModel', backref=sqlalchemy.orm.backref('application_credential'), cascade='all, delete-orphan', cascade_backrefs=False, ) access_rules = sqlalchemy.orm.relationship( 'ApplicationCredentialAccessRuleModel', backref=sqlalchemy.orm.backref('application_credential'), cascade='all, delete-orphan', cascade_backrefs=False, ) class ApplicationCredentialRoleModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'application_credential_role' attributes = ['application_credential_id', 'role_id'] application_credential_id = sql.Column( sql.Integer, sql.ForeignKey( 'application_credential.internal_id', ondelete='cascade' ), primary_key=True, nullable=False, ) role_id = sql.Column(sql.String(64), primary_key=True, nullable=False) class AccessRuleModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'access_rule' attributes = 
['external_id', 'user_id', 'service', 'path', 'method'] id = sql.Column(sql.Integer, primary_key=True, nullable=False) external_id = sql.Column(sql.String(64), index=True) user_id = sql.Column(sql.String(64), index=True) service = sql.Column(sql.String(64)) path = sql.Column(sql.String(128)) method = sql.Column(sql.String(16)) __table_args__ = ( sql.UniqueConstraint( 'user_id', 'service', 'path', 'method', name='duplicate_access_rule_for_user_constraint', ), ) application_credential = sqlalchemy.orm.relationship( 'ApplicationCredentialAccessRuleModel', backref=sqlalchemy.orm.backref('access_rule'), cascade_backrefs=False, ) class ApplicationCredentialAccessRuleModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'application_credential_access_rule' attributes = ['application_credential_id', 'access_rule_id'] application_credential_id = sql.Column( sql.Integer, sql.ForeignKey( 'application_credential.internal_id', ondelete='cascade' ), primary_key=True, nullable=False, ) access_rule_id = sql.Column( sql.Integer, sql.ForeignKey('access_rule.id'), primary_key=True, nullable=False, ) class ApplicationCredential(base.ApplicationCredentialDriverBase): def _check_secret(self, secret, app_cred_ref): secret_hash = app_cred_ref['secret_hash'] return password_hashing.check_password(secret, secret_hash) def _check_expired(self, app_cred_ref): if app_cred_ref.get('expires_at'): return timeutils.utcnow() >= app_cred_ref['expires_at'] return False def authenticate(self, application_credential_id, secret): msg = _('Invalid application credential ID or secret') try: app_cred_ref = self.get_application_credential( application_credential_id ) except exception.ApplicationCredentialNotFound: raise AssertionError(msg) if not self._check_secret(secret, app_cred_ref): raise AssertionError(msg) if self._check_expired(app_cred_ref): raise AssertionError(msg) def _hash_secret(self, app_cred_ref): unhashed_secret = app_cred_ref.pop('secret') hashed_secret = 
password_hashing.hash_password(unhashed_secret) app_cred_ref['secret_hash'] = hashed_secret @sql.handle_conflicts(conflict_type='application_credential') def create_application_credential( self, application_credential, roles, access_rules=None ): app_cred = application_credential.copy() self._hash_secret(app_cred) with sql.session_for_write() as session: ref = ApplicationCredentialModel.from_dict(app_cred) session.add(ref) for role in roles: app_cred_role = ApplicationCredentialRoleModel() app_cred_role.application_credential = ref app_cred_role.role_id = role['id'] session.add(app_cred_role) if access_rules: for access_rule in access_rules: access_rule_ref = ( session.query(AccessRuleModel) .filter_by(external_id=access_rule['id']) .first() ) if not access_rule_ref: query = session.query(AccessRuleModel) access_rule_ref = query.filter_by( user_id=app_cred['user_id'], service=access_rule['service'], path=access_rule['path'], method=access_rule['method'], ).first() if not access_rule_ref: access_rule_ref = AccessRuleModel.from_dict( { k.replace('id', 'external_id'): v for k, v in access_rule.items() } ) access_rule_ref['user_id'] = app_cred['user_id'] session.add(access_rule_ref) app_cred_access_rule = ( ApplicationCredentialAccessRuleModel() ) app_cred_access_rule.application_credential = ref app_cred_access_rule.access_rule = access_rule_ref session.add(app_cred_access_rule) application_credential_dict = self._to_dict(ref) return application_credential_dict def _to_dict(self, ref): app_cred = ref.to_dict() roles = [{'id': r.to_dict()['role_id']} for r in ref.roles] app_cred['roles'] = roles if ref.access_rules: access_rules = [ self._access_rule_to_dict(c.access_rule) for c in ref.access_rules ] app_cred['access_rules'] = access_rules app_cred.pop('internal_id') return app_cred def _access_rule_to_dict(self, ref): access_rule = ref.to_dict() return { k.replace('external_id', 'id'): v for k, v in access_rule.items() if k != 'user_id' and k != 'id' } def 
get_application_credential(self, application_credential_id): with sql.session_for_read() as session: query = session.query(ApplicationCredentialModel).filter_by( id=application_credential_id ) ref = query.first() if ref is None: raise exception.ApplicationCredentialNotFound( application_credential_id=application_credential_id ) app_cred_dict = self._to_dict(ref) return app_cred_dict def list_application_credentials_for_user(self, user_id, hints): with sql.session_for_read() as session: query = session.query(ApplicationCredentialModel) query = sql.filter_limit_query( ApplicationCredentialModel, query, hints ) app_creds = query.filter_by(user_id=user_id) return [self._to_dict(ref) for ref in app_creds] @sql.handle_conflicts(conflict_type='application_credential') def delete_application_credential(self, application_credential_id): with sql.session_for_write() as session: query = session.query(ApplicationCredentialModel) app_cred_ref = query.filter_by( id=application_credential_id ).first() if not app_cred_ref: raise exception.ApplicationCredentialNotFound( application_credential_id=application_credential_id ) session.delete(app_cred_ref) def delete_application_credentials_for_user(self, user_id): with sql.session_for_write() as session: query = session.query(ApplicationCredentialModel) query = query.filter_by(user_id=user_id) query.delete() def delete_application_credentials_for_user_on_project( self, user_id, project_id ): with sql.session_for_write() as session: query = session.query(ApplicationCredentialModel) query = query.filter_by(user_id=user_id) query = query.filter_by(project_id=project_id) query.delete() def get_access_rule(self, access_rule_id): with sql.session_for_read() as session: query = session.query(AccessRuleModel).filter_by( external_id=access_rule_id ) ref = query.first() if not ref: raise exception.AccessRuleNotFound( access_rule_id=access_rule_id ) access_rule = self._access_rule_to_dict(ref) return access_rule def 
list_access_rules_for_user(self, user_id, hints): with sql.session_for_read() as session: query = session.query(AccessRuleModel).filter_by(user_id=user_id) refs = sql.filter_limit_query(AccessRuleModel, query, hints) return [self._access_rule_to_dict(ref) for ref in refs] def delete_access_rule(self, access_rule_id): try: with sql.session_for_write() as session: query = session.query(AccessRuleModel) ref = query.filter_by(external_id=access_rule_id).first() if not ref: raise exception.AccessRuleNotFound( access_rule_id=access_rule_id ) session.delete(ref) except AssertionError: raise exception.ForbiddenNotSecurity( "May not delete access rule in use" ) def delete_access_rules_for_user(self, user_id): with sql.session_for_write() as session: query = session.query(AccessRuleModel).filter_by(user_id=user_id) query.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/core.py0000664000175000017500000002431700000000000023332 0ustar00zuulzuul00000000000000# Copyright 2018 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Application Credential service.""" from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import notifications CONF = keystone.conf.CONF MEMOIZE = cache.get_memoization_decorator(group='application_credential') PROVIDERS = provider_api.ProviderAPIs class Manager(manager.Manager): """Default pivot point for the Application Credential backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.application_credential' _provides_api = 'application_credential_api' _APP_CRED = 'application_credential' _ACCESS_RULE = 'access_rule' def __init__(self): super().__init__(CONF.application_credential.driver) self._register_callback_listeners() def _register_callback_listeners(self): notifications.register_event_callback( notifications.ACTIONS.deleted, 'user', self._delete_app_creds_on_user_delete_callback, ) notifications.register_event_callback( notifications.ACTIONS.disabled, 'user', self._delete_app_creds_on_user_delete_callback, ) notifications.register_event_callback( notifications.ACTIONS.internal, notifications.REMOVE_APP_CREDS_FOR_USER, self._delete_app_creds_on_assignment_removal, ) def _delete_app_creds_on_user_delete_callback( self, service, resource_type, operation, payload ): user_id = payload['resource_info'] self._delete_application_credentials_for_user(user_id) self._delete_access_rules_for_user(user_id) def _delete_app_creds_on_assignment_removal( self, service, resource_type, operation, payload ): user_id = payload['resource_info']['user_id'] project_id = payload['resource_info']['project_id'] self._delete_application_credentials_for_user_on_project( user_id, project_id ) def _get_user_roles(self, user_id, project_id): assignment_list = self.assignment_api.list_role_assignments( 
user_id=user_id, project_id=project_id, effective=True ) return list({x['role_id'] for x in assignment_list}) def _require_user_has_role_in_project(self, roles, user_id, project_id): user_roles = self._get_user_roles(user_id, project_id) for role in roles: if role['id'] not in user_roles: raise exception.RoleAssignmentNotFound( role_id=role['id'], actor_id=user_id, target_id=project_id ) def _assert_limit_not_exceeded(self, user_id): user_limit = CONF.application_credential.user_limit if user_limit >= 0: app_cred_count = len(self.list_application_credentials(user_id)) if app_cred_count >= user_limit: raise exception.ApplicationCredentialLimitExceeded( limit=user_limit ) def _get_role_list(self, app_cred_roles): roles = [] for role in app_cred_roles: roles.append(PROVIDERS.role_api.get_role(role['id'])) return roles def authenticate(self, application_credential_id, secret): """Authenticate with an application credential. :param str application_credential_id: Application Credential ID :param str secret: Application Credential secret """ self.driver.authenticate(application_credential_id, secret) def _process_app_cred(self, app_cred_ref): app_cred_ref = app_cred_ref.copy() app_cred_ref.pop('secret_hash') app_cred_ref['roles'] = self._get_role_list(app_cred_ref['roles']) return app_cred_ref def create_application_credential( self, application_credential, initiator=None ): """Create a new application credential. 
:param dict application_credential: Application Credential data :param initiator: CADF initiator :returns: a new application credential """ application_credential = application_credential.copy() user_id = application_credential['user_id'] project_id = application_credential['project_id'] roles = application_credential.pop('roles', []) access_rules = application_credential.pop('access_rules', None) self._assert_limit_not_exceeded(user_id) self._require_user_has_role_in_project(roles, user_id, project_id) unhashed_secret = application_credential['secret'] ref = self.driver.create_application_credential( application_credential, roles, access_rules ) ref['secret'] = unhashed_secret ref = self._process_app_cred(ref) notifications.Audit.created( self._APP_CRED, application_credential['id'], initiator ) return ref @MEMOIZE def get_application_credential(self, application_credential_id): """Get application credential details. :param str application_credential_id: Application Credential ID :returns: an application credential """ app_cred = self.driver.get_application_credential( application_credential_id ) return self._process_app_cred(app_cred) def list_application_credentials(self, user_id, hints=None): """List application credentials for a user. :param str user_id: User ID :param dict hints: Properties to filter on :returns: a list of application credentials """ hints = hints or driver_hints.Hints() app_cred_list = self.driver.list_application_credentials_for_user( user_id, hints ) return [self._process_app_cred(app_cred) for app_cred in app_cred_list] @MEMOIZE def get_access_rule(self, access_rule_id): """Get access rule details. :param str access_rule_id: Access Rule ID :returns: an access rule """ return self.driver.get_access_rule(access_rule_id) def list_access_rules_for_user(self, user_id, hints=None): """List access rules for user. 
:param str user_id: User ID :returns: a list of access rules """ hints = hints or driver_hints.Hints() return self.driver.list_access_rules_for_user(user_id, hints) def delete_application_credential( self, application_credential_id, initiator=None ): """Delete an application credential. :param str application_credential_id: Application Credential ID :param initiator: CADF initiator :raises keystone.exception.ApplicationCredentialNotFound: If the application credential doesn't exist. """ self.driver.delete_application_credential(application_credential_id) self.get_application_credential.invalidate( self, application_credential_id ) notifications.Audit.deleted( self._APP_CRED, application_credential_id, initiator ) def _delete_application_credentials_for_user( self, user_id, initiator=None ): """Delete all application credentials for a user. :param str user_id: User ID This is triggered when a user is deleted. """ app_creds = self.driver.list_application_credentials_for_user( user_id, driver_hints.Hints() ) self.driver.delete_application_credentials_for_user(user_id) for app_cred in app_creds: self.get_application_credential.invalidate(self, app_cred['id']) notifications.Audit.deleted( self._APP_CRED, app_cred['id'], initiator ) def _delete_application_credentials_for_user_on_project( self, user_id, project_id ): """Delete all application credentials for a user on a given project. :param str user_id: User ID :param str project_id: Project ID This is triggered when a user loses a role assignment on a project. """ hints = driver_hints.Hints() hints.add_filter('project_id', project_id) app_creds = self.driver.list_application_credentials_for_user( user_id, hints ) self.driver.delete_application_credentials_for_user_on_project( user_id, project_id ) for app_cred in app_creds: self.get_application_credential.invalidate(self, app_cred['id']) def delete_access_rule(self, access_rule_id, initiator=None): """Delete an access rule. 
:param str: access_rule_id: Access Rule ID :param initiator: CADF initiator :raises keystone.exception.AccessRuleNotFound: If the access rule doesn't exist. """ self.driver.delete_access_rule(access_rule_id) self.get_access_rule.invalidate(self, access_rule_id) notifications.Audit.deleted( self._ACCESS_RULE, access_rule_id, initiator ) def _delete_access_rules_for_user(self, user_id, initiator=None): """Delete all access rules for a user. :param str user_id: User ID This is triggered when a user is deleted. """ access_rules = self.driver.list_access_rules_for_user( user_id, driver_hints.Hints() ) self.driver.delete_access_rules_for_user(user_id) for rule in access_rules: self.get_access_rule.invalidate(self, rule['id']) notifications.Audit.deleted( self._ACCESS_RULE, rule['id'], initiator ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/application_credential/schema.py0000664000175000017500000000405700000000000023641 0ustar00zuulzuul00000000000000# Copyright 2018 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types _role_properties = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': parameter_types.id_string, 'name': parameter_types.name, }, 'minProperties': 1, 'maxProperties': 1, 'additionalProperties': False, }, } _access_rules_properties = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'path': { 'type': 'string', 'minLength': 0, 'maxLength': 225, 'pattern': r'^\/.*', }, 'method': { 'type': 'string', 'pattern': r'^(POST|GET|HEAD|PATCH|PUT|DELETE)$', }, 'service': parameter_types.id_string, 'id': parameter_types.id_string, }, 'additionalProperties': False, }, } _application_credential_properties = { 'name': parameter_types.name, 'description': validation.nullable(parameter_types.description), 'secret': {'type': ['null', 'string']}, 'expires_at': {'type': ['null', 'string']}, 'roles': _role_properties, 'unrestricted': parameter_types.boolean, 'access_rules': _access_rules_properties, } application_credential_create = { 'type': 'object', 'properties': _application_credential_properties, 'required': ['name'], 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/assignment/0000775000175000017500000000000000000000000017474 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/__init__.py0000664000175000017500000000117100000000000021605 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.assignment.core import * # noqa ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/assignment/backends/0000775000175000017500000000000000000000000021246 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/backends/__init__.py0000664000175000017500000000000000000000000023345 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/backends/base.py0000664000175000017500000001647700000000000022551 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import keystone.conf from keystone import exception CONF = keystone.conf.CONF class AssignmentDriverBase(metaclass=abc.ABCMeta): def _get_list_limit(self): return CONF.assignment.list_limit or CONF.list_limit @abc.abstractmethod def add_role_to_user_and_project(self, user_id, project_id, role_id): """Add a role to a user within given project. :raises keystone.exception.Conflict: If a duplicate role assignment exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_role_from_user_and_project(self, user_id, project_id, role_id): """Remove a role from a user within given project. :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover # assignment/grant crud @abc.abstractmethod def create_grant( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): """Create a new assignment/grant. If the assignment is to a domain, then optionally it may be specified as inherited to owned projects (this requires the OS-INHERIT extension to be enabled). """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_grant_role_ids( self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): """List role ids for assignments/grants.""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_grant_role_id( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): """Check an assignment/grant role id. :raises keystone.exception.RoleAssignmentNotFound: If the role assignment doesn't exist. :returns: None or raises an exception if grant not found """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_grant( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): """Delete assignments/grants. 
:raises keystone.exception.RoleAssignmentNotFound: If the role assignment doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_role_assignments( self, role_id=None, user_id=None, group_ids=None, domain_id=None, project_ids=None, inherited_to_projects=None, ): """Return a list of role assignments for actors on targets. Available parameters represent values in which the returned role assignments attributes need to be filtered on. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_project_assignments(self, project_id): """Delete all assignments for a project. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_role_assignments(self, role_id): """Delete all assignments for a role.""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_user_assignments(self, user_id): """Delete all assignments for a user. :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_group_assignments(self, group_id): """Delete all assignments for a group. :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_domain_assignments(self, domain_id): """Delete all assignments for a domain.""" raise exception.NotImplemented() @abc.abstractmethod def create_system_grant( self, role_id, actor_id, target_id, assignment_type, inherited ): """Grant a user or group a role on the system. 
:param role_id: the unique ID of the role to grant to the user :param actor_id: the unique ID of the user or group :param target_id: the unique ID or string representing the target :param assignment_type: a string describing the relationship of the assignment :param inherited: a boolean denoting if the assignment is inherited or not """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_system_grants(self, actor_id, target_id, assignment_type): """Return a list of all system assignments for a specific entity. :param actor_id: the unique ID of the actor :param target_id: the unique ID of the target :param assignment_type: the type of assignment to return """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_system_grants_by_role(self, role_id): """Return a list of system assignments associated to a role. :param role_id: the unique ID of the role to grant to the user """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_system_grant(self, role_id, actor_id, target_id, inherited): """Check if a user or group has a specific role on the system. :param role_id: the unique ID of the role to grant to the user :param actor_id: the unique ID of the user or group :param target_id: the unique ID or string representing the target :param inherited: a boolean denoting if the assignment is inherited or not """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_system_grant(self, role_id, actor_id, target_id, inherited): """Remove a system assignment from a user or group. 
:param role_id: the unique ID of the role to grant to the user :param actor_id: the unique ID of the user or group :param target_id: the unique ID or string representing the target :param inherited: a boolean denoting if the assignment is inherited or not """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/backends/sql.py0000664000175000017500000004264100000000000022426 0ustar00zuulzuul00000000000000# Copyright 2012-13 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""SQL implementation of the assignment backend driver.

Stores user/group role grants on projects and domains in the
``assignment`` table and system-scoped grants in the
``system_assignment`` table.
"""

from keystone.assignment.backends import base
from keystone.common import sql
from keystone import exception
from keystone.i18n import _


class AssignmentType:
    """String constants naming the actor/target pair of an assignment."""

    USER_PROJECT = 'UserProject'
    GROUP_PROJECT = 'GroupProject'
    USER_DOMAIN = 'UserDomain'
    GROUP_DOMAIN = 'GroupDomain'

    @classmethod
    def calculate_type(cls, user_id, group_id, project_id, domain_id):
        """Derive the assignment type from the supplied actor/target IDs.

        A user actor is checked before a group actor, and a project
        target before a domain target, so user/project win when several
        IDs are passed.

        :raises keystone.exception.AssignmentTypeCalculationError: if no
            valid actor/target combination was provided.
        """
        if user_id:
            if project_id:
                return cls.USER_PROJECT
            if domain_id:
                return cls.USER_DOMAIN
        if group_id:
            if project_id:
                return cls.GROUP_PROJECT
            if domain_id:
                return cls.GROUP_DOMAIN
        # Invalid parameters combination
        raise exception.AssignmentTypeCalculationError(**locals())


class Assignment(base.AssignmentDriverBase):
    """SQL driver for role assignments."""

    @classmethod
    def default_role_driver(cls):
        # Pair the SQL assignment backend with the SQL role backend.
        return 'sql'

    def create_grant(
        self,
        role_id,
        user_id=None,
        group_id=None,
        domain_id=None,
        project_id=None,
        inherited_to_projects=False,
    ):
        """Persist a role grant for an actor (user/group) on a target.

        Silently succeeds when an identical assignment row already
        exists, making the call idempotent.
        """
        assignment_type = AssignmentType.calculate_type(
            user_id, group_id, project_id, domain_id
        )
        try:
            with sql.session_for_write() as session:
                session.add(
                    RoleAssignment(
                        type=assignment_type,
                        actor_id=user_id or group_id,
                        target_id=project_id or domain_id,
                        role_id=role_id,
                        inherited=inherited_to_projects,
                    )
                )
        except sql.DBDuplicateEntry:  # nosec : The v3 grant APIs are silent if
            # the assignment already exists
            pass

    def list_grant_role_ids(
        self,
        user_id=None,
        group_id=None,
        domain_id=None,
        project_id=None,
        inherited_to_projects=False,
    ):
        """Return the role IDs granted to one actor on one target."""
        with sql.session_for_read() as session:
            q = session.query(RoleAssignment.role_id)
            q = q.filter(RoleAssignment.actor_id == (user_id or group_id))
            q = q.filter(RoleAssignment.target_id == (project_id or domain_id))
            q = q.filter(RoleAssignment.inherited == inherited_to_projects)
            return [x.role_id for x in q.all()]

    def _build_grant_filter(
        self,
        session,
        role_id,
        user_id,
        group_id,
        domain_id,
        project_id,
        inherited_to_projects,
    ):
        """Build a query matching exactly one grant row.

        The target filter also constrains the assignment ``type`` column
        to the domain- or project-typed rows, as appropriate.
        """
        q = session.query(RoleAssignment)
        q = q.filter_by(actor_id=user_id or group_id)
        if domain_id:
            q = q.filter_by(target_id=domain_id).filter(
                (RoleAssignment.type == AssignmentType.USER_DOMAIN)
                | (RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
            )
        else:
            q = q.filter_by(target_id=project_id).filter(
                (RoleAssignment.type == AssignmentType.USER_PROJECT)
                | (RoleAssignment.type == AssignmentType.GROUP_PROJECT)
            )
        q = q.filter_by(role_id=role_id)
        q = q.filter_by(inherited=inherited_to_projects)
        return q

    def check_grant_role_id(
        self,
        role_id,
        user_id=None,
        group_id=None,
        domain_id=None,
        project_id=None,
        inherited_to_projects=False,
    ):
        """Verify that a specific grant exists.

        :raises keystone.exception.RoleAssignmentNotFound: if no row
            matches the supplied actor/target/role/inherited tuple.
        """
        with sql.session_for_read() as session:
            try:
                q = self._build_grant_filter(
                    session,
                    role_id,
                    user_id,
                    group_id,
                    domain_id,
                    project_id,
                    inherited_to_projects,
                )
                q.one()
            except sql.NotFound:
                actor_id = user_id or group_id
                target_id = domain_id or project_id
                raise exception.RoleAssignmentNotFound(
                    role_id=role_id, actor_id=actor_id, target_id=target_id
                )

    def delete_grant(
        self,
        role_id,
        user_id=None,
        group_id=None,
        domain_id=None,
        project_id=None,
        inherited_to_projects=False,
    ):
        """Delete a specific grant.

        :raises keystone.exception.RoleAssignmentNotFound: if no row was
            deleted (the grant did not exist).
        """
        with sql.session_for_write() as session:
            q = self._build_grant_filter(
                session,
                role_id,
                user_id,
                group_id,
                domain_id,
                project_id,
                inherited_to_projects,
            )
            # q.delete() returns the number of rows removed; zero means
            # the grant was never there.
            if not q.delete(False):
                actor_id = user_id or group_id
                target_id = domain_id or project_id
                raise exception.RoleAssignmentNotFound(
                    role_id=role_id, actor_id=actor_id, target_id=target_id
                )

    def add_role_to_user_and_project(self, user_id, project_id, role_id):
        """Grant a role to a user on a project.

        :raises keystone.exception.Conflict: if the user already has this
            role on the project.
        """
        try:
            with sql.session_for_write() as session:
                session.add(
                    RoleAssignment(
                        type=AssignmentType.USER_PROJECT,
                        actor_id=user_id,
                        target_id=project_id,
                        role_id=role_id,
                        inherited=False,
                    )
                )
        except sql.DBDuplicateEntry:
            msg = 'User {} already has role {} in tenant {}'.format(
                user_id,
                role_id,
                project_id,
            )
            raise exception.Conflict(type='role grant', details=msg)

    def remove_role_from_user_and_project(self, user_id, project_id, role_id):
        """Delete a user's direct role assignment on a project.

        :raises keystone.exception.RoleNotFound: if no matching grant
            exists.
        """
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter_by(actor_id=user_id)
            q = q.filter_by(target_id=project_id)
            q = q.filter_by(role_id=role_id)
            if q.delete() == 0:
                raise exception.RoleNotFound(
                    message=_(
                        'Cannot remove role that has not been granted, %s'
                    )
                    % role_id
                )

    def _get_user_assignment_types(self):
        # Assignment types whose actor is a user.
        return [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]

    def _get_group_assignment_types(self):
        # Assignment types whose actor is a group.
        return [AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN]

    def _get_project_assignment_types(self):
        # Assignment types whose target is a project.
        return [AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT]

    def _get_domain_assignment_types(self):
        # Assignment types whose target is a domain.
        return [AssignmentType.USER_DOMAIN, AssignmentType.GROUP_DOMAIN]

    def _get_assignment_types(self, user, group, project, domain):
        """Return a list of role assignment types based on provided entities.

        If one of user or group (the "actor") as well as one of project or
        domain (the "target") are provided, the list will contain the role
        assignment type for that specific pair of actor and target.

        If only an actor or target is provided, the list will contain the
        role assignment types that satisfy the specified entity.

        For example, if user and project are provided, the return will be:

            [AssignmentType.USER_PROJECT]

        However, if only user was provided, the return would be:

            [AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN]

        It is not expected that user and group (or project and domain) are
        specified - but if they are, the most fine-grained value will be
        chosen (i.e. user over group, project over domain).

        """
        actor_types = []
        if user:
            actor_types = self._get_user_assignment_types()
        elif group:
            actor_types = self._get_group_assignment_types()

        target_types = []
        if project:
            target_types = self._get_project_assignment_types()
        elif domain:
            target_types = self._get_domain_assignment_types()

        if actor_types and target_types:
            # Both an actor and a target were given: keep only the types
            # that satisfy both.
            return list(set(actor_types).intersection(target_types))

        return actor_types or target_types

    def list_role_assignments(
        self,
        role_id=None,
        user_id=None,
        group_ids=None,
        domain_id=None,
        project_ids=None,
        inherited_to_projects=None,
    ):
        """List assignment dicts matching any of the optional filters.

        Each row is denormalized into a dict with ``user_id`` or
        ``group_id``, ``project_id`` or ``domain_id``, ``role_id`` and,
        for inherited rows, ``'inherited_to_projects': 'projects'``.
        """

        def denormalize_role(ref):
            # Convert a RoleAssignment row to the driver's dict format.
            assignment = {}
            if ref.type == AssignmentType.USER_PROJECT:
                assignment['user_id'] = ref.actor_id
                assignment['project_id'] = ref.target_id
            elif ref.type == AssignmentType.USER_DOMAIN:
                assignment['user_id'] = ref.actor_id
                assignment['domain_id'] = ref.target_id
            elif ref.type == AssignmentType.GROUP_PROJECT:
                assignment['group_id'] = ref.actor_id
                assignment['project_id'] = ref.target_id
            elif ref.type == AssignmentType.GROUP_DOMAIN:
                assignment['group_id'] = ref.actor_id
                assignment['domain_id'] = ref.target_id
            else:
                raise exception.Error(
                    message=_('Unexpected assignment type encountered, %s')
                    % ref.type
                )
            assignment['role_id'] = ref.role_id
            if ref.inherited:
                assignment['inherited_to_projects'] = 'projects'
            return assignment

        with sql.session_for_read() as session:
            assignment_types = self._get_assignment_types(
                user_id, group_ids, project_ids, domain_id
            )

            targets = None
            if project_ids:
                targets = project_ids
            elif domain_id:
                targets = [domain_id]

            actors = None
            if group_ids:
                actors = group_ids
            elif user_id:
                actors = [user_id]

            query = session.query(RoleAssignment)

            # Only apply the filters that were actually requested.
            if role_id:
                query = query.filter_by(role_id=role_id)
            if actors:
                query = query.filter(RoleAssignment.actor_id.in_(actors))
            if targets:
                query = query.filter(RoleAssignment.target_id.in_(targets))
            if assignment_types:
                query = query.filter(RoleAssignment.type.in_(assignment_types))
            if inherited_to_projects is not None:
                query = query.filter_by(inherited=inherited_to_projects)

            return [denormalize_role(ref) for ref in query.all()]

    def delete_project_assignments(self, project_id):
        """Delete every user/group assignment targeting the project."""
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter_by(target_id=project_id).filter(
                RoleAssignment.type.in_(
                    (AssignmentType.USER_PROJECT, AssignmentType.GROUP_PROJECT)
                )
            )
            q.delete(False)

    def delete_role_assignments(self, role_id):
        """Delete a role from regular and system assignment tables."""
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter_by(role_id=role_id)
            q.delete(False)
        # System assignments are stored in a separate table and must be
        # purged as well.
        with sql.session_for_write() as session:
            q = session.query(SystemRoleAssignment)
            q = q.filter_by(role_id=role_id)
            q.delete(False)

    def delete_domain_assignments(self, domain_id):
        """Delete every user/group assignment targeting the domain."""
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter(RoleAssignment.target_id == domain_id).filter(
                (RoleAssignment.type == AssignmentType.USER_DOMAIN)
                | (RoleAssignment.type == AssignmentType.GROUP_DOMAIN)
            )
            q.delete(False)

    def delete_user_assignments(self, user_id):
        """Delete every project/domain assignment held by the user."""
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter_by(actor_id=user_id).filter(
                RoleAssignment.type.in_(
                    (AssignmentType.USER_PROJECT, AssignmentType.USER_DOMAIN)
                )
            )
            q.delete(False)

    def delete_group_assignments(self, group_id):
        """Delete every project/domain assignment held by the group."""
        with sql.session_for_write() as session:
            q = session.query(RoleAssignment)
            q = q.filter_by(actor_id=group_id).filter(
                RoleAssignment.type.in_(
                    (AssignmentType.GROUP_PROJECT, AssignmentType.GROUP_DOMAIN)
                )
            )
            q.delete(False)

    def create_system_grant(
        self, role_id, actor_id, target_id, assignment_type, inherited
    ):
        """Persist a system-scoped grant; idempotent on duplicates."""
        try:
            with sql.session_for_write() as session:
                session.add(
                    SystemRoleAssignment(
                        type=assignment_type,
                        actor_id=actor_id,
                        target_id=target_id,
                        role_id=role_id,
                        inherited=inherited,
                    )
                )
        except sql.DBDuplicateEntry:  # nosec : The v3 grant APIs are silent if
            # the assignment already exists
            pass

    def list_system_grants(self, actor_id, target_id, assignment_type):
        """Return system assignment dicts, filtered by any truthy args."""
        with sql.session_for_read() as session:
            query = session.query(SystemRoleAssignment)
            if actor_id:
                query = query.filter_by(actor_id=actor_id)
            if target_id:
                query = query.filter_by(target_id=target_id)
            if assignment_type:
                query = query.filter_by(type=assignment_type)
            results = query.all()

        return [role.to_dict() for role in results]

    def list_system_grants_by_role(self, role_id):
        """Return all system assignment rows referencing the role."""
        with sql.session_for_read() as session:
            query = session.query(SystemRoleAssignment)
            query = query.filter_by(role_id=role_id)
            return query.all()

    def check_system_grant(self, role_id, actor_id, target_id, inherited):
        """Verify a system grant exists.

        :raises keystone.exception.RoleAssignmentNotFound: if no matching
            row is found.
        """
        with sql.session_for_read() as session:
            try:
                q = session.query(SystemRoleAssignment)
                q = q.filter_by(actor_id=actor_id)
                q = q.filter_by(target_id=target_id)
                q = q.filter_by(role_id=role_id)
                q = q.filter_by(inherited=inherited)
                q.one()
            except sql.NotFound:
                raise exception.RoleAssignmentNotFound(
                    role_id=role_id, actor_id=actor_id, target_id=target_id
                )

    def delete_system_grant(self, role_id, actor_id, target_id, inherited):
        """Delete a system grant.

        :raises keystone.exception.RoleAssignmentNotFound: if no row was
            deleted (the grant did not exist).
        """
        with sql.session_for_write() as session:
            q = session.query(SystemRoleAssignment)
            q = q.filter_by(actor_id=actor_id)
            q = q.filter_by(target_id=target_id)
            q = q.filter_by(role_id=role_id)
            q = q.filter_by(inherited=inherited)
            if not q.delete(False):
                raise exception.RoleAssignmentNotFound(
                    role_id=role_id, actor_id=actor_id, target_id=target_id
                )


class RoleAssignment(sql.ModelBase, sql.ModelDictMixin):
    """ORM model for the ``assignment`` table (project/domain grants)."""

    __tablename__ = 'assignment'
    attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
    # NOTE(henry-nash): Postgres requires a name to be defined for an Enum
    type = sql.Column(
        sql.Enum(
            AssignmentType.USER_PROJECT,
            AssignmentType.GROUP_PROJECT,
            AssignmentType.USER_DOMAIN,
            AssignmentType.GROUP_DOMAIN,
            name='type',
        ),
        nullable=False,
    )
    actor_id = sql.Column(sql.String(64), nullable=False)
    target_id = sql.Column(sql.String(64), nullable=False)
    role_id = sql.Column(sql.String(64), nullable=False)
    inherited = sql.Column(sql.Boolean, default=False, nullable=False)
    __table_args__ = (
        sql.PrimaryKeyConstraint(
            'type', 'actor_id', 'target_id', 'role_id', 'inherited'
        ),
        sql.Index('ix_actor_id', 'actor_id'),
    )

    def to_dict(self):
        """Override parent method with a simpler implementation.

        RoleAssignment doesn't have non-indexed 'extra' attributes, so the
        parent implementation is not applicable.
        """
        return dict(self.items())


class SystemRoleAssignment(sql.ModelBase, sql.ModelDictMixin):
    """ORM model for the ``system_assignment`` table (system grants)."""

    __tablename__ = 'system_assignment'
    attributes = ['type', 'actor_id', 'target_id', 'role_id', 'inherited']
    type = sql.Column(sql.String(64), nullable=False)
    actor_id = sql.Column(sql.String(64), nullable=False)
    target_id = sql.Column(sql.String(64), nullable=False)
    role_id = sql.Column(sql.String(64), nullable=False)
    inherited = sql.Column(sql.Boolean, default=False, nullable=False)
    __table_args__ = (
        sql.PrimaryKeyConstraint(
            'type', 'actor_id', 'target_id', 'role_id', 'inherited'
        ),
        sql.Index('ix_system_actor_id', 'actor_id'),
    )

    def to_dict(self):
        """Override parent method with a simpler implementation.

        SystemRoleAssignment doesn't have non-indexed 'extra' attributes, so
        the parent implementation is not applicable.
        """
        return dict(self.items())
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0
keystone-26.0.0/keystone/assignment/core.py0000664000175000017500000017247000000000000021005 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Main entry point into the Assignment service.""" import copy import itertools from oslo_log import log from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api from keystone.common.resource_options import options as ro_opt import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs # This is a general cache region for assignment administration (CRUD # operations). MEMOIZE = cache.get_memoization_decorator(group='role') # This builds a discrete cache region dedicated to role assignments computed # for a given user + project/domain pair. Any write operation to add or remove # any role assignment should invalidate this entire cache region. COMPUTED_ASSIGNMENTS_REGION = cache.create_region(name='computed assignments') MEMOIZE_COMPUTED_ASSIGNMENTS = cache.get_memoization_decorator( group='role', region=COMPUTED_ASSIGNMENTS_REGION ) @notifications.listener class Manager(manager.Manager): """Default pivot point for the Assignment backend. See :class:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.assignment' _provides_api = 'assignment_api' _SYSTEM_SCOPE_TOKEN = 'system' # nosec _USER_SYSTEM = 'UserSystem' _GROUP_SYSTEM = 'GroupSystem' _PROJECT = 'project' _ROLE_REMOVED_FROM_USER = 'role_removed_from_user' _INVALIDATION_USER_PROJECT_TOKENS = 'invalidate_user_project_tokens' def __init__(self): assignment_driver = CONF.assignment.driver super().__init__(assignment_driver) self.event_callbacks = { notifications.ACTIONS.deleted: { 'domain': [self._delete_domain_assignments], }, } def _delete_domain_assignments( self, service, resource_type, operations, payload ): domain_id = payload['resource_info'] self.driver.delete_domain_assignments(domain_id) def _get_group_ids_for_user_id(self, user_id): # TODO(morganfainberg): Implement a way to get only group_ids # instead of the more expensive to_dict() call for each record. return [ x['id'] for x in PROVIDERS.identity_api.list_groups_for_user(user_id) ] def list_user_ids_for_project(self, project_id): PROVIDERS.resource_api.get_project(project_id) assignment_list = self.list_role_assignments( project_id=project_id, effective=True ) # Use set() to process the list to remove any duplicates return list({x['user_id'] for x in assignment_list}) def _send_app_cred_notification_for_role_removal(self, role_id): """Delete all application credential for a specific role. :param role_id: role identifier :type role_id: string """ assignments = self.list_role_assignments(role_id=role_id) for assignment in assignments: if 'user_id' in assignment and 'project_id' in assignment: payload = { 'user_id': assignment['user_id'], 'project_id': assignment['project_id'], } notifications.Audit.internal( notifications.REMOVE_APP_CREDS_FOR_USER, payload ) @MEMOIZE_COMPUTED_ASSIGNMENTS def get_roles_for_user_and_project(self, user_id, project_id): """Get the roles associated with a user within given project. 
This includes roles directly assigned to the user on the project, as well as those by virtue of group membership or inheritance. :returns: a list of role ids. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. """ PROVIDERS.resource_api.get_project(project_id) assignment_list = self.list_role_assignments( user_id=user_id, project_id=project_id, effective=True ) # Use set() to process the list to remove any duplicates return list({x['role_id'] for x in assignment_list}) @MEMOIZE_COMPUTED_ASSIGNMENTS def get_roles_for_trustor_and_project(self, trustor_id, project_id): """Get the roles associated with a trustor within given project. This includes roles directly assigned to the trustor on the project, as well as those by virtue of group membership or inheritance, but it doesn't include the domain roles. :returns: a list of role ids. :raises keystone.exception.ProjectNotFound: If the project doesn't exist. """ PROVIDERS.resource_api.get_project(project_id) assignment_list = self.list_role_assignments( user_id=trustor_id, project_id=project_id, effective=True, strip_domain_roles=False, ) # Use set() to process the list to remove any duplicates return list({x['role_id'] for x in assignment_list}) @MEMOIZE_COMPUTED_ASSIGNMENTS def get_roles_for_user_and_domain(self, user_id, domain_id): """Get the roles associated with a user within given domain. :returns: a list of role ids. :raises keystone.exception.DomainNotFound: If the domain doesn't exist. """ PROVIDERS.resource_api.get_domain(domain_id) assignment_list = self.list_role_assignments( user_id=user_id, domain_id=domain_id, effective=True ) # Use set() to process the list to remove any duplicates return list({x['role_id'] for x in assignment_list}) def get_roles_for_groups(self, group_ids, project_id=None, domain_id=None): """Get a list of roles for this group on domain and/or project.""" # if no group ids were passed, there are no roles. 
Without this check, # all assignments for the project or domain will be fetched, # which is not what we want. if not group_ids: return [] if project_id is not None: PROVIDERS.resource_api.get_project(project_id) assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, project_id=project_id, effective=True, ) elif domain_id is not None: assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, domain_id=domain_id, effective=True, ) else: raise AttributeError(_("Must specify either domain or project")) role_ids = list({x['role_id'] for x in assignment_list}) return PROVIDERS.role_api.list_roles_from_ids(role_ids) @notifications.role_assignment('created') def _add_role_to_user_and_project_adapter( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None, ): # The parameters for this method must match the parameters for # create_grant so that the notifications.role_assignment decorator # will work. PROVIDERS.resource_api.get_project(project_id) PROVIDERS.role_api.get_role(role_id) self.driver.add_role_to_user_and_project(user_id, project_id, role_id) def add_role_to_user_and_project(self, user_id, project_id, role_id): self._add_role_to_user_and_project_adapter( role_id, user_id=user_id, project_id=project_id ) COMPUTED_ASSIGNMENTS_REGION.invalidate() # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. @MEMOIZE_COMPUTED_ASSIGNMENTS def list_projects_for_user(self, user_id): # FIXME(lbragstad): Without the use of caching, listing effective role # assignments is slow, especially with large data set (lots of users # with multiple role assignments). This should serve as a marker in # case we have the opportunity to come back and optimize this code so # that it can be performant without having a hard dependency on # caching. Please see https://bugs.launchpad.net/keystone/+bug/1700852 # for more details. 
assignment_list = self.list_role_assignments( user_id=user_id, effective=True ) # Use set() to process the list to remove any duplicates project_ids = list( {x['project_id'] for x in assignment_list if x.get('project_id')} ) return PROVIDERS.resource_api.list_projects_from_ids(project_ids) # TODO(henry-nash): We might want to consider list limiting this at some # point in the future. @MEMOIZE_COMPUTED_ASSIGNMENTS def list_domains_for_user(self, user_id): assignment_list = self.list_role_assignments( user_id=user_id, effective=True ) # Use set() to process the list to remove any duplicates domain_ids = list( {x['domain_id'] for x in assignment_list if x.get('domain_id')} ) return PROVIDERS.resource_api.list_domains_from_ids(domain_ids) def list_domains_for_groups(self, group_ids): assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, effective=True ) domain_ids = list( {x['domain_id'] for x in assignment_list if x.get('domain_id')} ) return PROVIDERS.resource_api.list_domains_from_ids(domain_ids) def list_projects_for_groups(self, group_ids): assignment_list = self.list_role_assignments( source_from_group_ids=group_ids, effective=True ) project_ids = list( {x['project_id'] for x in assignment_list if x.get('project_id')} ) return PROVIDERS.resource_api.list_projects_from_ids(project_ids) @notifications.role_assignment('deleted') def _remove_role_from_user_and_project_adapter( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, context=None, ): # The parameters for this method must match the parameters for # delete_grant so that the notifications.role_assignment decorator # will work. 
self.driver.remove_role_from_user_and_project( user_id, project_id, role_id ) payload = {'user_id': user_id, 'project_id': project_id} notifications.Audit.internal( notifications.REMOVE_APP_CREDS_FOR_USER, payload ) self._invalidate_token_cache( role_id, group_id, user_id, project_id, domain_id ) def remove_role_from_user_and_project(self, user_id, project_id, role_id): self._remove_role_from_user_and_project_adapter( role_id, user_id=user_id, project_id=project_id ) COMPUTED_ASSIGNMENTS_REGION.invalidate() def _invalidate_token_cache( self, role_id, group_id, user_id, project_id, domain_id ): if group_id: actor_type = 'group' actor_id = group_id elif user_id: actor_type = 'user' actor_id = user_id if domain_id: target_type = 'domain' target_id = domain_id elif project_id: target_type = 'project' target_id = project_id reason = ( 'Invalidating the token cache because role %(role_id)s was ' 'removed from %(actor_type)s %(actor_id)s on %(target_type)s ' '%(target_id)s.' % { 'role_id': role_id, 'actor_type': actor_type, 'actor_id': actor_id, 'target_type': target_type, 'target_id': target_id, } ) notifications.invalidate_token_cache_notification(reason) @notifications.role_assignment('created') def create_grant( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, initiator=None, ): role = PROVIDERS.role_api.get_role(role_id) if domain_id: PROVIDERS.resource_api.get_domain(domain_id) if project_id: project = PROVIDERS.resource_api.get_project(project_id) # For domain specific roles, the domain of the project # and role must match if role['domain_id'] and project['domain_id'] != role['domain_id']: raise exception.DomainSpecificRoleMismatch( role_id=role_id, project_id=project_id ) self.driver.create_grant( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) COMPUTED_ASSIGNMENTS_REGION.invalidate() def get_grant( self, role_id, 
user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): role_ref = PROVIDERS.role_api.get_role(role_id) if domain_id: PROVIDERS.resource_api.get_domain(domain_id) if project_id: PROVIDERS.resource_api.get_project(project_id) self.check_grant_role_id( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) return role_ref def list_grants( self, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, ): if domain_id: PROVIDERS.resource_api.get_domain(domain_id) if project_id: PROVIDERS.resource_api.get_project(project_id) grant_ids = self.list_grant_role_ids( user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) return PROVIDERS.role_api.list_roles_from_ids(grant_ids) @notifications.role_assignment('deleted') def delete_grant( self, role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False, initiator=None, ): # check if role exist before any processing PROVIDERS.role_api.get_role(role_id) if group_id is None: # check if role exists on the user before revoke self.check_grant_role_id( role_id, user_id=user_id, group_id=None, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) self._invalidate_token_cache( role_id, group_id, user_id, project_id, domain_id ) else: try: # check if role exists on the group before revoke self.check_grant_role_id( role_id, user_id=None, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) if CONF.token.revoke_by_id: self._invalidate_token_cache( role_id, group_id, user_id, project_id, domain_id ) except exception.GroupNotFound: LOG.debug( 'Group %s not found, no tokens to invalidate.', group_id ) if domain_id: PROVIDERS.resource_api.get_domain(domain_id) if project_id: 
PROVIDERS.resource_api.get_project(project_id) self.driver.delete_grant( role_id, user_id=user_id, group_id=group_id, domain_id=domain_id, project_id=project_id, inherited_to_projects=inherited_to_projects, ) COMPUTED_ASSIGNMENTS_REGION.invalidate() # The methods _expand_indirect_assignment, _list_direct_role_assignments # and _list_effective_role_assignments below are only used on # list_role_assignments, but they are not in its scope as nested functions # since it would significantly increase McCabe complexity, that should be # kept as it is in order to detect unnecessarily complex code, which is not # this case. def _expand_indirect_assignment( self, ref, user_id=None, project_id=None, subtree_ids=None, expand_groups=True, ): """Return a list of expanded role assignments. This methods is called for each discovered assignment that either needs a group assignment expanded into individual user assignments, or needs an inherited assignment to be applied to its children. In all cases, if either user_id and/or project_id is specified, then we filter the result on those values. If project_id is specified and subtree_ids is None, then this indicates that we are only interested in that one project. If subtree_ids is not None, then this is an indicator that any inherited assignments need to be expanded down the tree. The actual subtree_ids don't need to be used as a filter here, since we already ensured only those assignments that could affect them were passed to this method. If expand_groups is True then we expand groups out to a list of assignments, one for each member of that group. """ def create_group_assignment(base_ref, user_id): """Create a group assignment from the provided ref.""" ref = copy.deepcopy(base_ref) ref['user_id'] = user_id indirect = ref.setdefault('indirect', {}) indirect['group_id'] = ref.pop('group_id') return ref def expand_group_assignment(ref, user_id): """Expand group role assignment. 
For any group role assignment on a target, it is replaced by a list of role assignments containing one for each user of that group on that target. An example of accepted ref is:: { 'group_id': group_id, 'project_id': project_id, 'role_id': role_id } Once expanded, it should be returned as a list of entities like the one below, one for each user_id in the provided group_id. :: { 'user_id': user_id, 'project_id': project_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in indirect subdict. """ if user_id: return [create_group_assignment(ref, user_id=user_id)] # Note(prashkre): Try to get the users in a group, # if a group wasn't found in the backend, users are set # as empty list. try: users = PROVIDERS.identity_api.list_users_in_group( ref['group_id'] ) except exception.GroupNotFound: LOG.warning( 'Group %(group)s was not found but still has role ' 'assignments.', {'group': ref['group_id']}, ) users = [] return [ create_group_assignment(ref, user_id=m['id']) for m in users ] def expand_inherited_assignment( ref, user_id, project_id, subtree_ids, expand_groups ): """Expand inherited role assignments. If expand_groups is True and this is a group role assignment on a target, replace it by a list of role assignments containing one for each user of that group, on every project under that target. If expand_groups is False, then return a group assignment on an inherited target. If this is a user role assignment on a specific target (i.e. project_id is specified, but subtree_ids is None) then simply format this as a single assignment (since we are effectively filtering on project_id). If however, project_id is None or subtree_ids is not None, then replace this one assignment with a list of role assignments for that user on every project under that target. 
An example of accepted ref is:: { 'group_id': group_id, 'project_id': parent_id, 'role_id': role_id, 'inherited_to_projects': 'projects' } Once expanded, it should be returned as a list of entities like the one below, one for each user_id in the provided group_id and for each subproject_id in the project_id subtree. :: { 'user_id': user_id, 'project_id': subproject_id, 'role_id': role_id, 'indirect' : { 'group_id': group_id, 'project_id': parent_id } } Returned list will be formatted by the Controller, which will deduce a role assignment came from group membership if it has both 'user_id' in the main body of the dict and 'group_id' in the 'indirect' subdict, as well as it is possible to deduce if it has come from inheritance if it contains both a 'project_id' in the main body of the dict and 'parent_id' in the 'indirect' subdict. """ def create_inherited_assignment(base_ref, project_id): """Create a project assignment from the provided ref. base_ref can either be a project or domain inherited assignment ref. """ ref = copy.deepcopy(base_ref) indirect = ref.setdefault('indirect', {}) if ref.get('project_id'): indirect['project_id'] = ref.pop('project_id') else: indirect['domain_id'] = ref.pop('domain_id') ref['project_id'] = project_id ref.pop('inherited_to_projects') return ref # Define expanded project list to which to apply this assignment if project_id: # Since ref is an inherited assignment and we are filtering by # project(s), we are only going to apply the assignment to the # relevant project(s) project_ids = [project_id] if subtree_ids: project_ids += subtree_ids # If this is a domain inherited assignment, then we know # that all the project_ids will get this assignment. If # it's a project inherited assignment, and the assignment # point is an ancestor of project_id, then we know that # again all the project_ids will get the assignment. If, # however, the assignment point is within the subtree, # then only a partial tree will get the assignment. 
resource_api = PROVIDERS.resource_api if ref.get('project_id'): if ref['project_id'] in project_ids: project_ids = [ x['id'] for x in resource_api.list_projects_in_subtree( ref['project_id'] ) ] elif ref.get('domain_id'): # A domain inherited assignment, so apply it to all projects # in this domain project_ids = [ x['id'] for x in PROVIDERS.resource_api.list_projects_in_domain( ref['domain_id'] ) ] else: # It must be a project assignment, so apply it to its subtree project_ids = [ x['id'] for x in PROVIDERS.resource_api.list_projects_in_subtree( ref['project_id'] ) ] new_refs = [] if 'group_id' in ref: if expand_groups: # Expand role assignment to all group members on any # inherited target of any of the projects for ref in expand_group_assignment(ref, user_id): new_refs += [ create_inherited_assignment(ref, proj_id) for proj_id in project_ids ] else: # Just place the group assignment on any inherited target # of any of the projects new_refs += [ create_inherited_assignment(ref, proj_id) for proj_id in project_ids ] else: # Expand role assignment for all projects new_refs += [ create_inherited_assignment(ref, proj_id) for proj_id in project_ids ] return new_refs if ref.get('inherited_to_projects') == 'projects': return expand_inherited_assignment( ref, user_id, project_id, subtree_ids, expand_groups ) elif 'group_id' in ref and expand_groups: return expand_group_assignment(ref, user_id) return [ref] def add_implied_roles(self, role_refs): """Expand out implied roles. The role_refs passed in have had all inheritance and group assignments expanded out. We now need to look at the role_id in each ref and see if it is a prior role for some implied roles. If it is, then we need to duplicate that ref, one for each implied role. We store the prior role in the indirect dict that is part of such a duplicated ref, so that a caller can determine where the assignment came from. 
""" def _make_implied_ref_copy(prior_ref, implied_role_id): # Create a ref for an implied role from the ref of a prior role, # setting the new role_id to be the implied role and the indirect # role_id to be the prior role implied_ref = copy.deepcopy(prior_ref) implied_ref['role_id'] = implied_role_id indirect = implied_ref.setdefault('indirect', {}) indirect['role_id'] = prior_ref['role_id'] return implied_ref try: implied_roles_cache = {} role_refs_to_check = list(role_refs) ref_results = list(role_refs) checked_role_refs = list() while role_refs_to_check: next_ref = role_refs_to_check.pop() checked_role_refs.append(next_ref) next_role_id = next_ref['role_id'] if next_role_id in implied_roles_cache: implied_roles = implied_roles_cache[next_role_id] else: implied_roles = PROVIDERS.role_api.list_implied_roles( next_role_id ) implied_roles_cache[next_role_id] = implied_roles for implied_role in implied_roles: implied_ref = _make_implied_ref_copy( next_ref, implied_role['implied_role_id'] ) if implied_ref in checked_role_refs: # Avoid traversing a cycle continue else: ref_results.append(implied_ref) role_refs_to_check.append(implied_ref) except exception.NotImplemented: LOG.error('Role driver does not support implied roles.') return ref_results def _filter_by_role_id(self, role_id, ref_results): # if we arrive here, we need to filer by role_id. filter_results = [] for ref in ref_results: if ref['role_id'] == role_id: filter_results.append(ref) return filter_results def _strip_domain_roles(self, role_refs): """Post process assignment list for domain roles. Domain roles are only designed to do the job of inferring other roles and since that has been done before this method is called, we need to remove any assignments that include a domain role. 
""" def _role_is_global(role_id): ref = PROVIDERS.role_api.get_role(role_id) return ref['domain_id'] is None filter_results = [] for ref in role_refs: if _role_is_global(ref['role_id']): filter_results.append(ref) return filter_results def _list_effective_role_assignments( self, role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited, source_from_group_ids, strip_domain_roles, ): """List role assignments in effective mode. When using effective mode, besides the direct assignments, the indirect ones that come from grouping or inheritance are retrieved and will then be expanded. The resulting list of assignments will be filtered by the provided parameters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. Since we are in effective mode, group can never act as a filter (since group assignments are expanded into user roles) and domain can only be filter if we want non-inherited assignments, since domains can't inherit assignments. The goal of this method is to only ask the driver for those assignments as could effect the result based on the parameter filters specified, hence avoiding retrieving a huge list. """ def list_role_assignments_for_actor( role_id, inherited, user_id=None, group_ids=None, project_id=None, subtree_ids=None, domain_id=None, ): """List role assignments for actor on target. List direct and indirect assignments for an actor, optionally for a given target (i.e. projects or domain). :param role_id: List for a specific role, can be None meaning all roles :param inherited: Indicates whether inherited assignments or only direct assignments are required. If None, then both are required. :param user_id: If not None, list only assignments that affect this user. :param group_ids: A list of groups required. 
Only one of user_id and group_ids can be specified :param project_id: If specified, only include those assignments that affect at least this project, with additionally any projects specified in subtree_ids :param subtree_ids: The list of projects in the subtree. If specified, also include those assignments that affect these projects. These projects are guaranteed to be in the same domain as the project specified in project_id. subtree_ids can only be specified if project_id has also been specified. :param domain_id: If specified, only include those assignments that affect this domain - by definition this will not include any inherited assignments :returns: List of assignments matching the criteria. Any inherited or group assignments that could affect the resulting response are included. """ project_ids_of_interest = None if project_id: if subtree_ids: project_ids_of_interest = subtree_ids + [project_id] else: project_ids_of_interest = [project_id] # List direct project role assignments non_inherited_refs = [] if inherited is False or inherited is None: # Get non inherited assignments non_inherited_refs = self.driver.list_role_assignments( role_id=role_id, domain_id=domain_id, project_ids=project_ids_of_interest, user_id=user_id, group_ids=group_ids, inherited_to_projects=False, ) inherited_refs = [] if inherited is True or inherited is None: # Get inherited assignments if project_id: # The project and any subtree are guaranteed to be owned by # the same domain, so since we are filtering by these # specific projects, then we can only get inherited # assignments from their common domain or from any of # their parents projects. 
# List inherited assignments from the project's domain proj_domain_id = PROVIDERS.resource_api.get_project( project_id )['domain_id'] inherited_refs += self.driver.list_role_assignments( role_id=role_id, domain_id=proj_domain_id, user_id=user_id, group_ids=group_ids, inherited_to_projects=True, ) # For inherited assignments from projects, since we know # they are from the same tree the only places these can # come from are from parents of the main project or # inherited assignments on the project or subtree itself. source_ids = [ project['id'] for project in PROVIDERS.resource_api.list_project_parents( project_id ) ] if subtree_ids: source_ids += project_ids_of_interest if source_ids: inherited_refs += self.driver.list_role_assignments( role_id=role_id, project_ids=source_ids, user_id=user_id, group_ids=group_ids, inherited_to_projects=True, ) else: # List inherited assignments without filtering by target inherited_refs = self.driver.list_role_assignments( role_id=role_id, user_id=user_id, group_ids=group_ids, inherited_to_projects=True, ) return non_inherited_refs + inherited_refs # If filtering by group or inherited domain assignment the list is # guaranteed to be empty if group_id or (domain_id and inherited): return [] if user_id and source_from_group_ids: # You can't do both - and since source_from_group_ids is only used # internally, this must be a coding error by the caller. msg = _( 'Cannot list assignments sourced from groups and filtered ' 'by user ID.' ) raise exception.UnexpectedError(msg) # If filtering by domain, then only non-inherited assignments are # relevant, since domains don't inherit assignments inherited = False if domain_id else inherited # List user or explicit group assignments. # Due to the need to expand implied roles, this call will skip # filtering by role_id and instead return the whole set of roles. # Matching on the specified role is performed at the end. 
direct_refs = list_role_assignments_for_actor( role_id=None, user_id=user_id, group_ids=source_from_group_ids, project_id=project_id, subtree_ids=subtree_ids, domain_id=domain_id, inherited=inherited, ) # And those from the user's groups, so long as we are not restricting # to a set of source groups (in which case we already got those # assignments in the direct listing above). group_refs = [] if not source_from_group_ids and user_id: group_ids = self._get_group_ids_for_user_id(user_id) if group_ids: group_refs = list_role_assignments_for_actor( role_id=None, project_id=project_id, subtree_ids=subtree_ids, group_ids=group_ids, domain_id=domain_id, inherited=inherited, ) # Expand grouping and inheritance on retrieved role assignments refs = [] expand_groups = source_from_group_ids is None for ref in direct_refs + group_refs: refs += self._expand_indirect_assignment( ref, user_id, project_id, subtree_ids, expand_groups ) refs = self.add_implied_roles(refs) if strip_domain_roles: refs = self._strip_domain_roles(refs) if role_id: refs = self._filter_by_role_id(role_id, refs) return refs def _list_direct_role_assignments( self, role_id, user_id, group_id, system, domain_id, project_id, subtree_ids, inherited, ): """List role assignments without applying expansion. Returns a list of direct role assignments, where their attributes match the provided filters. If subtree_ids is not None, then we also want to include all subtree_ids in the filter as well. 
""" group_ids = [group_id] if group_id else None project_ids_of_interest = None if project_id: if subtree_ids: project_ids_of_interest = subtree_ids + [project_id] else: project_ids_of_interest = [project_id] project_and_domain_assignments = [] if not system: project_and_domain_assignments = self.driver.list_role_assignments( role_id=role_id, user_id=user_id, group_ids=group_ids, domain_id=domain_id, project_ids=project_ids_of_interest, inherited_to_projects=inherited, ) system_assignments = [] if system or (not project_id and not domain_id and not system): if user_id: assignments = self.list_system_grants_for_user(user_id) for assignment in assignments: system_assignments.append( { 'system': {'all': True}, 'user_id': user_id, 'role_id': assignment['id'], } ) elif group_id: assignments = self.list_system_grants_for_group(group_id) for assignment in assignments: system_assignments.append( { 'system': {'all': True}, 'group_id': group_id, 'role_id': assignment['id'], } ) else: assignments = self.list_all_system_grants() for assignment in assignments: a = {} if assignment['type'] == self._GROUP_SYSTEM: a['group_id'] = assignment['actor_id'] elif assignment['type'] == self._USER_SYSTEM: a['user_id'] = assignment['actor_id'] a['role_id'] = assignment['role_id'] a['system'] = {'all': True} system_assignments.append(a) if role_id: system_assignments = [ sa for sa in system_assignments if role_id == sa['role_id'] ] assignments = [] for assignment in itertools.chain( project_and_domain_assignments, system_assignments ): assignments.append(assignment) return assignments @MEMOIZE_COMPUTED_ASSIGNMENTS def list_role_assignments( self, role_id=None, user_id=None, group_id=None, system=None, domain_id=None, project_id=None, include_subtree=False, inherited=None, effective=None, include_names=False, source_from_group_ids=None, strip_domain_roles=True, ): """List role assignments, honoring effective mode and provided filters. 
Returns a list of role assignments, where their attributes match the provided filters (role_id, user_id, group_id, domain_id, project_id and inherited). If include_subtree is True, then assignments on all descendants of the project specified by project_id are also included. The inherited filter defaults to None, meaning to get both non-inherited and inherited role assignments. If effective mode is specified, this means that rather than simply return the assignments that match the filters, any group or inheritance assignments will be expanded. Group assignments will become assignments for all the users in that group, and inherited assignments will be shown on the projects below the assignment point. Think of effective mode as being the list of assignments that actually affect a user, for example the roles that would be placed in a token. If include_names is set to true the entities' names are returned in addition to their id's. source_from_group_ids is a list of group IDs and, if specified, then only those assignments that are derived from membership of these groups are considered, and any such assignments will not be expanded into their user membership assignments. This is different to a group filter of the resulting list, instead being a restriction on which assignments should be considered before expansion of inheritance. This option is only used internally (i.e. it is not exposed at the API level) and is only supported in effective mode (since in regular mode there is no difference between this and a group filter, other than it is a list of groups). In effective mode, any domain specific roles are usually stripped from the returned assignments (since such roles are not placed in tokens). This stripping can be disabled by specifying strip_domain_roles=False, which is useful for internal calls like trusts which need to examine the full set of roles. 
""" subtree_ids = None if project_id and include_subtree: subtree_ids = [ x['id'] for x in PROVIDERS.resource_api.list_projects_in_subtree( project_id ) ] if system != 'all': system = None if effective: role_assignments = self._list_effective_role_assignments( role_id, user_id, group_id, domain_id, project_id, subtree_ids, inherited, source_from_group_ids, strip_domain_roles, ) else: role_assignments = self._list_direct_role_assignments( role_id, user_id, group_id, system, domain_id, project_id, subtree_ids, inherited, ) if include_names: return self._get_names_from_role_assignments(role_assignments) return role_assignments def _get_names_from_role_assignments(self, role_assignments): role_assign_list = [] for role_asgmt in role_assignments: new_assign = copy.deepcopy(role_asgmt) for key, value in role_asgmt.items(): if key == 'domain_id': _domain = PROVIDERS.resource_api.get_domain(value) new_assign['domain_name'] = _domain['name'] elif key == 'user_id': try: # Note(knikolla): Try to get the user, otherwise # if the user wasn't found in the backend # use empty values. _user = PROVIDERS.identity_api.get_user(value) except exception.UserNotFound: msg = ( 'User %(user)s not found in the' ' backend but still has role assignments.' ) LOG.warning(msg, {'user': value}) new_assign['user_name'] = '' new_assign['user_domain_id'] = '' new_assign['user_domain_name'] = '' else: new_assign['user_name'] = _user['name'] new_assign['user_domain_id'] = _user['domain_id'] new_assign['user_domain_name'] = ( PROVIDERS.resource_api.get_domain( _user['domain_id'] )['name'] ) elif key == 'group_id': try: # Note(knikolla): Try to get the group, otherwise # if the group wasn't found in the backend # use empty values. _group = PROVIDERS.identity_api.get_group(value) except exception.GroupNotFound: msg = ( 'Group %(group)s not found in the' ' backend but still has role assignments.' 
) LOG.warning(msg, {'group': value}) new_assign['group_name'] = '' new_assign['group_domain_id'] = '' new_assign['group_domain_name'] = '' else: new_assign['group_name'] = _group['name'] new_assign['group_domain_id'] = _group['domain_id'] new_assign['group_domain_name'] = ( PROVIDERS.resource_api.get_domain( _group['domain_id'] )['name'] ) elif key == 'project_id': _project = PROVIDERS.resource_api.get_project(value) new_assign['project_name'] = _project['name'] new_assign['project_domain_id'] = _project['domain_id'] new_assign['project_domain_name'] = ( PROVIDERS.resource_api.get_domain( _project['domain_id'] )['name'] ) elif key == 'role_id': _role = PROVIDERS.role_api.get_role(value) new_assign['role_name'] = _role['name'] if _role['domain_id'] is not None: new_assign['role_domain_id'] = _role['domain_id'] new_assign['role_domain_name'] = ( PROVIDERS.resource_api.get_domain( _role['domain_id'] )['name'] ) role_assign_list.append(new_assign) return role_assign_list def delete_group_assignments(self, group_id): # FIXME(lbragstad): This should be refactored in the Rocky release so # that we can pass the group_id to the system assignment backend like # we do with the project and domain assignment backend. Holding off on # this because it will require an interface change to the backend, # making it harder to backport for Queens RC. self.driver.delete_group_assignments(group_id) system_assignments = self.list_system_grants_for_group(group_id) for assignment in system_assignments: self.delete_system_grant_for_group(group_id, assignment['id']) COMPUTED_ASSIGNMENTS_REGION.invalidate() def delete_user_assignments(self, user_id): # FIXME(lbragstad): This should be refactored in the Rocky release so # that we can pass the user_id to the system assignment backend like we # do with the project and domain assignment backend. Holding off on # this because it will require an interface change to the backend, # making it harder to backport for Queens RC. 
self.driver.delete_user_assignments(user_id) system_assignments = self.list_system_grants_for_user(user_id) for assignment in system_assignments: self.delete_system_grant_for_user(user_id, assignment['id']) COMPUTED_ASSIGNMENTS_REGION.invalidate() def check_system_grant_for_user(self, user_id, role_id): """Check if a user has a specific role on the system. :param user_id: the ID of the user in the assignment :param role_id: the ID of the system role in the assignment :raises keystone.exception.RoleAssignmentNotFound: if the user doesn't have a role assignment matching the role_id on the system """ target_id = self._SYSTEM_SCOPE_TOKEN inherited = False return self.driver.check_system_grant( role_id, user_id, target_id, inherited ) def list_system_grants_for_user(self, user_id): """Return a list of roles the user has on the system. :param user_id: the ID of the user :returns: a list of role assignments the user has system-wide """ target_id = self._SYSTEM_SCOPE_TOKEN assignment_type = self._USER_SYSTEM grants = self.driver.list_system_grants( user_id, target_id, assignment_type ) grant_ids = [] for grant in grants: grant_ids.append(grant['role_id']) return PROVIDERS.role_api.list_roles_from_ids(grant_ids) def create_system_grant_for_user(self, user_id, role_id): """Grant a user a role on the system. :param user_id: the ID of the user :param role_id: the ID of the role to grant on the system """ role = PROVIDERS.role_api.get_role(role_id) if role.get('domain_id'): raise exception.ValidationError( 'Role %(role_id)s is a domain-specific role. Unable to use ' 'a domain-specific role in a system assignment.' % {'role_id': role_id} ) target_id = self._SYSTEM_SCOPE_TOKEN assignment_type = self._USER_SYSTEM inherited = False self.driver.create_system_grant( role_id, user_id, target_id, assignment_type, inherited ) def delete_system_grant_for_user(self, user_id, role_id): """Remove a system grant from a user. 
:param user_id: the ID of the user :param role_id: the ID of the role to remove from the user on the system :raises keystone.exception.RoleAssignmentNotFound: if the user doesn't have a role assignment with role_id on the system """ target_id = self._SYSTEM_SCOPE_TOKEN inherited = False self.driver.delete_system_grant(role_id, user_id, target_id, inherited) COMPUTED_ASSIGNMENTS_REGION.invalidate() def check_system_grant_for_group(self, group_id, role_id): """Check if a group has a specific role on the system. :param group_id: the ID of the group in the assignment :param role_id: the ID of the system role in the assignment :raises keystone.exception.RoleAssignmentNotFound: if the group doesn't have a role assignment matching the role_id on the system """ target_id = self._SYSTEM_SCOPE_TOKEN inherited = False return self.driver.check_system_grant( role_id, group_id, target_id, inherited ) def list_system_grants_for_group(self, group_id): """Return a list of roles the group has on the system. :param group_id: the ID of the group :returns: a list of role assignments the group has system-wide """ target_id = self._SYSTEM_SCOPE_TOKEN assignment_type = self._GROUP_SYSTEM grants = self.driver.list_system_grants( group_id, target_id, assignment_type ) grant_ids = [] for grant in grants: grant_ids.append(grant['role_id']) return PROVIDERS.role_api.list_roles_from_ids(grant_ids) def create_system_grant_for_group(self, group_id, role_id): """Grant a group a role on the system. :param group_id: the ID of the group :param role_id: the ID of the role to grant on the system """ role = PROVIDERS.role_api.get_role(role_id) if role.get('domain_id'): raise exception.ValidationError( 'Role %(role_id)s is a domain-specific role. Unable to use ' 'a domain-specific role in a system assignment.' 
% {'role_id': role_id} ) target_id = self._SYSTEM_SCOPE_TOKEN assignment_type = self._GROUP_SYSTEM inherited = False self.driver.create_system_grant( role_id, group_id, target_id, assignment_type, inherited ) def delete_system_grant_for_group(self, group_id, role_id): """Remove a system grant from a group. :param group_id: the ID of the group :param role_id: the ID of the role to remove from the group on the system :raises keystone.exception.RoleAssignmentNotFound: if the group doesn't have a role assignment with role_id on the system """ target_id = self._SYSTEM_SCOPE_TOKEN inherited = False self.driver.delete_system_grant( role_id, group_id, target_id, inherited ) COMPUTED_ASSIGNMENTS_REGION.invalidate() def list_all_system_grants(self): """Return a list of all system grants.""" actor_id = None target_id = self._SYSTEM_SCOPE_TOKEN assignment_type = None return self.driver.list_system_grants( actor_id, target_id, assignment_type ) class RoleManager(manager.Manager): """Default pivot point for the Role backend.""" driver_namespace = 'keystone.role' _provides_api = 'role_api' _ROLE = 'role' def __init__(self): # If there is a specific driver specified for role, then use it. # Otherwise retrieve the driver type from the assignment driver. 
role_driver = CONF.role.driver if role_driver is None: # Explicitly load the assignment manager object assignment_driver = CONF.assignment.driver assignment_manager_obj = manager.load_driver( Manager.driver_namespace, assignment_driver ) role_driver = assignment_manager_obj.default_role_driver() super().__init__(role_driver) @MEMOIZE def get_role(self, role_id): return self.driver.get_role(role_id) def get_unique_role_by_name(self, role_name, hints=None): if not hints: hints = driver_hints.Hints() hints.add_filter("name", role_name, case_sensitive=True) found_roles = PROVIDERS.role_api.list_roles(hints) if not found_roles: raise exception.RoleNotFound( _("Role %s is not defined") % role_name ) elif len(found_roles) == 1: return {'id': found_roles[0]['id']} else: raise exception.AmbiguityError(resource='role', name=role_name) def create_role(self, role_id, role, initiator=None): # Shallow copy to help mitigate in-line changes that might impact # testing. This mirrors create_user, specifically relevant for # resource options. 
role = role.copy() ret = self.driver.create_role(role_id, role) notifications.Audit.created(self._ROLE, role_id, initiator) if MEMOIZE.should_cache(ret): self.get_role.set(ret, self, role_id) return ret @manager.response_truncated def list_roles(self, hints=None): return self.driver.list_roles(hints or driver_hints.Hints()) def _is_immutable(self, role): return role['options'].get(ro_opt.IMMUTABLE_OPT.option_name, False) def update_role(self, role_id, role, initiator=None): original_role = self.driver.get_role(role_id) # Prevent the update of immutable set roles unless the update is # exclusively used for ro_opt.check_immutable_update( original_resource_ref=original_role, new_resource_ref=role, type='role', resource_id=role_id, ) if ( 'domain_id' in role and role['domain_id'] != original_role['domain_id'] ): raise exception.ValidationError( message=_('Update of `domain_id` is not allowed.') ) ret = self.driver.update_role(role_id, role) notifications.Audit.updated(self._ROLE, role_id, initiator) self.get_role.invalidate(self, role_id) return ret def delete_role(self, role_id, initiator=None): role = self.driver.get_role(role_id) # Prevent deletion of immutable roles. ro_opt.check_immutable_delete( resource_ref=role, resource_type='role', resource_id=role_id ) PROVIDERS.assignment_api._send_app_cred_notification_for_role_removal( role_id ) PROVIDERS.assignment_api.delete_role_assignments(role_id) self.driver.delete_role(role_id) notifications.Audit.deleted(self._ROLE, role_id, initiator) self.get_role.invalidate(self, role_id) reason = ( 'Invalidating the token cache because role %(role_id)s has been ' 'removed. 
Role assignments for users will be recalculated and ' 'enforced accordingly the next time they authenticate or validate ' 'a token' % {'role_id': role_id} ) notifications.invalidate_token_cache_notification(reason) COMPUTED_ASSIGNMENTS_REGION.invalidate() # TODO(ayoung): Add notification def create_implied_role(self, prior_role_id, implied_role_id): implied_role = self.driver.get_role(implied_role_id) prior_role = self.driver.get_role(prior_role_id) if implied_role['name'] in CONF.assignment.prohibited_implied_role: raise exception.InvalidImpliedRole(role_id=implied_role_id) if prior_role['domain_id'] is None and implied_role['domain_id']: msg = _('Global role cannot imply a domain-specific role') raise exception.InvalidImpliedRole(msg, role_id=implied_role_id) response = self.driver.create_implied_role( prior_role_id, implied_role_id ) COMPUTED_ASSIGNMENTS_REGION.invalidate() return response def delete_implied_role(self, prior_role_id, implied_role_id): self.driver.delete_implied_role(prior_role_id, implied_role_id) COMPUTED_ASSIGNMENTS_REGION.invalidate() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/assignment/role_backends/0000775000175000017500000000000000000000000022267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/role_backends/__init__.py0000664000175000017500000000000000000000000024366 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/role_backends/base.py0000664000175000017500000001043500000000000023556 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import keystone.conf from keystone import exception # NOTE(henry-nash): From the manager and above perspective, the domain_id # attribute of a role is nullable. However, to ensure uniqueness in # multi-process configurations, it is better to still use a sql uniqueness # constraint. Since the support for a nullable component of a uniqueness # constraint across different sql databases is mixed, we instead store a # special value to represent null, as defined in NULL_DOMAIN_ID below. NULL_DOMAIN_ID = '<>' CONF = keystone.conf.CONF class RoleDriverBase(metaclass=abc.ABCMeta): def _get_list_limit(self): return CONF.role.list_limit or CONF.list_limit @abc.abstractmethod def create_role(self, role_id, role): """Create a new role. :raises keystone.exception.Conflict: If a duplicate role exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_roles(self, hints): """List roles in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of role_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_roles_from_ids(self, role_ids): """List roles for the provided list of ids. :param role_ids: list of ids :returns: a list of role_refs. This method is used internally by the assignment manager to bulk read a set of roles given their ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_role(self, role_id): """Get a role by ID. 
:returns: role_ref :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_role(self, role_id, role): """Update an existing role. :raises keystone.exception.RoleNotFound: If the role doesn't exist. :raises keystone.exception.Conflict: If a duplicate role exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_role(self, role_id): """Delete an existing role. :raises keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_implied_role(self, prior_role_id, implied_role_id): """Get a role inference rule. :raises keystone.exception.ImpliedRoleNotFound: If the implied role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_implied_role(self, prior_role_id, implied_role_id): """Create a role inference rule. :raises: keystone.exception.RoleNotFound: If the role doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_implied_role(self, prior_role_id, implied_role_id): """Delete a role inference rule. :raises keystone.exception.ImpliedRoleNotFound: If the implied role doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_role_inference_rules(self): """List all the rules used to imply one role from another.""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_implied_roles(self, prior_role_id): """List roles implied from the prior role ID.""" raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/role_backends/resource_options.py0000664000175000017500000000173500000000000026251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import resource_options from keystone.common.resource_options import options as ro_opt ROLE_OPTIONS_REGISTRY = resource_options.ResourceOptionRegistry('ROLE') # NOTE(morgan): wrap this in a function for testing purposes. # This is called on import by design. def register_role_options(): for opt in [ ro_opt.IMMUTABLE_OPT, ]: ROLE_OPTIONS_REGISTRY.register_option(opt) register_role_options() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/role_backends/sql.py0000664000175000017500000001501100000000000023436 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import exception as db_exception from keystone.assignment.role_backends import base from keystone.assignment.role_backends import sql_model from keystone.common import driver_hints from keystone.common import resource_options from keystone.common import sql from keystone import exception class Role(base.RoleDriverBase): @sql.handle_conflicts(conflict_type='role') def create_role(self, role_id, role): with sql.session_for_write() as session: ref = sql_model.RoleTable.from_dict(role) session.add(ref) # Set resource options passed on creation resource_options.resource_options_ref_to_mapper( ref, sql_model.RoleOption ) return ref.to_dict() @driver_hints.truncated def list_roles(self, hints): # If there is a filter on domain_id and the value is None, then to # ensure that the sql filtering works correctly, we need to patch # the value to be NULL_DOMAIN_ID. This is safe to do here since we # know we are able to satisfy any filter of this type in the call to # filter_limit_query() below, which will remove the filter from the # hints (hence ensuring our substitution is not exposed to the caller). 
for f in hints.filters: if f['name'] == 'domain_id' and f['value'] is None: f['value'] = base.NULL_DOMAIN_ID with sql.session_for_read() as session: query = session.query(sql_model.RoleTable) refs = sql.filter_limit_query(sql_model.RoleTable, query, hints) return [ref.to_dict() for ref in refs] def list_roles_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(sql_model.RoleTable) query = query.filter(sql_model.RoleTable.id.in_(ids)) role_refs = query.all() return [role_ref.to_dict() for role_ref in role_refs] def _get_role(self, session, role_id): ref = session.get(sql_model.RoleTable, role_id) if ref is None: raise exception.RoleNotFound(role_id=role_id) return ref def get_role(self, role_id): with sql.session_for_read() as session: return self._get_role(session, role_id).to_dict() @sql.handle_conflicts(conflict_type='role') def update_role(self, role_id, role): with sql.session_for_write() as session: ref = self._get_role(session, role_id) old_dict = ref.to_dict() for k in role: old_dict[k] = role[k] new_role = sql_model.RoleTable.from_dict(old_dict) for attr in sql_model.RoleTable.attributes: if attr != 'id': setattr(ref, attr, getattr(new_role, attr)) ref.extra = new_role.extra ref.description = new_role.description # Move the "_resource_options" attribute over to the real ref # so that resource_options.resource_options_ref_to_mapper can # handle the work. 
setattr( ref, '_resource_options', getattr(new_role, '_resource_options', {}), ) # Move options into the propper attribute mapper construct resource_options.resource_options_ref_to_mapper( ref, sql_model.RoleOption ) return ref.to_dict() def delete_role(self, role_id): with sql.session_for_write() as session: ref = self._get_role(session, role_id) session.delete(ref) def _get_implied_role(self, session, prior_role_id, implied_role_id): query = ( session.query(sql_model.ImpliedRoleTable) .filter(sql_model.ImpliedRoleTable.prior_role_id == prior_role_id) .filter( sql_model.ImpliedRoleTable.implied_role_id == implied_role_id ) ) try: ref = query.one() except sql.NotFound: raise exception.ImpliedRoleNotFound( prior_role_id=prior_role_id, implied_role_id=implied_role_id ) return ref @sql.handle_conflicts(conflict_type='implied_role') def create_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_write() as session: inference = { 'prior_role_id': prior_role_id, 'implied_role_id': implied_role_id, } ref = sql_model.ImpliedRoleTable.from_dict(inference) try: session.add(ref) except db_exception.DBReferenceError: # We don't know which role threw this. # Query each to trigger the exception. 
self._get_role(session, prior_role_id) self._get_role(session, implied_role_id) return ref.to_dict() def delete_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_write() as session: ref = self._get_implied_role( session, prior_role_id, implied_role_id ) session.delete(ref) def list_implied_roles(self, prior_role_id): with sql.session_for_read() as session: query = session.query(sql_model.ImpliedRoleTable).filter( sql_model.ImpliedRoleTable.prior_role_id == prior_role_id ) refs = query.all() return [ref.to_dict() for ref in refs] def list_role_inference_rules(self): with sql.session_for_read() as session: query = session.query(sql_model.ImpliedRoleTable) refs = query.all() return [ref.to_dict() for ref in refs] def get_implied_role(self, prior_role_id, implied_role_id): with sql.session_for_read() as session: ref = self._get_implied_role( session, prior_role_id, implied_role_id ) return ref.to_dict() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/role_backends/sql_model.py0000664000175000017500000001047700000000000024631 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy import orm from sqlalchemy.orm import collections from keystone.assignment.role_backends import base from keystone.assignment.role_backends import resource_options as ro from keystone.common import resource_options from keystone.common import sql class RoleTable(sql.ModelBase, sql.ModelDictMixinWithExtras): def to_dict(self, include_extra_dict=False): d = super().to_dict(include_extra_dict=include_extra_dict) if d['domain_id'] == base.NULL_DOMAIN_ID: d['domain_id'] = None # NOTE(notmorgan): Eventually it may make sense to drop the empty # option dict creation to the superclass (if enough models use it) d['options'] = resource_options.ref_mapper_to_dict_options(self) return d @classmethod def from_dict(cls, role_dict): if 'domain_id' in role_dict and role_dict['domain_id'] is None: new_dict = role_dict.copy() new_dict['domain_id'] = base.NULL_DOMAIN_ID else: new_dict = role_dict # TODO(morgan): move this functionality to a common location resource_options = {} options = new_dict.pop('options', {}) for opt in cls.resource_options_registry.options: if opt.option_name in options: opt_value = options[opt.option_name] # NOTE(notmorgan): None is always a valid type if opt_value is not None: opt.validator(opt_value) resource_options[opt.option_id] = opt_value role_obj = super().from_dict(new_dict) setattr(role_obj, '_resource_options', resource_options) return role_obj __tablename__ = 'role' attributes = ['id', 'name', 'domain_id', 'description'] resource_options_registry = ro.ROLE_OPTIONS_REGISTRY id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) domain_id = sql.Column( sql.String(64), nullable=False, server_default=base.NULL_DOMAIN_ID ) description = sql.Column(sql.String(255), nullable=True) extra = sql.Column(sql.JsonBlob()) _resource_option_mapper = orm.relationship( 'RoleOption', single_parent=True, cascade='all,delete,delete-orphan', lazy='subquery', backref='role', 
collection_class=collections.attribute_mapped_collection('option_id'), ) __table_args__ = (sql.UniqueConstraint('name', 'domain_id'),) class ImpliedRoleTable(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'implied_role' attributes = ['prior_role_id', 'implied_role_id'] prior_role_id = sql.Column( sql.String(64), sql.ForeignKey('role.id', ondelete="CASCADE"), primary_key=True, ) implied_role_id = sql.Column( sql.String(64), sql.ForeignKey('role.id', ondelete="CASCADE"), primary_key=True, ) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes. overrides the `to_dict` function from the base class to avoid having an `extra` field. """ d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class RoleOption(sql.ModelBase): __tablename__ = 'role_option' role_id = sql.Column( sql.String(64), sql.ForeignKey('role.id', ondelete='CASCADE'), nullable=False, primary_key=True, ) option_id = sql.Column(sql.String(4), nullable=False, primary_key=True) option_value = sql.Column(sql.JsonBlob, nullable=True) def __init__(self, option_id, option_value): self.option_id = option_id self.option_value = option_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/assignment/schema.py0000664000175000017500000000214600000000000021311 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from keystone.assignment.role_backends import resource_options as ro from keystone.common.validation import parameter_types # Schema for Identity v3 API _role_properties = { 'name': parameter_types.name, 'description': parameter_types.description, 'options': ro.ROLE_OPTIONS_REGISTRY.json_schema, } role_create = { 'type': 'object', 'properties': _role_properties, 'required': ['name'], 'additionalProperties': True, } role_update = { 'type': 'object', 'properties': _role_properties, 'minProperties': 1, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.498114 keystone-26.0.0/keystone/auth/0000775000175000017500000000000000000000000016265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/__init__.py0000664000175000017500000000131100000000000020372 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(notmorgan): Be careful in adjusting whitespace here, flake8 checks # get cranky. 
from keystone.auth import core # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/core.py0000664000175000017500000005616000000000000017577 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from functools import partial from oslo_log import log import stevedore from keystone.common import driver_hints from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.backends import resource_options as ro LOG = log.getLogger(__name__) CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs # registry of authentication methods AUTH_METHODS = {} AUTH_PLUGINS_LOADED = False def _get_auth_driver_manager(namespace, plugin_name): return stevedore.DriverManager(namespace, plugin_name, invoke_on_load=True) def load_auth_method(method): plugin_name = CONF.auth.get(method) or 'default' namespace = 'keystone.auth.%s' % method driver_manager = _get_auth_driver_manager(namespace, plugin_name) return driver_manager.driver def load_auth_methods(): global AUTH_PLUGINS_LOADED if AUTH_PLUGINS_LOADED: # Only try and load methods a single time. return # config.setup_authentication should be idempotent, call it to ensure we # have setup all the appropriate configuration options we may need. 
keystone.conf.auth.setup_authentication() for plugin in set(CONF.auth.methods): AUTH_METHODS[plugin] = load_auth_method(plugin) AUTH_PLUGINS_LOADED = True def get_auth_method(method_name): global AUTH_METHODS if method_name not in AUTH_METHODS: raise exception.AuthMethodNotSupported() return AUTH_METHODS[method_name] class AuthContext(dict): """Retrofitting auth_context to reconcile identity attributes. The identity attributes must not have conflicting values among the auth plug-ins. The only exception is `expires_at`, which is set to its earliest value. """ # identity attributes need to be reconciled among the auth plugins IDENTITY_ATTRIBUTES = frozenset( ['user_id', 'project_id', 'access_token_id', 'domain_id', 'expires_at'] ) def __setitem__(self, key, val): """Override __setitem__ to prevent conflicting values.""" if key in self.IDENTITY_ATTRIBUTES and key in self: existing_val = self[key] if key == 'expires_at': # special treatment for 'expires_at', we are going to take # the earliest expiration instead. if existing_val != val: LOG.info( '"expires_at" has conflicting values ' '%(existing)s and %(new)s. Will use the ' 'earliest value.', {'existing': existing_val, 'new': val}, ) if existing_val is None or val is None: val = existing_val or val else: val = min(existing_val, val) elif existing_val != val: msg = _( 'Unable to reconcile identity attribute %(attribute)s ' 'as it has conflicting values %(new)s and %(old)s' ) % ({'attribute': key, 'new': val, 'old': existing_val}) raise exception.Unauthorized(msg) return super().__setitem__(key, val) def update(self, E=None, **F): """Override update to prevent conflicting values.""" # NOTE(notmorgan): This will not be nearly as performant as the # use of the built-in "update" method on the dict, however, the # volume of data being changed here is very minimal in most cases # and should not see a significant impact by iterating instead of # explicit setting of values. 
update_dicts = (E or {}, F or {}) for d in update_dicts: for key, val in d.items(): self[key] = val class AuthInfo(provider_api.ProviderAPIMixin): """Encapsulation of "auth" request.""" @staticmethod def create(auth=None, scope_only=False): auth_info = AuthInfo(auth=auth) auth_info._validate_and_normalize_auth_data(scope_only) return auth_info def __init__(self, auth=None): self.auth = auth self._scope_data = (None, None, None, None, None) # self._scope_data is # (domain_id, project_id, trust_ref, unscoped, system) # project scope: (None, project_id, None, None, None) # domain scope: (domain_id, None, None, None, None) # trust scope: (None, None, trust_ref, None, None) # unscoped: (None, None, None, 'unscoped', None) # system: (None, None, None, None, 'all') def _assert_project_is_enabled(self, project_ref): # ensure the project is enabled try: PROVIDERS.resource_api.assert_project_enabled( project_id=project_ref['id'], project=project_ref ) except AssertionError as e: LOG.warning(e) raise exception.Unauthorized from e def _assert_domain_is_enabled(self, domain_ref): try: PROVIDERS.resource_api.assert_domain_enabled( domain_id=domain_ref['id'], domain=domain_ref ) except AssertionError as e: LOG.warning(e) raise exception.Unauthorized from e def _lookup_domain(self, domain_info): domain_id = domain_info.get('id') domain_name = domain_info.get('name') try: if domain_name: if ( CONF.resource.domain_name_url_safe == 'strict' and utils.is_not_url_safe(domain_name) ): msg = 'Domain name cannot contain reserved characters.' tr_msg = _( 'Domain name cannot contain reserved characters.' 
) LOG.warning(msg) raise exception.Unauthorized(message=tr_msg) domain_ref = PROVIDERS.resource_api.get_domain_by_name( domain_name ) else: domain_ref = PROVIDERS.resource_api.get_domain(domain_id) except exception.DomainNotFound as e: LOG.warning(e) raise exception.Unauthorized(e) self._assert_domain_is_enabled(domain_ref) return domain_ref def _lookup_project(self, project_info): project_id = project_info.get('id') project_name = project_info.get('name') try: if project_name: if ( CONF.resource.project_name_url_safe == 'strict' and utils.is_not_url_safe(project_name) ): msg = 'Project name cannot contain reserved characters.' tr_msg = _( 'Project name cannot contain reserved characters.' ) LOG.warning(msg) raise exception.Unauthorized(message=tr_msg) if 'domain' not in project_info: raise exception.ValidationError( attribute='domain', target='project' ) domain_ref = self._lookup_domain(project_info['domain']) project_ref = PROVIDERS.resource_api.get_project_by_name( project_name, domain_ref['id'] ) else: project_ref = PROVIDERS.resource_api.get_project(project_id) domain_id = project_ref['domain_id'] if not domain_id: raise exception.ProjectNotFound(project_id=project_id) # NOTE(morganfainberg): The _lookup_domain method will raise # exception.Unauthorized if the domain isn't found or is # disabled. 
self._lookup_domain({'id': domain_id}) except exception.ProjectNotFound as e: LOG.warning(e) raise exception.Unauthorized(e) self._assert_project_is_enabled(project_ref) return project_ref def _lookup_trust(self, trust_info): trust_id = trust_info.get('id') if not trust_id: raise exception.ValidationError( attribute='trust_id', target='trust' ) trust = PROVIDERS.trust_api.get_trust(trust_id) return trust def _lookup_app_cred(self, app_cred_info): app_cred_id = app_cred_info.get('id') if app_cred_id: get_app_cred = partial( PROVIDERS.application_credential_api.get_application_credential ) return get_app_cred(app_cred_id) name = app_cred_info.get('name') if not name: raise exception.ValidationError( attribute='name or ID', target='application credential' ) user = app_cred_info.get('user') if not user: raise exception.ValidationError( attribute='user', target='application credential' ) user_id = user.get('id') if not user_id: if 'domain' not in user: raise exception.ValidationError( attribute='domain', target='user' ) domain_ref = self._lookup_domain(user['domain']) user_id = PROVIDERS.identity_api.get_user_by_name( user['name'], domain_ref['id'] )['id'] hints = driver_hints.Hints() hints.add_filter('name', name) app_cred_api = PROVIDERS.application_credential_api app_creds = app_cred_api.list_application_credentials(user_id, hints) if len(app_creds) != 1: message = "Could not find application credential: %s" % name tr_message = _("Could not find application credential: %s") % name LOG.warning(message) raise exception.Unauthorized(tr_message) return app_creds[0] def _set_scope_from_app_cred(self, app_cred_info): app_cred_ref = self._lookup_app_cred(app_cred_info) self._scope_data = (None, app_cred_ref['project_id'], None, None, None) return def _validate_and_normalize_scope_data(self): """Validate and normalize scope data.""" if 'identity' in self.auth: if 'application_credential' in self.auth['identity']['methods']: # Application credentials can't choose their own 
scope if 'scope' in self.auth: detail = "Application credentials cannot request a scope." raise exception.ApplicationCredentialAuthError( detail=detail ) self._set_scope_from_app_cred( self.auth['identity']['application_credential'] ) return if 'scope' not in self.auth: return if ( sum( [ 'project' in self.auth['scope'], 'domain' in self.auth['scope'], 'unscoped' in self.auth['scope'], 'system' in self.auth['scope'], 'OS-TRUST:trust' in self.auth['scope'], ] ) != 1 ): msg = 'system, project, domain, OS-TRUST:trust or unscoped' raise exception.ValidationError(attribute=msg, target='scope') if 'system' in self.auth['scope']: self._scope_data = (None, None, None, None, 'all') return if 'unscoped' in self.auth['scope']: self._scope_data = (None, None, None, 'unscoped', None) return if 'project' in self.auth['scope']: project_ref = self._lookup_project(self.auth['scope']['project']) self._scope_data = (None, project_ref['id'], None, None, None) elif 'domain' in self.auth['scope']: domain_ref = self._lookup_domain(self.auth['scope']['domain']) self._scope_data = (domain_ref['id'], None, None, None, None) elif 'OS-TRUST:trust' in self.auth['scope']: trust_ref = self._lookup_trust( self.auth['scope']['OS-TRUST:trust'] ) # TODO(ayoung): when trusts support domains, fill in domain data if trust_ref.get('project_id') is not None: project_ref = self._lookup_project( {'id': trust_ref['project_id']} ) self._scope_data = ( None, project_ref['id'], trust_ref, None, None, ) else: self._scope_data = (None, None, trust_ref, None, None) def _validate_auth_methods(self): # make sure all the method data/payload are provided for method_name in self.get_method_names(): if method_name not in self.auth['identity']: raise exception.ValidationError( attribute=method_name, target='identity' ) # make sure auth method is supported for method_name in self.get_method_names(): if method_name not in AUTH_METHODS: raise exception.AuthMethodNotSupported() def _validate_and_normalize_auth_data(self, 
scope_only=False): """Make sure "auth" is valid. :param scope_only: If it is True, auth methods will not be validated but only the scope data. :type scope_only: boolean """ # make sure "auth" exist if not self.auth: raise exception.ValidationError( attribute='auth', target='request body' ) # NOTE(chioleong): Tokenless auth does not provide auth methods, # we only care about using this method to validate the scope # information. Therefore, validating the auth methods here is # insignificant and we can skip it when scope_only is set to # true. if scope_only is False: self._validate_auth_methods() self._validate_and_normalize_scope_data() def get_method_names(self): """Return the identity method names. :returns: list of auth method names """ # Sanitizes methods received in request's body # Filters out duplicates, while keeping elements' order. method_names = [] for method in self.auth['identity']['methods']: if method not in method_names: method_names.append(method) return method_names def get_method_data(self, method): """Get the auth method payload. :returns: auth method payload """ if method not in self.auth['identity']['methods']: raise exception.ValidationError( attribute=method, target='identity' ) return self.auth['identity'][method] def get_scope(self): """Get scope information. Verify and return the scoping information. :returns: (domain_id, project_id, trust_ref, unscoped, system). If scope to a project, (None, project_id, None, None, None) will be returned. If scoped to a domain, (domain_id, None, None, None, None) will be returned. If scoped to a trust, (None, project_id, trust_ref, None, None), Will be returned, where the project_id comes from the trust definition. If unscoped, (None, None, None, 'unscoped', None) will be returned. If system_scoped, (None, None, None, None, 'all') will be returned. 
""" return self._scope_data def set_scope( self, domain_id=None, project_id=None, trust=None, unscoped=None, system=None, ): """Set scope information.""" if domain_id and project_id: msg = _('Scoping to both domain and project is not allowed') raise ValueError(msg) if domain_id and trust: msg = _('Scoping to both domain and trust is not allowed') raise ValueError(msg) if project_id and trust: msg = _('Scoping to both project and trust is not allowed') raise ValueError(msg) if system and project_id: msg = _('Scoping to both project and system is not allowed') raise ValueError(msg) if system and domain_id: msg = _('Scoping to both domain and system is not allowed') raise ValueError(msg) self._scope_data = (domain_id, project_id, trust, unscoped, system) class UserMFARulesValidator(provider_api.ProviderAPIMixin): """Helper object that can validate the MFA Rules.""" @classmethod def _auth_methods(cls): if AUTH_PLUGINS_LOADED: return set(AUTH_METHODS.keys()) raise RuntimeError(_('Auth Method Plugins are not loaded.')) @classmethod def check_auth_methods_against_rules(cls, user_id, auth_methods): """Validate the MFA rules against the successful auth methods. :param user_id: The user's ID (uuid). :type user_id: str :param auth_methods: List of methods that were used for auth :type auth_methods: set :returns: Boolean, ``True`` means rules match and auth may proceed, ``False`` means rules do not match. """ user_ref = PROVIDERS.identity_api.get_user(user_id) mfa_rules = user_ref['options'].get(ro.MFA_RULES_OPT.option_name, []) mfa_rules_enabled = user_ref['options'].get( ro.MFA_ENABLED_OPT.option_name, True ) rules = cls._parse_rule_structure(mfa_rules, user_ref['id']) if not rules or not mfa_rules_enabled: # return quickly if the rules are disabled for the user or not set LOG.debug( 'MFA Rules not processed for user `%(user_id)s`. 
' 'Rule list: `%(rules)s` (Enabled: `%(enabled)s`).', { 'user_id': user_id, 'rules': mfa_rules, 'enabled': mfa_rules_enabled, }, ) return True for r in rules: # NOTE(notmorgan): We only check against the actually loaded # auth methods meaning that the keystone administrator may # disable an auth method, and a rule will still pass making it # impossible to accidently lock-out a subset of users with a # bad keystone.conf r_set = set(r).intersection(cls._auth_methods()) if set(auth_methods).issuperset(r_set): # Rule Matches no need to continue, return here. LOG.debug( 'Auth methods for user `%(user_id)s`, `%(methods)s` ' 'matched MFA rule `%(rule)s`. Loaded ' 'auth_methods: `%(loaded)s`', { 'user_id': user_id, 'rule': list(r_set), 'methods': auth_methods, 'loaded': cls._auth_methods(), }, ) return True LOG.debug( 'Auth methods for user `%(user_id)s`, `%(methods)s` did not ' 'match a MFA rule in `%(rules)s`.', {'user_id': user_id, 'methods': auth_methods, 'rules': rules}, ) return False @staticmethod def _parse_rule_structure(rules, user_id): """Validate and parse the rule data structure. Rule sets must be in the form of list of lists. The lists may not have duplicates and must not be empty. The top-level list may be empty indicating that no rules exist. :param rules: The list of rules from the user_ref :type rules: list :param user_id: the user_id, used for logging purposes :type user_id: str :returns: list of list, duplicates are stripped """ # NOTE(notmorgan): Most of this is done at the API request validation # and in the storage layer, it makes sense to also validate here and # ensure the data returned from the DB is sane, This will not raise # any exceptions, but just produce a usable set of data for rules # processing. rule_set = [] if not isinstance(rules, list): LOG.error( 'Corrupt rule data structure for user %(user_id)s, ' 'no rules loaded.', {'user_id': user_id}, ) # Corrupt Data means no rules. Auth success > MFA rules in this # case. 
return rule_set elif not rules: # Exit early, nothing to do here. return rule_set for r_list in rules: if not isinstance(r_list, list): # Rule was not a list, it is invalid, drop the rule from # being considered. LOG.info( 'Ignoring Rule %(type)r; rule must be a list of ' 'strings.', {'type': type(r_list)}, ) continue if r_list: # No empty rules are allowed. _ok_rule = True for item in r_list: if not isinstance(item, str): # Rules may only contain strings for method names # Reject a rule with non-string values LOG.info( 'Ignoring Rule %(rule)r; rule contains ' 'non-string values.', {'rule': r_list}, ) # Rule is known to be bad, drop it from consideration. _ok_rule = False break # NOTE(notmorgan): No FOR/ELSE used here! Though it could be # done and avoid the use of _ok_rule. This is a note for # future developers to avoid using for/else and as an example # of how to implement it that is readable and maintainable. if _ok_rule: # Unique the r_list and cast back to a list and then append # as we know the rule is ok (matches our requirements). # This is outside the for loop, as the for loop is # only used to validate the elements in the list. The # This de-dupe should never be needed, but we are being # extra careful at all levels of validation for the MFA # rules. r_list = list(set(r_list)) rule_set.append(r_list) return rule_set ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.502114 keystone-26.0.0/keystone/auth/plugins/0000775000175000017500000000000000000000000017746 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/__init__.py0000664000175000017500000000115300000000000022057 0ustar00zuulzuul00000000000000# Copyright 2015 CERN # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth.plugins.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/application_credential.py0000664000175000017500000000301600000000000025015 0ustar00zuulzuul00000000000000# Copyright 2018 SUSE Linux GmbH # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.auth import plugins as auth_plugins from keystone.auth.plugins import base from keystone.common import provider_api from keystone import exception PROVIDERS = provider_api.ProviderAPIs METHOD_NAME = 'application_credential' class ApplicationCredential(base.AuthMethodHandler): def authenticate(self, auth_payload): """Authenticate an application.""" response_data = {} app_cred_info = auth_plugins.AppCredInfo.create( auth_payload, METHOD_NAME ) try: PROVIDERS.application_credential_api.authenticate( application_credential_id=app_cred_info.id, secret=app_cred_info.secret, ) except AssertionError as e: raise exception.Unauthorized(e) response_data['user_id'] = app_cred_info.user_id return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/base.py0000664000175000017500000000656400000000000021245 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import collections from keystone.common import provider_api from keystone import exception AuthHandlerResponse = collections.namedtuple( 'AuthHandlerResponse', 'status, response_body, response_data' ) class AuthMethodHandler(provider_api.ProviderAPIMixin, metaclass=abc.ABCMeta): """Abstract base class for an authentication plugin.""" def __init__(self): pass @abc.abstractmethod def authenticate(self, auth_payload): """Authenticate user and return an authentication context. :param auth_payload: the payload content of the authentication request for a given method :type auth_payload: dict If successful, plugin must set ``user_id`` in ``response_data``. ``method_name`` is used to convey any additional authentication methods in case authentication is for re-scoping. For example, if the authentication is for re-scoping, plugin must append the previous method names into ``method_names``; NOTE: This behavior is exclusive to the re-scope type action. Here's an example of ``response_data`` on successful authentication:: { "methods": [ "password", "token" ], "user_id": "abc123" } Plugins are invoked in the order in which they are specified in the ``methods`` attribute of the ``identity`` object. For example, ``custom-plugin`` is invoked before ``password``, which is invoked before ``token`` in the following authentication request:: { "auth": { "identity": { "custom-plugin": { "custom-data": "sdfdfsfsfsdfsf" }, "methods": [ "custom-plugin", "password", "token" ], "password": { "user": { "id": "s23sfad1", "password": "secret" } }, "token": { "id": "sdfafasdfsfasfasdfds" } } } } :returns: AuthHandlerResponse with status set to ``True`` if auth was successful. If `status` is ``False`` and this is a multi-step auth, the ``response_body`` can be in a form of a dict for the next step in authentication. 
:raises keystone.exception.Unauthorized: for authentication failure """ raise exception.Unauthorized() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/core.py0000664000175000017500000002257200000000000021260 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from pycadf import cadftaxonomy as taxonomy from pycadf import reason from pycadf import resource from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import notifications CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs _NOTIFY_OP = 'authenticate' _NOTIFY_EVENT = '{service}.{event}'.format( service=notifications.SERVICE, event=_NOTIFY_OP ) def construct_method_map_from_config(): """Determine authentication method types for deployment. :returns: a dictionary containing the methods and their indexes """ method_map = dict() method_index = 1 for method in CONF.auth.methods: method_map[method_index] = method method_index = method_index * 2 return method_map def convert_method_list_to_integer(methods): """Convert the method type(s) to an integer. 
:param methods: a list of method names :returns: an integer representing the methods """ method_map = construct_method_map_from_config() method_ints = [] for method in methods: for k, v in method_map.items(): if v == method: method_ints.append(k) return sum(method_ints) def convert_integer_to_method_list(method_int): """Convert an integer to a list of methods. :param method_int: an integer representing methods :returns: a corresponding list of methods """ # If the method_int is 0 then no methods were used so return an empty # method list if method_int == 0: return [] method_map = construct_method_map_from_config() method_ints = sorted(method_map, reverse=True) methods = [] for m_int in method_ints: # (lbragstad): By dividing the method_int by each key in the # method_map, we know if the division results in an integer of 1, that # key was used in the construction of the total sum of the method_int. # In that case, we should confirm the key value and store it so we can # look it up later. Then we should take the remainder of what is # confirmed and the method_int and continue the process. In the end, we # should have a list of integers that correspond to indexes in our # method_map and we can reinflate the methods that the original # method_int represents. 
result = int(method_int / m_int) if result == 1: methods.append(method_map[m_int]) method_int = method_int - m_int return methods class BaseUserInfo(provider_api.ProviderAPIMixin): @classmethod def create(cls, auth_payload, method_name): user_auth_info = cls() user_auth_info._validate_and_normalize_auth_data(auth_payload) user_auth_info.METHOD_NAME = method_name return user_auth_info def __init__(self): self.user_id = None self.user_ref = None self.METHOD_NAME = None def _assert_domain_is_enabled(self, domain_ref): try: PROVIDERS.resource_api.assert_domain_enabled( domain_id=domain_ref['id'], domain=domain_ref ) except AssertionError as e: LOG.warning(e) raise exception.Unauthorized from e def _assert_user_is_enabled(self, user_ref): try: PROVIDERS.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref ) except AssertionError as e: LOG.warning(e) raise exception.Unauthorized from e def _lookup_domain(self, domain_info): domain_id = domain_info.get('id') domain_name = domain_info.get('name') if not domain_id and not domain_name: raise exception.ValidationError( attribute='id or name', target='domain' ) try: if domain_name: domain_ref = PROVIDERS.resource_api.get_domain_by_name( domain_name ) else: domain_ref = PROVIDERS.resource_api.get_domain(domain_id) except exception.DomainNotFound as e: LOG.warning(e) raise exception.Unauthorized(e) self._assert_domain_is_enabled(domain_ref) return domain_ref def _validate_and_normalize_auth_data(self, auth_payload): if 'user' not in auth_payload: raise exception.ValidationError( attribute='user', target=self.METHOD_NAME ) user_info = auth_payload['user'] user_id = user_info.get('id') user_name = user_info.get('name') domain_ref = {} if not user_id and not user_name: raise exception.ValidationError( attribute='id or name', target='user' ) try: if user_name: if 'domain' not in user_info: raise exception.ValidationError( attribute='domain', target='user' ) domain_ref = self._lookup_domain(user_info['domain']) 
user_ref = PROVIDERS.identity_api.get_user_by_name( user_name, domain_ref['id'] ) else: user_ref = PROVIDERS.identity_api.get_user(user_id) domain_ref = PROVIDERS.resource_api.get_domain( user_ref['domain_id'] ) self._assert_domain_is_enabled(domain_ref) except exception.UserNotFound as e: LOG.warning(e) # We need to special case USER NOT FOUND here for CADF # notifications as the normal path for notification(s) come from # `identity_api.authenticate` and we are a bit before dropping into # that method. audit_reason = reason.Reason(str(e), str(e.code)) audit_initiator = notifications.build_audit_initiator() # build an appropriate audit initiator with relevant information # for the failed request. This will catch invalid user_name and # invalid user_id. if user_name: audit_initiator.user_name = user_name else: audit_initiator.user_id = user_id audit_initiator.domain_id = domain_ref.get('id') audit_initiator.domain_name = domain_ref.get('name') notifications._send_audit_notification( action=_NOTIFY_OP, initiator=audit_initiator, outcome=taxonomy.OUTCOME_FAILURE, target=resource.Resource(typeURI=taxonomy.ACCOUNT_USER), event_type=_NOTIFY_EVENT, reason=audit_reason, ) raise exception.Unauthorized(e) self._assert_user_is_enabled(user_ref) self.user_ref = user_ref self.user_id = user_ref['id'] self.domain_id = domain_ref['id'] class UserAuthInfo(BaseUserInfo): def __init__(self): super().__init__() self.password = None def _validate_and_normalize_auth_data(self, auth_payload): super()._validate_and_normalize_auth_data(auth_payload) user_info = auth_payload['user'] self.password = user_info.get('password') class TOTPUserInfo(BaseUserInfo): def __init__(self): super().__init__() self.passcode = None def _validate_and_normalize_auth_data(self, auth_payload): super()._validate_and_normalize_auth_data(auth_payload) user_info = auth_payload['user'] self.passcode = user_info.get('passcode') class AppCredInfo(BaseUserInfo): def __init__(self): super().__init__() self.id = None 
self.secret = None def _validate_and_normalize_auth_data(self, auth_payload): app_cred_api = PROVIDERS.application_credential_api if auth_payload.get('id'): app_cred = app_cred_api.get_application_credential( auth_payload['id'] ) self.user_id = app_cred['user_id'] if not auth_payload.get('user'): auth_payload['user'] = {} auth_payload['user']['id'] = self.user_id super()._validate_and_normalize_auth_data(auth_payload) elif auth_payload.get('name'): super()._validate_and_normalize_auth_data(auth_payload) hints = driver_hints.Hints() hints.add_filter('name', auth_payload['name']) app_cred = app_cred_api.list_application_credentials( self.user_id, hints )[0] auth_payload['id'] = app_cred['id'] else: raise exception.ValidationError( attribute='id or name', target='application credential' ) self.id = auth_payload['id'] self.secret = auth_payload.get('secret') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/external.py0000664000175000017500000000604100000000000022143 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone External Authentication Plugins.""" import abc import flask from keystone.auth.plugins import base from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class Base(base.AuthMethodHandler, metaclass=abc.ABCMeta): def authenticate(self, auth_payload): """Use REMOTE_USER to look up the user in the identity backend. The user_id from the actual user from the REMOTE_USER env variable is placed in the response_data. """ response_data = {} if not flask.request.remote_user: msg = _('No authenticated user') raise exception.Unauthorized(msg) try: user_ref = self._authenticate() except Exception: msg = _('Unable to lookup user %s') % flask.request.remote_user raise exception.Unauthorized(msg) response_data['user_id'] = user_ref['id'] return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) @abc.abstractmethod def _authenticate(self): """Look up the user in the identity backend. Return user_ref """ pass class DefaultDomain(Base): def _authenticate(self): """Use remote_user to look up the user in the identity backend.""" return PROVIDERS.identity_api.get_user_by_name( flask.request.remote_user, CONF.identity.default_domain_id ) class Domain(Base): def _authenticate(self): """Use remote_user to look up the user in the identity backend. The domain will be extracted from the REMOTE_DOMAIN environment variable if present. If not, the default domain will be used. 
""" remote_domain = flask.request.environ.get('REMOTE_DOMAIN') if remote_domain: ref = PROVIDERS.resource_api.get_domain_by_name(remote_domain) domain_id = ref['id'] else: domain_id = CONF.identity.default_domain_id return PROVIDERS.identity_api.get_user_by_name( flask.request.remote_user, domain_id ) class KerberosDomain(Domain): """Allows `kerberos` as a method.""" def _authenticate(self): if flask.request.environ.get('AUTH_TYPE') != 'Negotiate': raise exception.Unauthorized(_("auth_type is not Negotiate")) return super()._authenticate() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/mapped.py0000664000175000017500000004011500000000000021567 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools from urllib import parse import uuid import flask from oslo_log import log from pycadf import cadftaxonomy as taxonomy from keystone.auth import plugins as auth_plugins from keystone.auth.plugins import base from keystone.common import provider_api from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils from keystone.i18n import _ from keystone import notifications LOG = log.getLogger(__name__) METHOD_NAME = 'mapped' PROVIDERS = provider_api.ProviderAPIs class Mapped(base.AuthMethodHandler): def _get_token_ref(self, auth_payload): token_id = auth_payload['id'] return PROVIDERS.token_provider_api.validate_token(token_id) def authenticate(self, auth_payload): """Authenticate mapped user and set an authentication context. :param auth_payload: the content of the authentication for a given method In addition to ``user_id`` in ``response_data``, this plugin sets ``group_ids``, ``OS-FEDERATION:identity_provider`` and ``OS-FEDERATION:protocol`` """ if 'id' in auth_payload: token_ref = self._get_token_ref(auth_payload) response_data = handle_scoped_token( token_ref, PROVIDERS.federation_api, PROVIDERS.identity_api ) else: response_data = handle_unscoped_token( auth_payload, PROVIDERS.resource_api, PROVIDERS.federation_api, PROVIDERS.identity_api, PROVIDERS.assignment_api, PROVIDERS.role_api, ) return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) def handle_scoped_token(token, federation_api, identity_api): response_data = {} utils.validate_expiration(token) token_audit_id = token.audit_id identity_provider = token.identity_provider_id protocol = token.protocol_id user_id = token.user_id group_ids = [] for group_dict in token.federated_groups: group_ids.append(group_dict['id']) send_notification = functools.partial( notifications.send_saml_audit_notification, 'authenticate', user_id, group_ids, identity_provider, protocol, token_audit_id, 
) utils.assert_enabled_identity_provider(federation_api, identity_provider) try: mapping = federation_api.get_mapping_from_idp_and_protocol( identity_provider, protocol ) utils.validate_mapped_group_ids(group_ids, mapping['id'], identity_api) except Exception: # NOTE(topol): Diaper defense to catch any exception, so we can # send off failed authentication notification, raise the exception # after sending the notification send_notification(taxonomy.OUTCOME_FAILURE) raise else: send_notification(taxonomy.OUTCOME_SUCCESS) response_data['user_id'] = user_id response_data['group_ids'] = group_ids response_data[federation_constants.IDENTITY_PROVIDER] = identity_provider response_data[federation_constants.PROTOCOL] = protocol return response_data def configure_project_domain(shadow_project, idp_domain_id, resource_api): """Configure federated projects domain. We set the domain to be the default (idp_domain_id) if the project from the attribute mapping comes without a domain. """ LOG.debug('Processing domain for project: %s', shadow_project) domain = shadow_project.get('domain', {"id": idp_domain_id}) if 'id' not in domain: db_domain = resource_api.get_domain_by_name(domain['name']) domain = {"id": db_domain.get('id')} shadow_project['domain'] = domain LOG.debug( 'Project [%s] domain ID was resolved to [%s]', shadow_project['name'], shadow_project['domain']['id'], ) def handle_projects_from_mapping( shadow_projects, idp_domain_id, existing_roles, user, assignment_api, resource_api, ): for shadow_project in shadow_projects: configure_project_domain(shadow_project, idp_domain_id, resource_api) try: # Check and see if the project already exists and if it # does not, try to create it. project = resource_api.get_project_by_name( shadow_project['name'], shadow_project['domain']['id'] ) except exception.ProjectNotFound: LOG.info( 'Project %(project_name)s does not exist. 
It will be ' 'automatically provisioning for user %(user_id)s.', { 'project_name': shadow_project['name'], 'user_id': user['id'], }, ) project_ref = { 'id': uuid.uuid4().hex, 'name': shadow_project['name'], 'domain_id': shadow_project['domain']['id'], } project = resource_api.create_project( project_ref['id'], project_ref ) shadow_roles = shadow_project['roles'] for shadow_role in shadow_roles: assignment_api.create_grant( existing_roles[shadow_role['name']]['id'], user_id=user['id'], project_id=project['id'], ) def handle_unscoped_token( auth_payload, resource_api, federation_api, identity_api, assignment_api, role_api, ): def validate_shadow_mapping( shadow_projects, existing_roles, user_domain_id, idp_id ): # Validate that the roles in the shadow mapping actually exist. If # they don't we should bail early before creating anything. for shadow_project in shadow_projects: for shadow_role in shadow_project['roles']: # The role in the project mapping must exist in order for it to # be useful. if shadow_role['name'] not in existing_roles: LOG.error( 'Role %s was specified in the mapping but does ' 'not exist. All roles specified in a mapping must ' 'exist before assignment.', shadow_role['name'], ) # NOTE(lbragstad): The RoleNotFound exception usually # expects a role_id as the parameter, but in this case we # only have a name so we'll pass that instead. 
raise exception.RoleNotFound(shadow_role['name']) role = existing_roles[shadow_role['name']] if ( role['domain_id'] is not None and role['domain_id'] != user_domain_id ): LOG.error( 'Role %(role)s is a domain-specific role and ' 'cannot be assigned within %(domain)s.', { 'role': shadow_role['name'], 'domain': user_domain_id, }, ) raise exception.DomainSpecificRoleNotWithinIdPDomain( role_name=shadow_role['name'], identity_provider=idp_id ) def is_ephemeral_user(mapped_properties): return mapped_properties['user']['type'] == utils.UserType.EPHEMERAL def build_ephemeral_user_context( user, mapped_properties, identity_provider, protocol ): resp = {} resp['user_id'] = user['id'] resp['group_ids'] = mapped_properties['group_ids'] resp[federation_constants.IDENTITY_PROVIDER] = identity_provider resp[federation_constants.PROTOCOL] = protocol return resp def build_local_user_context(mapped_properties): resp = {} user_info = auth_plugins.UserAuthInfo.create( mapped_properties, METHOD_NAME ) resp['user_id'] = user_info.user_id return resp assertion = extract_assertion_data() try: identity_provider = auth_payload['identity_provider'] except KeyError: raise exception.ValidationError( attribute='identity_provider', target='mapped' ) try: protocol = auth_payload['protocol'] except KeyError: raise exception.ValidationError(attribute='protocol', target='mapped') utils.assert_enabled_identity_provider(federation_api, identity_provider) group_ids = None # NOTE(topol): The user is coming in from an IdP with a SAML assertion # instead of from a token, so we set token_id to None token_id = None # NOTE(marek-denis): This variable is set to None and there is a # possibility that it will be used in the CADF notification. This means # operation will not be mapped to any user (even ephemeral). 
user_id = None try: try: mapped_properties, mapping_id = apply_mapping_filter( identity_provider, protocol, assertion, resource_api, federation_api, identity_api, ) except exception.ValidationError as e: # if mapping is either invalid or yield no valid identity, # it is considered a failed authentication raise exception.Unauthorized(e) if is_ephemeral_user(mapped_properties): idp_domain_id = federation_api.get_idp(identity_provider)[ 'domain_id' ] validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api ) user = identity_api.shadow_federated_user( identity_provider, protocol, mapped_properties['user'], group_ids=mapped_properties['group_ids'], ) if 'projects' in mapped_properties: existing_roles = { role['name']: role for role in role_api.list_roles() } # NOTE(lbragstad): If we are dealing with a shadow mapping, # then we need to make sure we validate all pieces of the # mapping and what it's saying to create. If there is something # wrong with how the mapping is, we should bail early before we # create anything. 
validate_shadow_mapping( mapped_properties['projects'], existing_roles, mapped_properties['user']['domain']['id'], identity_provider, ) handle_projects_from_mapping( mapped_properties['projects'], idp_domain_id, existing_roles, user, assignment_api, resource_api, ) user_id = user['id'] group_ids = mapped_properties['group_ids'] response_data = build_ephemeral_user_context( user, mapped_properties, identity_provider, protocol ) else: response_data = build_local_user_context(mapped_properties) except Exception: # NOTE(topol): Diaper defense to catch any exception, so we can # send off failed authentication notification, raise the exception # after sending the notification outcome = taxonomy.OUTCOME_FAILURE notifications.send_saml_audit_notification( 'authenticate', user_id, group_ids, identity_provider, protocol, token_id, outcome, ) raise else: outcome = taxonomy.OUTCOME_SUCCESS notifications.send_saml_audit_notification( 'authenticate', user_id, group_ids, identity_provider, protocol, token_id, outcome, ) return response_data def extract_assertion_data(): assertion = dict(utils.get_assertion_params_from_env()) return assertion def apply_mapping_filter( identity_provider, protocol, assertion, resource_api, federation_api, identity_api, ): idp = federation_api.get_idp(identity_provider) utils.validate_idp(idp, protocol, assertion) mapped_properties, mapping_id = federation_api.evaluate( identity_provider, protocol, assertion ) # NOTE(marek-denis): We update group_ids only here to avoid fetching # groups identified by name/domain twice. # NOTE(marek-denis): Groups are translated from name/domain to their # corresponding ids in the auth plugin, as we need information what # ``mapping_id`` was used as well as idenity_api and resource_api # objects. 
group_ids = mapped_properties['group_ids'] utils.validate_mapped_group_ids(group_ids, mapping_id, identity_api) group_ids.extend( utils.transform_to_group_ids( mapped_properties['group_names'], mapping_id, identity_api, resource_api, ) ) mapped_properties['group_ids'] = list(set(group_ids)) return mapped_properties, mapping_id def validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api ): """Setup federated username. Function covers all the cases for properly setting user id, a primary identifier for identity objects. Initial version of the mapping engine assumed user is identified by ``name`` and his ``id`` is built from the name. We, however need to be able to accept local rules that identify user by either id or name/domain. The following use-cases are covered: 1) If neither user_name nor user_id is set raise exception.Unauthorized 2) If user_id is set and user_name not, set user_name equal to user_id 3) If user_id is not set and user_name is, set user_id as url safe version of user_name. Furthermore, we set the IdP as the user domain, if the user definition does not come with a domain definition. :param mapped_properties: Properties issued by a RuleProcessor. :type: dictionary :param idp_domain_id: The domain ID of the IdP registered in OpenStack. :type: string :param resource_api: The resource API used to access the database layer. :type: object :raises keystone.exception.Unauthorized: If neither `user_name` nor `user_id` is set. :returns: tuple with user identification :rtype: tuple """ user = mapped_properties['user'] user_id = user.get('id') user_name = user.get('name') or flask.request.remote_user if not any([user_id, user_name]): msg = _( "Could not map user while setting ephemeral user identity. " "Either mapping rules must specify user id/name or " "REMOTE_USER environment variable must be set." 
) raise exception.Unauthorized(msg) elif not user_name: user['name'] = user_id elif not user_id: user_id = user_name if user_name: user['name'] = user_name user['id'] = parse.quote(user_id) LOG.debug('Processing domain for federated user: %s', user) domain = user.get('domain', {"id": idp_domain_id}) if 'id' not in domain: db_domain = resource_api.get_domain_by_name(domain['name']) domain = {"id": db_domain.get('id')} user['domain'] = domain LOG.debug( 'User [%s] domain ID was resolved to [%s]', user['name'], user['domain']['id'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/oauth1.py0000664000175000017500000000524100000000000021523 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import flask from oslo_utils import timeutils from keystone.auth.plugins import base from keystone.common import provider_api from keystone import exception from keystone.i18n import _ from keystone.oauth1 import core as oauth from keystone.oauth1 import validator from keystone.server import flask as ks_flask PROVIDERS = provider_api.ProviderAPIs class OAuth(base.AuthMethodHandler): def authenticate(self, auth_payload): """Turn a signed request with an access key into a keystone token.""" response_data = {} oauth_headers = oauth.get_oauth_headers(flask.request.headers) access_token_id = oauth_headers.get('oauth_token') if not access_token_id: raise exception.ValidationError( attribute='oauth_token', target='request' ) acc_token = PROVIDERS.oauth_api.get_access_token(access_token_id) expires_at = acc_token['expires_at'] if expires_at: now = timeutils.utcnow() expires = timeutils.normalize_time( timeutils.parse_isotime(expires_at) ) if now > expires: raise exception.Unauthorized(_('Access token is expired')) url = ks_flask.base_url(path=flask.request.path) access_verifier = oauth.ResourceEndpoint( request_validator=validator.OAuthValidator(), token_generator=oauth.token_generator, ) result, request = access_verifier.validate_protected_resource_request( url, http_method='POST', body=flask.request.args, headers=dict(flask.request.headers), realms=None, ) if not result: msg = _('Could not validate the access token') raise exception.Unauthorized(msg) response_data['user_id'] = acc_token['authorizing_user_id'] response_data['access_token_id'] = access_token_id response_data['project_id'] = acc_token['project_id'] return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/password.py0000664000175000017500000000313300000000000022162 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack 
Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth import plugins as auth_plugins from keystone.auth.plugins import base from keystone.common import provider_api from keystone import exception from keystone.i18n import _ METHOD_NAME = 'password' PROVIDERS = provider_api.ProviderAPIs class Password(base.AuthMethodHandler): def authenticate(self, auth_payload): """Try to authenticate against the identity backend.""" response_data = {} user_info = auth_plugins.UserAuthInfo.create(auth_payload, METHOD_NAME) try: PROVIDERS.identity_api.authenticate( user_id=user_info.user_id, password=user_info.password ) except AssertionError: # authentication failed because of invalid username or password msg = _('Invalid username or password') raise exception.Unauthorized(msg) response_data['user_id'] = user_info.user_id return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/token.py0000664000175000017500000001165600000000000021451 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import flask from oslo_log import log from keystone.auth.plugins import base from keystone.auth.plugins import mapped from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ LOG = log.getLogger(__name__) CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class Token(base.AuthMethodHandler): def _get_token_ref(self, auth_payload): token_id = auth_payload['id'] return PROVIDERS.token_provider_api.validate_token(token_id) def authenticate(self, auth_payload): if 'id' not in auth_payload: raise exception.ValidationError(attribute='id', target='token') token = self._get_token_ref(auth_payload) if token.is_federated and PROVIDERS.federation_api: response_data = mapped.handle_scoped_token( token, PROVIDERS.federation_api, PROVIDERS.identity_api ) else: response_data = token_authenticate(token) # NOTE(notmorgan): The Token auth method is *very* special and sets the # previous values to the method_names. This is because it can be used # for re-scoping and we want to maintain the values. Most # AuthMethodHandlers do no such thing and this is not required. response_data.setdefault('method_names', []).extend(token.methods) return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) def token_authenticate(token): response_data = {} try: # Do not allow tokens used for delegation to # create another token, or perform any changes of # state in Keystone. 
To do so is to invite elevation of # privilege attacks json_body = flask.request.get_json(silent=True, force=True) or {} project_scoped = 'project' in json_body['auth'].get('scope', {}) domain_scoped = 'domain' in json_body['auth'].get('scope', {}) if token.oauth_scoped: raise exception.ForbiddenAction( action=_( 'Using OAuth-scoped token to create another token. ' 'Create a new OAuth-scoped token instead' ) ) elif token.trust_scoped: raise exception.ForbiddenAction( action=_( 'Using trust-scoped token to create another token. ' 'Create a new trust-scoped token instead' ) ) elif token.system_scoped and (project_scoped or domain_scoped): raise exception.ForbiddenAction( action=_( 'Using a system-scoped token to create a project-scoped ' 'or domain-scoped token is not allowed.' ) ) if not CONF.token.allow_rescope_scoped_token: # Do not allow conversion from scoped tokens. if token.project_scoped or token.domain_scoped: raise exception.ForbiddenAction( action=_('rescope a scoped token') ) # New tokens maintain the audit_id of the original token in the # chain (if possible) as the second element in the audit data # structure. Look for the last element in the audit data structure # which will be either the audit_id of the token (in the case of # a token that has not been rescoped) or the audit_chain id (in # the case of a token that has been rescoped). try: token_audit_id = token.parent_audit_id or token.audit_id except IndexError: # NOTE(morganfainberg): In the case this is a token that was # issued prior to audit id existing, the chain is not tracked. token_audit_id = None # To prevent users from never having to re-authenticate, the original # token expiration time is maintained in the new token. Not doing this # would make it possible for a user to continuously bump token # expiration through token rescoping without proving their identity. 
response_data.setdefault('expires_at', token.expires_at) response_data['audit_id'] = token_audit_id response_data.setdefault('user_id', token.user_id) return response_data except AssertionError as e: LOG.error(e) raise exception.Unauthorized(e) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/plugins/totp.py0000664000175000017500000001130000000000000021301 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Time-based One-time Password Algorithm (TOTP) auth plugin. TOTP is an algorithm that computes a one-time password from a shared secret key and the current time. TOTP is an implementation of a hash-based message authentication code (HMAC). It combines a secret key with the current timestamp using a cryptographic hash function to generate a one-time password. The timestamp typically increases in 30-second intervals, so passwords generated close together in time from the same secret key will be equal. 
""" import base64 from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.twofactor import totp as crypto_totp from oslo_log import log from oslo_utils import timeutils from keystone.auth import plugins from keystone.auth.plugins import base from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF METHOD_NAME = 'totp' LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs PASSCODE_LENGTH = 6 PASSCODE_TIME_PERIOD = 30 def _generate_totp_passcodes(secret, included_previous_windows=0): """Generate TOTP passcode. :param bytes secret: A base32 encoded secret for the TOTP authentication :returns: totp passcode as bytes """ if isinstance(secret, str): # NOTE(dstanek): since this may be coming from the JSON stored in the # database it may be UTF-8 encoded secret = secret.encode('utf-8') # NOTE(nonameentername): cryptography takes a non base32 encoded value for # TOTP. Add the correct padding to be able to base32 decode while len(secret) % 8 != 0: secret = secret + b'=' decoded = base64.b32decode(secret) # NOTE(lhinds) This is marked as #nosec since bandit will see SHA1 # which is marked as insecure. In this instance however, keystone uses # HMAC-SHA1 when generating the TOTP, which is currently not insecure but # will still trigger when scanned by bandit. totp = crypto_totp.TOTP( decoded, PASSCODE_LENGTH, hashes.SHA1(), # nosec PASSCODE_TIME_PERIOD, backend=default_backend(), ) passcode_ts = timeutils.utcnow_ts(microsecond=True) passcodes = [totp.generate(passcode_ts).decode('utf-8')] for i in range(included_previous_windows): # NOTE(adriant): we move back the timestamp the number of seconds in # PASSCODE_TIME_PERIOD each time. 
passcode_ts -= PASSCODE_TIME_PERIOD passcodes.append(totp.generate(passcode_ts).decode('utf-8')) return passcodes class TOTP(base.AuthMethodHandler): def authenticate(self, auth_payload): """Try to authenticate using TOTP.""" response_data = {} user_info = plugins.TOTPUserInfo.create(auth_payload, METHOD_NAME) auth_passcode = auth_payload.get('user').get('passcode') credentials = PROVIDERS.credential_api.list_credentials_for_user( user_info.user_id, type='totp' ) valid_passcode = False for credential in credentials: try: generated_passcodes = _generate_totp_passcodes( credential['blob'], CONF.totp.included_previous_windows ) if auth_passcode in generated_passcodes: valid_passcode = True break except (ValueError, KeyError): LOG.debug( 'No TOTP match; credential id: %s, user_id: %s', credential['id'], user_info.user_id, ) except TypeError: LOG.debug( 'Base32 decode failed for TOTP credential %s', credential['id'], ) if not valid_passcode: # authentication failed because of invalid username or passcode msg = _('Invalid username or TOTP passcode') raise exception.Unauthorized(msg) response_data['user_id'] = user_info.user_id return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/auth/schema.py0000664000175000017500000001527100000000000020105 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types from keystone import exception from keystone.i18n import _ token_issue = { 'type': 'object', 'properties': { 'identity': { 'type': 'object', 'properties': { 'methods': { 'type': 'array', 'items': { 'type': 'string', }, }, 'password': { 'type': 'object', 'properties': { 'user': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'name': { 'type': 'string', }, 'password': { 'type': 'string', }, 'domain': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'name': { 'type': 'string', }, }, }, }, }, }, }, 'token': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, }, 'required': [ 'id', ], }, }, 'required': [ 'methods', ], }, 'scope': { # For explicit unscoped authentication the type should not be # strictly string. Although keystone server specifies the value # to be 'unscoped', old versions of keystoneauth might still be # using `"scope": {'unscoped': {}}` instead of # `"scope": "unscoped"` # https://bugs.launchpad.net/keystoneauth/+bug/1637682/ 'type': ['object', 'string'], 'properties': { 'project': { 'type': 'object', 'properties': { 'name': { 'type': 'string', }, 'id': { 'type': 'string', }, 'domain': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'name': { 'type': 'string', }, }, }, }, }, 'domain': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'name': { 'type': 'string', }, }, }, 'OS-TRUST:trust': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, }, }, 'system': { 'type': 'object', 'properties': {'all': parameter_types.boolean}, }, }, }, }, 'required': [ 'identity', ], } def validate_issue_token_auth(auth=None): if auth is None: return validation.lazy_validate(token_issue, auth) user = auth['identity'].get('password', {}).get('user') if user is not None: if 'id' not in user and 
'name' not in user: msg = _( 'Invalid input for field identity/password/user: ' 'id or name must be present.' ) raise exception.SchemaValidationError(detail=msg) domain = user.get('domain') if domain is not None: if 'id' not in domain and 'name' not in domain: msg = _( 'Invalid input for field identity/password/user/domain: ' 'id or name must be present.' ) raise exception.SchemaValidationError(detail=msg) scope = auth.get('scope') if scope is not None and isinstance(scope, dict): project = scope.get('project') if project is not None: if 'id' not in project and 'name' not in project: msg = _( 'Invalid input for field scope/project: ' 'id or name must be present.' ) raise exception.SchemaValidationError(detail=msg) domain = project.get('domain') if domain is not None: if 'id' not in domain and 'name' not in domain: msg = _( 'Invalid input for field scope/project/domain: ' 'id or name must be present.' ) raise exception.SchemaValidationError(detail=msg) domain = scope.get('domain') if domain is not None: if 'id' not in domain and 'name' not in domain: msg = _( 'Invalid input for field scope/domain: ' 'id or name must be present.' ) raise exception.SchemaValidationError(detail=msg) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.502114 keystone-26.0.0/keystone/catalog/0000775000175000017500000000000000000000000016736 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/__init__.py0000664000175000017500000000116600000000000021053 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.catalog.core import * # noqa ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.502114 keystone-26.0.0/keystone/catalog/backends/0000775000175000017500000000000000000000000020510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/backends/__init__.py0000664000175000017500000000000000000000000022607 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/backends/base.py0000664000175000017500000004041300000000000021776 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from keystone.common import provider_api import keystone.conf from keystone import exception CONF = keystone.conf.CONF class CatalogDriverBase(provider_api.ProviderAPIMixin, metaclass=abc.ABCMeta): """Interface description for the Catalog driver.""" def _get_list_limit(self): return CONF.catalog.list_limit or CONF.list_limit def _ensure_no_circle_in_hierarchical_regions(self, region_ref): if region_ref.get('parent_region_id') is None: return root_region_id = region_ref['id'] parent_region_id = region_ref['parent_region_id'] while parent_region_id: # NOTE(wanghong): check before getting parent region can ensure no # self circle if parent_region_id == root_region_id: raise exception.CircularRegionHierarchyError( parent_region_id=parent_region_id ) parent_region = self.get_region(parent_region_id) parent_region_id = parent_region.get('parent_region_id') @abc.abstractmethod def create_region(self, region_ref): """Create a new region. :raises keystone.exception.Conflict: If the region already exists. :raises keystone.exception.RegionNotFound: If the parent region is invalid. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_regions(self, hints): """List all regions. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of region_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_region(self, region_id): """Get region by id. :returns: region_ref dict :raises keystone.exception.RegionNotFound: If the region doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_region(self, region_id, region_ref): """Update region by id. :returns: region_ref dict :raises keystone.exception.RegionNotFound: If the region doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_region(self, region_id): """Delete an existing region. :raises keystone.exception.RegionNotFound: If the region doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_service(self, service_id, service_ref): """Create a new service. :raises keystone.exception.Conflict: If a duplicate service exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_services(self, hints): """List all services. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of service_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_service(self, service_id): """Get service by id. :returns: service_ref dict :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_service(self, service_id, service_ref): """Update service by id. :returns: service_ref dict :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_service(self, service_id): """Delete an existing service. :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_endpoint(self, endpoint_id, endpoint_ref): """Create a new endpoint for a service. :raises keystone.exception.Conflict: If a duplicate endpoint exists. :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint(self, endpoint_id): """Get endpoint by id. 
:returns: endpoint_ref dict :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoints(self, hints): """List all endpoints. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: list of endpoint_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_endpoint(self, endpoint_id, endpoint_ref): """Get endpoint by id. :returns: endpoint_ref dict :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. :raises keystone.exception.ServiceNotFound: If the service doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint(self, endpoint_id): """Delete an endpoint for a service. :raises keystone.exception.EndpointNotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_catalog(self, user_id, project_id): """Retrieve and format the current service catalog. Example:: { 'RegionOne': {'compute': { 'adminURL': u'http://host:8774/v1.1/project_id', 'internalURL': u'http://host:8774/v1.1/project_id', 'name': 'Compute Service', 'publicURL': u'http://host:8774/v1.1/project_id'}, 'ec2': { 'adminURL': 'http://host:8773/services/Admin', 'internalURL': 'http://host:8773/services/Cloud', 'name': 'EC2 Service', 'publicURL': 'http://host:8773/services/Cloud'}} :returns: A nested dict representing the service catalog or an empty dict. :raises keystone.exception.NotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover def get_v3_catalog(self, user_id, project_id): """Retrieve and format the current V3 service catalog. 
Example:: [ { "endpoints": [ { "interface": "public", "id": "--endpoint-id--", "region": "RegionOne", "url": "http://external:8776/v1/--project-id--" }, { "interface": "internal", "id": "--endpoint-id--", "region": "RegionOne", "url": "http://internal:8776/v1/--project-id--" }], "id": "--service-id--", "type": "volume" }] :returns: A list representing the service catalog or an empty list :raises keystone.exception.NotFound: If the endpoint doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_endpoint_to_project(self, endpoint_id, project_id): """Create an endpoint to project association. :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param project_id: identity of the project to be associated with :type project_id: string :raises: keystone.exception.Conflict: If the endpoint was already added to project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_endpoint_from_project(self, endpoint_id, project_id): """Remove an endpoint to project association. :param endpoint_id: identity of endpoint to remove :type endpoint_id: string :param project_id: identity of the project associated with :type project_id: string :raises keystone.exception.NotFound: If the endpoint was not found in the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_endpoint_in_project(self, endpoint_id, project_id): """Check if an endpoint is associated with a project. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :param project_id: identity of the project associated with :type project_id: string :raises keystone.exception.NotFound: If the endpoint was not found in the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoints_for_project(self, project_id): """List all endpoints associated with a project. 
:param project_id: identity of the project to check :type project_id: string :returns: a list of identity endpoint ids or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_for_endpoint(self, endpoint_id): """List all projects associated with an endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: a list of projects or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_endpoint(self, endpoint_id): """Remove all the endpoints to project association with endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: None """ raise exception.NotImplemented() @abc.abstractmethod def delete_association_by_project(self, project_id): """Remove all the endpoints to project association with project. :param project_id: identity of the project to check :type project_id: string :returns: None """ raise exception.NotImplemented() @abc.abstractmethod def create_endpoint_group(self, endpoint_group): """Create an endpoint group. :param endpoint_group: endpoint group to create :type endpoint_group: dictionary :raises: keystone.exception.Conflict: If a duplicate endpoint group already exists. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint_group(self, endpoint_group_id): """Get an endpoint group. :param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_endpoint_group(self, endpoint_group_id, endpoint_group): """Update an endpoint group. 
:param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :param endpoint_group: A full or partial endpoint_group :type endpoint_group: dictionary :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: an endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint_group(self, endpoint_group_id): """Delete an endpoint group. :param endpoint_group_id: identity of endpoint group to delete :type endpoint_group_id: string :raises keystone.exception.NotFound: If the endpoint group was not found. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_endpoint_group_to_project(self, endpoint_group_id, project_id): """Add an endpoint group to project association. :param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.Conflict: If the endpoint group was already added to the project. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_endpoint_group_in_project(self, endpoint_group_id, project_id): """Get endpoint group to project association. :param endpoint_group_id: identity of endpoint group to retrieve :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.NotFound: If the endpoint group to the project association was not found. :returns: a project endpoint group representation. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoint_groups(self, hints): """List all endpoint groups. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_endpoint_groups_for_project(self, project_id): """List all endpoint group to project associations for a project. 
:param project_id: identity of project to associate :type project_id: string :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_associated_with_endpoint_group(self, endpoint_group_id): """List all projects associated with endpoint group. :param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_endpoint_group_from_project( self, endpoint_group_id, project_id ): """Remove an endpoint to project association. :param endpoint_group_id: identity of endpoint to associate :type endpoint_group_id: string :param project_id: identity of project to associate :type project_id: string :raises keystone.exception.NotFound: If endpoint group project association was not found. :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_endpoint_group_association_by_project(self, project_id): """Remove endpoint group to project associations. :param project_id: identity of the project to check :type project_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/backends/sql.py0000664000175000017500000006567300000000000021702 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2012 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from sqlalchemy.sql import true from keystone.catalog.backends import base from keystone.common import driver_hints from keystone.common import sql from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF class Region(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'region' attributes = ['id', 'description', 'parent_region_id'] id = sql.Column(sql.String(255), primary_key=True) description = sql.Column(sql.String(255), nullable=False) # NOTE(jaypipes): Right now, using an adjacency list model for # storing the hierarchy of regions is fine, since # the API does not support any kind of querying for # more complex hierarchical queries such as "get me only # the regions that are subchildren of this region", etc. # If, in the future, such queries are needed, then it # would be possible to add in columns to this model for # "left" and "right" and provide support for a nested set # model. 
parent_region_id = sql.Column(sql.String(255), nullable=True) extra = sql.Column(sql.JsonBlob()) endpoints = sqlalchemy.orm.relationship("Endpoint", backref="region") class Service(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'service' attributes = ['id', 'type', 'enabled'] id = sql.Column(sql.String(64), primary_key=True) type = sql.Column(sql.String(255)) enabled = sql.Column( sql.Boolean, nullable=False, default=True, server_default=sqlalchemy.sql.expression.true(), ) extra = sql.Column(sql.JsonBlob()) endpoints = sqlalchemy.orm.relationship("Endpoint", backref="service") class Endpoint(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'endpoint' attributes = [ 'id', 'interface', 'region_id', 'service_id', 'url', 'legacy_endpoint_id', 'enabled', ] id = sql.Column(sql.String(64), primary_key=True) legacy_endpoint_id = sql.Column(sql.String(64)) interface = sql.Column(sql.String(8), nullable=False) region_id = sql.Column( sql.String(255), sql.ForeignKey('region.id', ondelete='RESTRICT'), nullable=True, default=None, ) service_id = sql.Column( sql.String(64), sql.ForeignKey('service.id'), nullable=False ) url = sql.Column(sql.Text(), nullable=False) enabled = sql.Column( sql.Boolean, nullable=False, default=True, server_default=sqlalchemy.sql.expression.true(), ) extra = sql.Column(sql.JsonBlob()) @classmethod def from_dict(cls, endpoint_dict): """Override from_dict to set enabled if missing.""" new_dict = endpoint_dict.copy() if new_dict.get('enabled') is None: new_dict['enabled'] = True return super().from_dict(new_dict) class Catalog(base.CatalogDriverBase): # Regions def list_regions(self, hints): with sql.session_for_read() as session: regions = session.query(Region) regions = sql.filter_limit_query(Region, regions, hints) return [s.to_dict() for s in list(regions)] def _get_region(self, session, region_id): ref = session.get(Region, region_id) if not ref: raise exception.RegionNotFound(region_id=region_id) return ref def 
_delete_child_regions(self, session, region_id, root_region_id): """Delete all child regions. Recursively delete any region that has the supplied region as its parent. """ children = session.query(Region).filter_by(parent_region_id=region_id) for child in children: if child.id == root_region_id: # Hit a circular region hierarchy return self._delete_child_regions(session, child.id, root_region_id) session.delete(child) def _check_parent_region(self, session, region_ref): """Raise a NotFound if the parent region does not exist. If the region_ref has a specified parent_region_id, check that the parent exists, otherwise, raise a NotFound. """ parent_region_id = region_ref.get('parent_region_id') if parent_region_id is not None: # This will raise NotFound if the parent doesn't exist, # which is the behavior we want. self._get_region(session, parent_region_id) def _has_endpoints(self, session, region, root_region): if region.endpoints is not None and len(region.endpoints) > 0: return True q = session.query(Region) q = q.filter_by(parent_region_id=region.id) for child in q.all(): if child.id == root_region.id: # Hit a circular region hierarchy return False if self._has_endpoints(session, child, root_region): return True return False def get_region(self, region_id): with sql.session_for_read() as session: return self._get_region(session, region_id).to_dict() def delete_region(self, region_id): with sql.session_for_write() as session: ref = self._get_region(session, region_id) if self._has_endpoints(session, ref, ref): raise exception.RegionDeletionError(region_id=region_id) self._delete_child_regions(session, region_id, region_id) session.delete(ref) @sql.handle_conflicts(conflict_type='region') def create_region(self, region_ref): with sql.session_for_write() as session: self._check_parent_region(session, region_ref) region = Region.from_dict(region_ref) session.add(region) return region.to_dict() def update_region(self, region_id, region_ref): with 
sql.session_for_write() as session: self._check_parent_region(session, region_ref) ref = self._get_region(session, region_id) old_dict = ref.to_dict() old_dict.update(region_ref) self._ensure_no_circle_in_hierarchical_regions(old_dict) new_region = Region.from_dict(old_dict) for attr in Region.attributes: if attr != 'id': setattr(ref, attr, getattr(new_region, attr)) ref.extra = new_region.extra return ref.to_dict() # Services @driver_hints.truncated def list_services(self, hints): with sql.session_for_read() as session: services = session.query(Service) services = sql.filter_limit_query(Service, services, hints) return [s.to_dict() for s in list(services)] def _get_service(self, session, service_id): ref = session.get(Service, service_id) if not ref: raise exception.ServiceNotFound(service_id=service_id) return ref def get_service(self, service_id): with sql.session_for_read() as session: return self._get_service(session, service_id).to_dict() def delete_service(self, service_id): with sql.session_for_write() as session: ref = self._get_service(session, service_id) session.query(Endpoint).filter_by(service_id=service_id).delete() session.delete(ref) def create_service(self, service_id, service_ref): with sql.session_for_write() as session: service = Service.from_dict(service_ref) session.add(service) return service.to_dict() def update_service(self, service_id, service_ref): with sql.session_for_write() as session: ref = self._get_service(session, service_id) old_dict = ref.to_dict() old_dict.update(service_ref) new_service = Service.from_dict(old_dict) for attr in Service.attributes: if attr != 'id': setattr(ref, attr, getattr(new_service, attr)) ref.extra = new_service.extra return ref.to_dict() # Endpoints def create_endpoint(self, endpoint_id, endpoint): with sql.session_for_write() as session: endpoint_ref = Endpoint.from_dict(endpoint) session.add(endpoint_ref) return endpoint_ref.to_dict() def delete_endpoint(self, endpoint_id): with sql.session_for_write() 
as session: ref = self._get_endpoint(session, endpoint_id) session.delete(ref) def _get_endpoint(self, session, endpoint_id): try: return session.query(Endpoint).filter_by(id=endpoint_id).one() except sql.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) def get_endpoint(self, endpoint_id): with sql.session_for_read() as session: return self._get_endpoint(session, endpoint_id).to_dict() @driver_hints.truncated def list_endpoints(self, hints): with sql.session_for_read() as session: endpoints = session.query(Endpoint) endpoints = sql.filter_limit_query(Endpoint, endpoints, hints) return [e.to_dict() for e in list(endpoints)] def update_endpoint(self, endpoint_id, endpoint_ref): with sql.session_for_write() as session: ref = self._get_endpoint(session, endpoint_id) old_dict = ref.to_dict() old_dict.update(endpoint_ref) new_endpoint = Endpoint.from_dict(old_dict) for attr in Endpoint.attributes: if attr != 'id': setattr(ref, attr, getattr(new_endpoint, attr)) ref.extra = new_endpoint.extra return ref.to_dict() def get_catalog(self, user_id, project_id): """Retrieve and format the V2 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param project_id: The id of the project. 'project_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a project_id as part of their URL will be skipped (as would a whole service if, as a consequence, it has no valid endpoints). :returns: A nested dict representing the service catalog or an empty dict. 
""" substitutions = dict(CONF.items()) substitutions.update({'user_id': user_id}) silent_keyerror_failures = [] if project_id: substitutions.update( {'tenant_id': project_id, 'project_id': project_id} ) else: silent_keyerror_failures = ['tenant_id', 'project_id'] with sql.session_for_read() as session: endpoints = ( session.query(Endpoint) .options(sql.joinedload(Endpoint.service)) .filter(Endpoint.enabled == true()) .all() ) catalog = {} for endpoint in endpoints: if not endpoint.service['enabled']: continue try: formatted_url = utils.format_url( endpoint['url'], substitutions, silent_keyerror_failures=silent_keyerror_failures, ) if formatted_url is not None: url = formatted_url else: continue except exception.MalformedEndpoint: # nosec(tkelsey) continue # this failure is already logged in format_url() region = endpoint['region_id'] service_type = endpoint.service['type'] default_service = { 'id': endpoint['id'], 'name': endpoint.service.extra.get('name', ''), 'publicURL': '', } catalog.setdefault(region, {}) catalog[region].setdefault(service_type, default_service) interface_url = '%sURL' % endpoint['interface'] catalog[region][service_type][interface_url] = url return catalog def get_v3_catalog(self, user_id, project_id): """Retrieve and format the current V3 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param project_id: The id of the project. 'project_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a project_id as part of their URL will be skipped. 
:returns: A list representing the service catalog or an empty list """ d = dict(CONF.items()) d.update({'user_id': user_id}) silent_keyerror_failures = [] if project_id: d.update( { 'tenant_id': project_id, 'project_id': project_id, } ) else: silent_keyerror_failures = ['tenant_id', 'project_id'] with sql.session_for_read() as session: services = ( session.query(Service) .filter(Service.enabled == true()) .options(sql.joinedload(Service.endpoints)) .all() ) def make_v3_endpoints(endpoints): for endpoint in ( ep.to_dict() for ep in endpoints if ep.enabled ): del endpoint['service_id'] del endpoint['legacy_endpoint_id'] del endpoint['enabled'] endpoint['region'] = endpoint['region_id'] try: formatted_url = utils.format_url( endpoint['url'], d, silent_keyerror_failures=silent_keyerror_failures, ) if formatted_url: endpoint['url'] = formatted_url else: continue except exception.MalformedEndpoint: # nosec(tkelsey) # this failure is already logged in format_url() continue yield endpoint # TODO(davechen): If there is service with no endpoints, we should # skip the service instead of keeping it in the catalog, # see bug #1436704. def make_v3_service(svc): eps = list(make_v3_endpoints(svc.endpoints)) service = {'endpoints': eps, 'id': svc.id, 'type': svc.type} service['name'] = svc.extra.get('name', '') return service # Build the unfiltered catalog, this is the catalog that is # returned if endpoint filtering is not performed and the # option of `return_all_endpoints_if_no_filter` is set to true. catalog_ref = [make_v3_service(svc) for svc in services] # Filter the `catalog_ref` above by any project-endpoint # association configured by endpoint filter. filtered_endpoints = {} if project_id: filtered_endpoints = ( self.catalog_api.list_endpoints_for_project(project_id) ) # endpoint filter is enabled, only return the filtered endpoints. 
if filtered_endpoints: filtered_ids = list(filtered_endpoints.keys()) # This is actually working on the copy of `catalog_ref` since # the index will be shifted if remove/add any entry for the # original one. for service in catalog_ref[:]: endpoints = service['endpoints'] for endpoint in endpoints[:]: endpoint_id = endpoint['id'] # remove the endpoint that is not associated with # the project. if endpoint_id not in filtered_ids: service['endpoints'].remove(endpoint) continue # remove the disabled endpoint from the list. if not filtered_endpoints[endpoint_id]['enabled']: service['endpoints'].remove(endpoint) # NOTE(davechen): The service will not be included in the # catalog if the service doesn't have any endpoint when # endpoint filter is enabled, this is inconsistent with # full catalog that is returned when endpoint filter is # disabled. if not service.get('endpoints'): catalog_ref.remove(service) # When it arrives here it means it's domain scoped token ( # `project_id` is not set) or it's a project scoped token # but the endpoint filtering is not performed. # Both of them tell us the endpoint filtering is not enabled, so # check the option of `return_all_endpoints_if_no_filter`, it will # judge whether a full unfiltered catalog or a empty service # catalog will be returned. 
elif not CONF.endpoint_filter.return_all_endpoints_if_no_filter: return [] return catalog_ref @sql.handle_conflicts(conflict_type='project_endpoint') def add_endpoint_to_project(self, endpoint_id, project_id): with sql.session_for_write() as session: endpoint_filter_ref = ProjectEndpoint( endpoint_id=endpoint_id, project_id=project_id ) session.add(endpoint_filter_ref) def _get_project_endpoint_ref(self, session, endpoint_id, project_id): endpoint_filter_ref = session.get( ProjectEndpoint, (endpoint_id, project_id), ) if endpoint_filter_ref is None: msg = _( 'Endpoint %(endpoint_id)s not found in project ' '%(project_id)s' ) % {'endpoint_id': endpoint_id, 'project_id': project_id} raise exception.NotFound(msg) return endpoint_filter_ref def check_endpoint_in_project(self, endpoint_id, project_id): with sql.session_for_read() as session: self._get_project_endpoint_ref(session, endpoint_id, project_id) def remove_endpoint_from_project(self, endpoint_id, project_id): with sql.session_for_write() as session: endpoint_filter_ref = self._get_project_endpoint_ref( session, endpoint_id, project_id ) session.delete(endpoint_filter_ref) def list_endpoints_for_project(self, project_id): with sql.session_for_read() as session: query = session.query(ProjectEndpoint) query = query.filter_by(project_id=project_id) endpoint_filter_refs = query.all() return [ref.to_dict() for ref in endpoint_filter_refs] def list_projects_for_endpoint(self, endpoint_id): with sql.session_for_read() as session: query = session.query(ProjectEndpoint) query = query.filter_by(endpoint_id=endpoint_id) endpoint_filter_refs = query.all() return [ref.to_dict() for ref in endpoint_filter_refs] def delete_association_by_endpoint(self, endpoint_id): with sql.session_for_write() as session: query = session.query(ProjectEndpoint) query = query.filter_by(endpoint_id=endpoint_id) query.delete(synchronize_session=False) def delete_association_by_project(self, project_id): with sql.session_for_write() as session: 
query = session.query(ProjectEndpoint) query = query.filter_by(project_id=project_id) query.delete(synchronize_session=False) def create_endpoint_group(self, endpoint_group_id, endpoint_group): with sql.session_for_write() as session: endpoint_group_ref = EndpointGroup.from_dict(endpoint_group) session.add(endpoint_group_ref) return endpoint_group_ref.to_dict() def _get_endpoint_group(self, session, endpoint_group_id): endpoint_group_ref = session.get(EndpointGroup, endpoint_group_id) if endpoint_group_ref is None: raise exception.EndpointGroupNotFound( endpoint_group_id=endpoint_group_id ) return endpoint_group_ref def get_endpoint_group(self, endpoint_group_id): with sql.session_for_read() as session: endpoint_group_ref = self._get_endpoint_group( session, endpoint_group_id ) return endpoint_group_ref.to_dict() def update_endpoint_group(self, endpoint_group_id, endpoint_group): with sql.session_for_write() as session: endpoint_group_ref = self._get_endpoint_group( session, endpoint_group_id ) old_endpoint_group = endpoint_group_ref.to_dict() old_endpoint_group.update(endpoint_group) new_endpoint_group = EndpointGroup.from_dict(old_endpoint_group) for attr in EndpointGroup.mutable_attributes: setattr( endpoint_group_ref, attr, getattr(new_endpoint_group, attr) ) return endpoint_group_ref.to_dict() def delete_endpoint_group(self, endpoint_group_id): with sql.session_for_write() as session: endpoint_group_ref = self._get_endpoint_group( session, endpoint_group_id ) self._delete_endpoint_group_association_by_endpoint_group( session, endpoint_group_id ) session.delete(endpoint_group_ref) def get_endpoint_group_in_project(self, endpoint_group_id, project_id): with sql.session_for_read() as session: ref = self._get_endpoint_group_in_project( session, endpoint_group_id, project_id ) return ref.to_dict() @sql.handle_conflicts(conflict_type='project_endpoint_group') def add_endpoint_group_to_project(self, endpoint_group_id, project_id): with sql.session_for_write() as 
session: # Create a new Project Endpoint group entity endpoint_group_project_ref = ProjectEndpointGroupMembership( endpoint_group_id=endpoint_group_id, project_id=project_id ) session.add(endpoint_group_project_ref) def _get_endpoint_group_in_project( self, session, endpoint_group_id, project_id ): endpoint_group_project_ref = session.get( ProjectEndpointGroupMembership, (endpoint_group_id, project_id), ) if endpoint_group_project_ref is None: msg = _('Endpoint Group Project Association not found') raise exception.NotFound(msg) else: return endpoint_group_project_ref def list_endpoint_groups(self, hints): with sql.session_for_read() as session: query = session.query(EndpointGroup) endpoint_group_refs = sql.filter_limit_query( EndpointGroup, query, hints ) return [e.to_dict() for e in endpoint_group_refs] def list_endpoint_groups_for_project(self, project_id): with sql.session_for_read() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(project_id=project_id) endpoint_group_refs = query.all() return [ref.to_dict() for ref in endpoint_group_refs] def remove_endpoint_group_from_project( self, endpoint_group_id, project_id ): with sql.session_for_write() as session: endpoint_group_project_ref = self._get_endpoint_group_in_project( session, endpoint_group_id, project_id ) session.delete(endpoint_group_project_ref) def list_projects_associated_with_endpoint_group(self, endpoint_group_id): with sql.session_for_read() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(endpoint_group_id=endpoint_group_id) endpoint_group_refs = query.all() return [ref.to_dict() for ref in endpoint_group_refs] def _delete_endpoint_group_association_by_endpoint_group( self, session, endpoint_group_id ): query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(endpoint_group_id=endpoint_group_id) query.delete() def delete_endpoint_group_association_by_project(self, project_id): with 
sql.session_for_write() as session: query = session.query(ProjectEndpointGroupMembership) query = query.filter_by(project_id=project_id) query.delete() class ProjectEndpoint(sql.ModelBase, sql.ModelDictMixin): """project-endpoint relationship table.""" __tablename__ = 'project_endpoint' attributes = ['endpoint_id', 'project_id'] endpoint_id = sql.Column(sql.String(64), primary_key=True, nullable=False) project_id = sql.Column(sql.String(64), primary_key=True, nullable=False) class EndpointGroup(sql.ModelBase, sql.ModelDictMixin): """Endpoint Groups table.""" __tablename__ = 'endpoint_group' attributes = ['id', 'name', 'description', 'filters'] mutable_attributes = frozenset(['name', 'description', 'filters']) id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) description = sql.Column(sql.Text, nullable=True) filters = sql.Column(sql.JsonBlob(), nullable=False) class ProjectEndpointGroupMembership(sql.ModelBase, sql.ModelDictMixin): """Project to Endpoint group relationship table.""" __tablename__ = 'project_endpoint_group' attributes = ['endpoint_group_id', 'project_id'] endpoint_group_id = sql.Column( sql.String(64), sql.ForeignKey('endpoint_group.id'), nullable=False ) project_id = sql.Column(sql.String(64), nullable=False) __table_args__ = ( sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/backends/templated.py0000664000175000017500000003053400000000000023046 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path from oslo_log import log from keystone.catalog.backends import base from keystone.common import utils import keystone.conf from keystone import exception LOG = log.getLogger(__name__) CONF = keystone.conf.CONF def parse_templates(template_lines): o = {} for line in template_lines: if ' = ' not in line: continue k, v = line.strip().split(' = ') if not k.startswith('catalog.'): continue parts = k.split('.') region = parts[1] # NOTE(termie): object-store insists on having a dash service = parts[2].replace('_', '-') key = parts[3] region_ref = o.get(region, {}) service_ref = region_ref.get(service, {}) service_ref[key] = v region_ref[service] = service_ref o[region] = region_ref return o class Catalog(base.CatalogDriverBase): """A backend that generates endpoints for the Catalog based on templates. It is usually configured via config entries that look like: catalog.$REGION.$SERVICE.$key = $value and is stored in a similar looking hierarchy. When expanding the template it will pass in a dict made up of the conf instance plus a few additional key-values, notably project_id and user_id. It does not care what the keys and values are but it is worth noting that keystone_compat will expect certain keys to be there so that it can munge them into the output format keystone expects. These keys are: name - the name of the service, most likely repeated for all services of the same type, across regions. 
adminURL - the url of the admin endpoint publicURL - the url of the public endpoint internalURL - the url of the internal endpoint """ def __init__(self, templates=None): super().__init__() LOG.warning( 'The templated catalog driver has been deprecated and ' 'will be removed in a future release.' ) if templates: self.templates = templates else: template_file = CONF.catalog.template_file if not os.path.exists(template_file): template_file = CONF.find_file(template_file) self._load_templates(template_file) def _load_templates(self, template_file): try: with open(template_file) as f: self.templates = parse_templates(f) except OSError: LOG.critical('Unable to open template file %s', template_file) raise # region crud def create_region(self, region_ref): raise exception.NotImplemented() def list_regions(self, hints): return [ {'id': region_id, 'description': '', 'parent_region_id': ''} for region_id in self.templates ] def get_region(self, region_id): if region_id in self.templates: return {'id': region_id, 'description': '', 'parent_region_id': ''} raise exception.RegionNotFound(region_id=region_id) def update_region(self, region_id, region_ref): raise exception.NotImplemented() def delete_region(self, region_id): raise exception.NotImplemented() # service crud def create_service(self, service_id, service_ref): raise exception.NotImplemented() def _list_services(self, hints): for region_ref in self.templates.values(): for service_type, service_ref in region_ref.items(): yield { 'id': service_type, 'enabled': True, 'name': service_ref.get('name', ''), 'description': service_ref.get('description', ''), 'type': service_type, } def list_services(self, hints): return list(self._list_services(hints=None)) def get_service(self, service_id): for service in self._list_services(hints=None): if service['id'] == service_id: return service raise exception.ServiceNotFound(service_id=service_id) def update_service(self, service_id, service_ref): raise exception.NotImplemented() def 
delete_service(self, service_id): raise exception.NotImplemented() # endpoint crud def create_endpoint(self, endpoint_id, endpoint_ref): raise exception.NotImplemented() def _list_endpoints(self): for region_id, region_ref in self.templates.items(): for service_type, service_ref in region_ref.items(): for key in service_ref: if key.endswith('URL'): interface = key[:-3] endpoint_id = '{}-{}-{}'.format( region_id, service_type, interface, ) yield { 'id': endpoint_id, 'service_id': service_type, 'interface': interface, 'url': service_ref[key], 'legacy_endpoint_id': None, 'region_id': region_id, 'enabled': True, } def list_endpoints(self, hints): return list(self._list_endpoints()) def get_endpoint(self, endpoint_id): for endpoint in self._list_endpoints(): if endpoint['id'] == endpoint_id: return endpoint raise exception.EndpointNotFound(endpoint_id=endpoint_id) def update_endpoint(self, endpoint_id, endpoint_ref): raise exception.NotImplemented() def delete_endpoint(self, endpoint_id): raise exception.NotImplemented() def get_catalog(self, user_id, project_id): """Retrieve and format the V2 service catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param project_id: The id of the project. 'project_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a project_id as part of their URL will be skipped. :returns: A nested dict representing the service catalog or an empty dict. """ substitutions = dict(CONF.items()) substitutions.update({'user_id': user_id}) silent_keyerror_failures = [] if project_id: substitutions.update( { 'tenant_id': project_id, 'project_id': project_id, } ) else: silent_keyerror_failures = [ 'tenant_id', 'project_id', ] catalog = {} # TODO(davechen): If there is service with no endpoints, we should # skip the service instead of keeping it in the catalog. # see bug #1436704. 
for region, region_ref in self.templates.items(): catalog[region] = {} for service, service_ref in region_ref.items(): service_data = {} try: for k, v in service_ref.items(): formatted_value = utils.format_url( v, substitutions, silent_keyerror_failures=silent_keyerror_failures, ) if formatted_value: service_data[k] = formatted_value except exception.MalformedEndpoint: # nosec(tkelsey) continue # this failure is already logged in format_url() catalog[region][service] = service_data return catalog def get_v3_catalog(self, user_id, project_id): """Retrieve and format the current V3 service catalog. This implementation builds the V3 catalog from the V2 catalog. :param user_id: The id of the user who has been authenticated for creating service catalog. :param project_id: The id of the project. 'project_id' will be None in the case this being called to create a catalog to go in a domain scoped token. In this case, any endpoint that requires a project_id as part of their URL will be skipped. :returns: A list representing the service catalog or an empty list """ v2_catalog = self.get_catalog(user_id, project_id) v3_catalog = {} for region_name, region in v2_catalog.items(): for service_type, service in region.items(): if service_type not in v3_catalog: v3_catalog[service_type] = { 'type': service_type, 'endpoints': [], } for attr, value in service.items(): # Attributes that end in URL are interfaces. In the V2 # catalog, these are internalURL, publicURL, and adminURL. # For example, .publicURL= in the V2 # catalog becomes the V3 interface for the service: # { 'interface': 'public', 'url': '', 'region': # 'region: '' } if attr.endswith('URL'): v3_interface = attr[: -len('URL')] v3_catalog[service_type]['endpoints'].append( { 'interface': v3_interface, 'region': region_name, 'url': value, } ) continue # Other attributes are copied to the service. 
v3_catalog[service_type][attr] = value return list(v3_catalog.values()) def add_endpoint_to_project(self, endpoint_id, project_id): raise exception.NotImplemented() def remove_endpoint_from_project(self, endpoint_id, project_id): raise exception.NotImplemented() def check_endpoint_in_project(self, endpoint_id, project_id): raise exception.NotImplemented() def list_endpoints_for_project(self, project_id): raise exception.NotImplemented() def list_projects_for_endpoint(self, endpoint_id): raise exception.NotImplemented() def delete_association_by_endpoint(self, endpoint_id): raise exception.NotImplemented() def delete_association_by_project(self, project_id): raise exception.NotImplemented() def create_endpoint_group(self, endpoint_group): raise exception.NotImplemented() def get_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def update_endpoint_group(self, endpoint_group_id, endpoint_group): raise exception.NotImplemented() def delete_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def add_endpoint_group_to_project(self, endpoint_group_id, project_id): raise exception.NotImplemented() def get_endpoint_group_in_project(self, endpoint_group_id, project_id): raise exception.NotImplemented() def list_endpoint_groups(self, hints): raise exception.NotImplemented() def list_endpoint_groups_for_project(self, project_id): raise exception.NotImplemented() def list_projects_associated_with_endpoint_group(self, endpoint_group_id): raise exception.NotImplemented() def remove_endpoint_group_from_project( self, endpoint_group_id, project_id ): raise exception.NotImplemented() def delete_endpoint_group_association_by_project(self, project_id): raise exception.NotImplemented() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/core.py0000664000175000017500000003324500000000000020247 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack 
Foundation # Copyright 2012 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Catalog service.""" from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs # This is a general cache region for catalog administration (CRUD operations). MEMOIZE = cache.get_memoization_decorator(group='catalog') # This builds a discrete cache region dedicated to complete service catalogs # computed for a given user + project pair. Any write operation to create, # modify or delete elements of the service catalog should invalidate this # entire cache region. COMPUTED_CATALOG_REGION = cache.create_region(name='computed catalog region') MEMOIZE_COMPUTED_CATALOG = cache.get_memoization_decorator( group='catalog', region=COMPUTED_CATALOG_REGION ) class Manager(manager.Manager): """Default pivot point for the Catalog backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.catalog' _provides_api = 'catalog_api' _ENDPOINT = 'endpoint' _SERVICE = 'service' _REGION = 'region' def __init__(self): super().__init__(CONF.catalog.driver) notifications.register_event_callback( notifications.ACTIONS.deleted, 'project', self._on_project_or_endpoint_delete, ) notifications.register_event_callback( notifications.ACTIONS.deleted, 'endpoint', self._on_project_or_endpoint_delete, ) def _on_project_or_endpoint_delete( self, service, resource_type, operation, payload ): project_or_endpoint_id = payload['resource_info'] if resource_type == 'project': PROVIDERS.catalog_api.delete_association_by_project( project_or_endpoint_id ) PROVIDERS.catalog_api.delete_endpoint_group_association_by_project( project_or_endpoint_id ) else: PROVIDERS.catalog_api.delete_association_by_endpoint( project_or_endpoint_id ) def create_region(self, region_ref, initiator=None): # Check duplicate ID try: self.get_region(region_ref['id']) except exception.RegionNotFound: # nosec # A region with the same id doesn't exist already, good. pass else: msg = _('Duplicate ID, %s.') % region_ref['id'] raise exception.Conflict(type='region', details=msg) # NOTE(lbragstad,dstanek): The description column of the region # database cannot be null. So if the user doesn't pass in a # description or passes in a null description then set it to an # empty string. 
if region_ref.get('description') is None: region_ref['description'] = '' try: ret = self.driver.create_region(region_ref) except exception.NotFound: parent_region_id = region_ref.get('parent_region_id') raise exception.RegionNotFound(region_id=parent_region_id) notifications.Audit.created(self._REGION, ret['id'], initiator) COMPUTED_CATALOG_REGION.invalidate() return ret @MEMOIZE def get_region(self, region_id): try: return self.driver.get_region(region_id) except exception.NotFound: raise exception.RegionNotFound(region_id=region_id) def update_region(self, region_id, region_ref, initiator=None): # NOTE(lbragstad,dstanek): The description column of the region # database cannot be null. So if the user passes in a null # description set it to an empty string. if 'description' in region_ref and region_ref['description'] is None: region_ref['description'] = '' ref = self.driver.update_region(region_id, region_ref) notifications.Audit.updated(self._REGION, region_id, initiator) self.get_region.invalidate(self, region_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_region(self, region_id, initiator=None): try: ret = self.driver.delete_region(region_id) notifications.Audit.deleted(self._REGION, region_id, initiator) self.get_region.invalidate(self, region_id) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.RegionNotFound(region_id=region_id) @manager.response_truncated def list_regions(self, hints=None): return self.driver.list_regions(hints or driver_hints.Hints()) def create_service(self, service_id, service_ref, initiator=None): service_ref.setdefault('enabled', True) service_ref.setdefault('name', '') ref = self.driver.create_service(service_id, service_ref) notifications.Audit.created(self._SERVICE, service_id, initiator) COMPUTED_CATALOG_REGION.invalidate() return ref @MEMOIZE def get_service(self, service_id): try: return self.driver.get_service(service_id) except exception.NotFound: raise 
exception.ServiceNotFound(service_id=service_id) def update_service(self, service_id, service_ref, initiator=None): ref = self.driver.update_service(service_id, service_ref) notifications.Audit.updated(self._SERVICE, service_id, initiator) self.get_service.invalidate(self, service_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_service(self, service_id, initiator=None): try: endpoints = self.list_endpoints() ret = self.driver.delete_service(service_id) notifications.Audit.deleted(self._SERVICE, service_id, initiator) self.get_service.invalidate(self, service_id) for endpoint in endpoints: if endpoint['service_id'] == service_id: self.get_endpoint.invalidate(self, endpoint['id']) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.ServiceNotFound(service_id=service_id) @manager.response_truncated def list_services(self, hints=None): return self.driver.list_services(hints or driver_hints.Hints()) def _assert_region_exists(self, region_id): try: if region_id is not None: self.get_region(region_id) except exception.RegionNotFound: raise exception.ValidationError( attribute='endpoint region_id', target='region table' ) def _assert_service_exists(self, service_id): try: if service_id is not None: self.get_service(service_id) except exception.ServiceNotFound: raise exception.ValidationError( attribute='endpoint service_id', target='service table' ) def create_endpoint(self, endpoint_id, endpoint_ref, initiator=None): self._assert_region_exists(endpoint_ref.get('region_id')) self._assert_service_exists(endpoint_ref['service_id']) ref = self.driver.create_endpoint(endpoint_id, endpoint_ref) notifications.Audit.created(self._ENDPOINT, endpoint_id, initiator) COMPUTED_CATALOG_REGION.invalidate() return ref def update_endpoint(self, endpoint_id, endpoint_ref, initiator=None): self._assert_region_exists(endpoint_ref.get('region_id')) self._assert_service_exists(endpoint_ref.get('service_id')) ref = 
self.driver.update_endpoint(endpoint_id, endpoint_ref) notifications.Audit.updated(self._ENDPOINT, endpoint_id, initiator) self.get_endpoint.invalidate(self, endpoint_id) COMPUTED_CATALOG_REGION.invalidate() return ref def delete_endpoint(self, endpoint_id, initiator=None): try: ret = self.driver.delete_endpoint(endpoint_id) notifications.Audit.deleted(self._ENDPOINT, endpoint_id, initiator) self.get_endpoint.invalidate(self, endpoint_id) COMPUTED_CATALOG_REGION.invalidate() return ret except exception.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) @MEMOIZE def get_endpoint(self, endpoint_id): try: return self.driver.get_endpoint(endpoint_id) except exception.NotFound: raise exception.EndpointNotFound(endpoint_id=endpoint_id) @manager.response_truncated def list_endpoints(self, hints=None): return self.driver.list_endpoints(hints or driver_hints.Hints()) @MEMOIZE_COMPUTED_CATALOG def get_v3_catalog(self, user_id, project_id): return self.driver.get_v3_catalog(user_id, project_id) def add_endpoint_to_project(self, endpoint_id, project_id): self.driver.add_endpoint_to_project(endpoint_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def remove_endpoint_from_project(self, endpoint_id, project_id): self.driver.remove_endpoint_from_project(endpoint_id, project_id) COMPUTED_CATALOG_REGION.invalidate() def add_endpoint_group_to_project(self, endpoint_group_id, project_id): self.driver.add_endpoint_group_to_project( endpoint_group_id, project_id ) COMPUTED_CATALOG_REGION.invalidate() def remove_endpoint_group_from_project( self, endpoint_group_id, project_id ): self.driver.remove_endpoint_group_from_project( endpoint_group_id, project_id ) COMPUTED_CATALOG_REGION.invalidate() def delete_endpoint_group_association_by_project(self, project_id): try: self.driver.delete_endpoint_group_association_by_project( project_id ) except exception.NotImplemented: # Some catalog drivers don't support this pass def get_endpoint_groups_for_project(self, 
project_id): # recover the project endpoint group memberships and for each # membership recover the endpoint group PROVIDERS.resource_api.get_project(project_id) try: refs = self.list_endpoint_groups_for_project(project_id) endpoint_groups = [ self.get_endpoint_group(ref['endpoint_group_id']) for ref in refs ] return endpoint_groups except exception.EndpointGroupNotFound: return [] def get_endpoints_filtered_by_endpoint_group(self, endpoint_group_id): endpoints = self.list_endpoints() filters = self.get_endpoint_group(endpoint_group_id)['filters'] filtered_endpoints = [] for endpoint in endpoints: is_candidate = True for key, value in filters.items(): if endpoint[key] != value: is_candidate = False break if is_candidate: filtered_endpoints.append(endpoint) return filtered_endpoints def list_endpoints_for_project(self, project_id): """List all endpoints associated with a project. :param project_id: project identifier to check :type project_id: string :returns: a list of endpoint ids or an empty list. """ refs = self.driver.list_endpoints_for_project(project_id) filtered_endpoints = {} for ref in refs: try: endpoint = self.get_endpoint(ref['endpoint_id']) filtered_endpoints.update({ref['endpoint_id']: endpoint}) except exception.EndpointNotFound: # remove bad reference from association self.remove_endpoint_from_project( ref['endpoint_id'], project_id ) # need to recover endpoint_groups associated with project # then for each endpoint group return the endpoints. 
endpoint_groups = self.get_endpoint_groups_for_project(project_id) for endpoint_group in endpoint_groups: endpoint_refs = self.get_endpoints_filtered_by_endpoint_group( endpoint_group['id'] ) # now check if any endpoints for current endpoint group are not # contained in the list of filtered endpoints for endpoint_ref in endpoint_refs: if endpoint_ref['id'] not in filtered_endpoints: filtered_endpoints[endpoint_ref['id']] = endpoint_ref return filtered_endpoints def delete_association_by_endpoint(self, endpoint_id): try: self.driver.delete_association_by_endpoint(endpoint_id) except exception.NotImplemented: # Some catalog drivers don't support this pass def delete_association_by_project(self, project_id): try: self.driver.delete_association_by_project(project_id) except exception.NotImplemented: # Some catalog drivers don't support this pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/catalog/schema.py0000664000175000017500000000540500000000000020554 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types _service_properties_type = {'type': 'string', 'minLength': 1, 'maxLength': 255} _region_properties = { 'description': validation.nullable(parameter_types.description), # NOTE(lbragstad): Regions use ID differently. 
The user can specify the ID # or it will be generated automatically. 'id': {'type': 'string'}, 'parent_region_id': {'type': ['string', 'null']}, } region_create = { 'type': 'object', 'properties': _region_properties, 'additionalProperties': True, # NOTE(lbragstad): No parameters are required for creating regions. } region_update = { 'type': 'object', 'properties': _region_properties, 'minProperties': 1, 'additionalProperties': True, } # Schema for Service v3 _service_properties = { 'enabled': parameter_types.boolean, 'name': parameter_types.name, 'type': _service_properties_type, } service_create = { 'type': 'object', 'properties': _service_properties, 'required': ['type'], 'additionalProperties': True, } service_update = { 'type': 'object', 'properties': _service_properties, 'minProperties': 1, 'additionalProperties': True, } _endpoint_properties = { 'enabled': parameter_types.boolean, 'interface': {'type': 'string', 'enum': ['admin', 'internal', 'public']}, 'region_id': {'type': 'string'}, 'region': {'type': 'string'}, 'service_id': {'type': 'string'}, 'url': parameter_types.url, } endpoint_create = { 'type': 'object', 'properties': _endpoint_properties, 'required': ['interface', 'service_id', 'url'], 'additionalProperties': True, } endpoint_update = { 'type': 'object', 'properties': _endpoint_properties, 'minProperties': 1, 'additionalProperties': True, } _endpoint_group_properties = { 'description': validation.nullable(parameter_types.description), 'filters': {'type': 'object'}, 'name': parameter_types.name, } endpoint_group_create = { 'type': 'object', 'properties': _endpoint_group_properties, 'required': ['name', 'filters'], } endpoint_group_update = { 'type': 'object', 'properties': _endpoint_group_properties, 'minProperties': 1, } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.502114 keystone-26.0.0/keystone/cmd/0000775000175000017500000000000000000000000016067 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/__init__.py0000664000175000017500000000000000000000000020166 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/bootstrap.py0000664000175000017500000003602200000000000020461 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from oslo_log import log from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.server import backends CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class Bootstrapper: def __init__(self): backends.load_backends() self.admin_password = None self.admin_username = None self.project_id = None self.project_name = None self.reader_role_id = None self.reader_role_name = 'reader' self.member_role_id = None self.member_role_name = 'member' self.manager_role_id = None self.manager_role_name = 'manager' self.admin_role_id = None self.admin_role_name = None self.service_role_id = None self.service_role_name = 'service' self.region_id = None self.service_name = None self.public_url = None self.internal_url = None self.admin_url = None self.endpoints = {} self.default_domain_id = None self.admin_user_id = None self.immutable_roles = False def bootstrap(self): # NOTE(morganfainberg): Ensure the default domain is in-fact created self._bootstrap_default_domain() self._bootstrap_project() self._bootstrap_admin_user() self._bootstrap_reader_role() self._bootstrap_member_role() self._bootstrap_manager_role() self._bootstrap_admin_role() self._bootstrap_service_role() self._bootstrap_project_role_assignment() self._bootstrap_system_role_assignment() self._bootstrap_region() self._bootstrap_catalog() def _bootstrap_default_domain(self): default_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', 'enabled': True, 'description': 'The default domain', } try: PROVIDERS.resource_api.create_domain( domain_id=default_domain['id'], domain=default_domain ) LOG.info('Created domain %s', default_domain['id']) except exception.Conflict: # NOTE(morganfainberg): Domain already exists, continue on. 
LOG.info( 'Domain %s already exists, skipping creation.', default_domain['id'], ) self.default_domain_id = default_domain['id'] def _bootstrap_project(self): try: project_id = uuid.uuid4().hex project = { 'enabled': True, 'id': project_id, 'domain_id': self.default_domain_id, 'description': 'Bootstrap project for initializing the cloud.', 'name': self.project_name, } PROVIDERS.resource_api.create_project(project_id, project) LOG.info('Created project %s', self.project_name) except exception.Conflict: LOG.info( 'Project %s already exists, skipping creation.', self.project_name, ) project = PROVIDERS.resource_api.get_project_by_name( self.project_name, self.default_domain_id ) self.project_id = project['id'] def _ensure_role_exists(self, role_name): # NOTE(morganfainberg): Do not create the role if it already exists. try: role_id = uuid.uuid4().hex role = {'name': role_name, 'id': role_id} if self.immutable_roles: role['options'] = {'immutable': True} role = PROVIDERS.role_api.create_role(role_id, role) LOG.info('Created role %s', role_name) if not self.immutable_roles: LOG.warning( "Role %(role)s was created as a mutable role. It " "is recommended to make this role immutable by " "adding the 'immutable' resource option to this " "role, or re-running this command without " "--no-immutable-role.", {'role': role_name}, ) return role except exception.Conflict: LOG.info('Role %s exists, skipping creation.', role_name) # NOTE(davechen): There is no backend method to get the role # by name, so build the hints to list the roles and filter by # name instead. hints = driver_hints.Hints() hints.add_filter('name', role_name) # Only return global roles, domain-specific roles can't be used in # system assignments and bootstrap isn't designed to work with # domain-specific roles. hints.add_filter('domain_id', None) # NOTE(lbragstad): Global roles are unique based on name. At this # point we should be safe to return the first, and only, element in # the list. 
return PROVIDERS.role_api.list_roles(hints)[0] def _ensure_implied_role(self, prior_role_id, implied_role_id): try: PROVIDERS.role_api.create_implied_role( prior_role_id, implied_role_id ) LOG.info( 'Created implied role where %s implies %s', prior_role_id, implied_role_id, ) except exception.Conflict: LOG.info( 'Implied role where %s implies %s exists, skipping creation.', prior_role_id, implied_role_id, ) def _bootstrap_service_role(self): role = self._ensure_role_exists(self.service_role_name) self.service_role_id = role['id'] def _bootstrap_reader_role(self): role = self._ensure_role_exists(self.reader_role_name) self.reader_role_id = role['id'] def _bootstrap_member_role(self): role = self._ensure_role_exists(self.member_role_name) self.member_role_id = role['id'] self._ensure_implied_role(self.member_role_id, self.reader_role_id) def _bootstrap_manager_role(self): role = self._ensure_role_exists(self.manager_role_name) self.manager_role_id = role['id'] self._ensure_implied_role(self.manager_role_id, self.member_role_id) def _bootstrap_admin_role(self): role = self._ensure_role_exists(self.admin_role_name) self.admin_role_id = role['id'] self._ensure_implied_role(self.admin_role_id, self.manager_role_id) # NOTE(dmendiza): deployments older than 2023.2 did not have a # "manager" role, so we need to clean up the old admin -> member # implied role try: PROVIDERS.role_api.delete_implied_role( self.admin_role_id, self.member_role_id ) except exception.ImpliedRoleNotFound: pass def _bootstrap_admin_user(self): # NOTE(morganfainberg): Do not create the user if it already exists. try: user = PROVIDERS.identity_api.get_user_by_name( self.admin_username, self.default_domain_id ) LOG.info( 'User %s already exists, skipping creation.', self.admin_username, ) # If the user is not enabled, re-enable them. This also helps # provide some useful logging output later. 
update = {} enabled = user['enabled'] if not enabled: update['enabled'] = True try: PROVIDERS.identity_api.driver.authenticate( user['id'], self.admin_password ) except AssertionError: # This means that authentication failed and that we need to # update the user's password. This is going to persist a # revocation event that will make all previous tokens for the # user invalid, which is OK because it falls within the scope # of revocation. If a password changes, we shouldn't be able to # use tokens obtained with an old password. update['password'] = self.admin_password # Only make a call to update the user if the password has changed # or the user was previously disabled. This allows bootstrap to act # as a recovery tool, without having to create a new user. if update: user = PROVIDERS.identity_api.update_user(user['id'], update) LOG.info('Reset password for user %s.', self.admin_username) if not enabled and user['enabled']: # Although we always try to enable the user, this log # message only makes sense if we know that the user was # previously disabled. 
LOG.info('Enabled user %s.', self.admin_username) except exception.UserNotFound: user = PROVIDERS.identity_api.create_user( user_ref={ 'name': self.admin_username, 'enabled': True, 'domain_id': self.default_domain_id, 'password': self.admin_password, } ) LOG.info('Created user %s', self.admin_username) self.admin_user_id = user['id'] def _bootstrap_project_role_assignment(self): try: PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=self.admin_user_id, project_id=self.project_id, role_id=self.admin_role_id, ) LOG.info( 'Granted role %(role)s on project %(project)s to ' 'user %(username)s.', { 'role': self.admin_role_name, 'project': self.project_name, 'username': self.admin_username, }, ) except exception.Conflict: LOG.info( 'User %(username)s already has role %(role)s on ' 'project %(project)s.', { 'username': self.admin_username, 'role': self.admin_role_name, 'project': self.project_name, }, ) def _bootstrap_system_role_assignment(self): # NOTE(lbragstad): We need to make sure a user has at least one role on # the system. Otherwise it's possible for administrators to lock # themselves out of system-level APIs in their deployment. This is # considered backwards compatible because even if the assignment # exists, it needs to be enabled through oslo.policy configuration # options to be enforced. 
try: PROVIDERS.assignment_api.create_system_grant_for_user( self.admin_user_id, self.admin_role_id ) LOG.info( 'Granted role %(role)s on the system to user %(username)s.', { 'role': self.admin_role_name, 'username': self.admin_username, }, ) except exception.Conflict: LOG.info( 'User %(username)s already has role %(role)s on ' 'the system.', { 'username': self.admin_username, 'role': self.admin_role_name, }, ) def _bootstrap_region(self): if self.region_id: try: PROVIDERS.catalog_api.create_region( region_ref={'id': self.region_id} ) LOG.info('Created region %s', self.region_id) except exception.Conflict: LOG.info( 'Region %s exists, skipping creation.', self.region_id ) def _bootstrap_catalog(self): if self.public_url or self.admin_url or self.internal_url: hints = driver_hints.Hints() hints.add_filter('type', 'identity') services = PROVIDERS.catalog_api.list_services(hints) if services: service = services[0] hints = driver_hints.Hints() hints.add_filter('service_id', service['id']) if self.region_id: hints.add_filter('region_id', self.region_id) endpoints = PROVIDERS.catalog_api.list_endpoints(hints) else: service_id = uuid.uuid4().hex service = { 'id': service_id, 'name': self.service_name, 'type': 'identity', 'enabled': True, } PROVIDERS.catalog_api.create_service(service_id, service) endpoints = [] self.service_id = service['id'] available_interfaces = {e['interface']: e for e in endpoints} expected_endpoints = { 'public': self.public_url, 'internal': self.internal_url, 'admin': self.admin_url, } for interface, url in expected_endpoints.items(): if not url: # not specified to bootstrap command continue try: endpoint_ref = available_interfaces[interface] except KeyError: endpoint_ref = { 'id': uuid.uuid4().hex, 'interface': interface, 'url': url, 'service_id': self.service_id, 'enabled': True, } if self.region_id: endpoint_ref['region_id'] = self.region_id PROVIDERS.catalog_api.create_endpoint( endpoint_id=endpoint_ref['id'], endpoint_ref=endpoint_ref, ) 
LOG.info( 'Created %(interface)s endpoint %(url)s', {'interface': interface, 'url': url}, ) else: endpoint_ref['url'] = url PROVIDERS.catalog_api.update_endpoint( endpoint_id=endpoint_ref['id'], endpoint_ref=endpoint_ref, ) LOG.info('%s endpoint updated', interface) self.endpoints[interface] = endpoint_ref['id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/cli.py0000664000175000017500000016400200000000000017213 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import argparse import datetime import os import sys import uuid from oslo_config import cfg from oslo_db import exception as db_exception from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils import pbr.version from keystone.cmd import bootstrap from keystone.cmd import doctor from keystone.cmd import idutils from keystone.common import driver_hints from keystone.common import fernet_utils from keystone.common import jwt_utils from keystone.common import sql from keystone.common.sql import upgrades from keystone.common import utils import keystone.conf from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone.federation import idp from keystone.federation import utils as mapping_engine from keystone.i18n import _ from keystone.server import backends # We need to define the log level to INFO. Otherwise, when using the # system, we will not be able to see anything. log.set_defaults(default_log_levels="INFO") CONF = keystone.conf.CONF LOG = log.getLogger(__name__) class BaseApp: name: str @classmethod def add_argument_parser(cls, subparsers): parser = subparsers.add_parser(cls.name, help=cls.__doc__) parser.set_defaults(cmd_class=cls) return parser class BootStrap(BaseApp): """Perform the basic bootstrap process.""" name = "bootstrap" def __init__(self): self.bootstrapper = bootstrap.Bootstrapper() @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--bootstrap-username', default='admin', metavar='OS_BOOTSTRAP_USERNAME', help=( 'The username of the initial keystone ' 'user during bootstrap process.' ), ) # NOTE(morganfainberg): See below for ENV Variable that can be used # in lieu of the command-line arguments. 
parser.add_argument( '--bootstrap-password', default=None, metavar='OS_BOOTSTRAP_PASSWORD', help='The bootstrap user password', ) parser.add_argument( '--bootstrap-project-name', default='admin', metavar='OS_BOOTSTRAP_PROJECT_NAME', help=( 'The initial project created during the ' 'keystone bootstrap process.' ), ) parser.add_argument( '--bootstrap-role-name', default='admin', metavar='OS_BOOTSTRAP_ROLE_NAME', help=( 'The initial role-name created during the ' 'keystone bootstrap process.' ), ) parser.add_argument( '--bootstrap-service-name', default='keystone', metavar='OS_BOOTSTRAP_SERVICE_NAME', help=( 'The initial name for the initial identity ' 'service created during the keystone ' 'bootstrap process.' ), ) parser.add_argument( '--bootstrap-admin-url', metavar='OS_BOOTSTRAP_ADMIN_URL', help=( 'The initial identity admin url created ' 'during the keystone bootstrap process. ' 'e.g. http://127.0.0.1:5000/v3' ), ) parser.add_argument( '--bootstrap-public-url', metavar='OS_BOOTSTRAP_PUBLIC_URL', help=( 'The initial identity public url created ' 'during the keystone bootstrap process. ' 'e.g. http://127.0.0.1:5000/v3' ), ) parser.add_argument( '--bootstrap-internal-url', metavar='OS_BOOTSTRAP_INTERNAL_URL', help=( 'The initial identity internal url created ' 'during the keystone bootstrap process. ' 'e.g. http://127.0.0.1:5000/v3' ), ) parser.add_argument( '--bootstrap-region-id', metavar='OS_BOOTSTRAP_REGION_ID', help=( 'The initial region_id endpoints will be ' 'placed in during the keystone bootstrap ' 'process.' ), ) parser.add_argument( '--immutable-roles', default=True, action='store_true', help=( 'Whether default roles (admin, member, and ' 'reader) should be immutable. This is the ' 'default.' ), ) parser.add_argument( '--no-immutable-roles', default=False, action='store_true', help=( 'Whether default roles (admin, member, and ' 'reader) should be immutable. 
Immutable ' 'default roles is the default, use this ' 'flag to opt out of immutable default ' 'roles.' ), ) return parser def do_bootstrap(self): """Perform the bootstrap actions. Create bootstrap user, project, and role so that CMS, humans, or scripts can continue to perform initial setup (domains, projects, services, endpoints, etc) of Keystone when standing up a new deployment. """ self.username = ( os.environ.get('OS_BOOTSTRAP_USERNAME') or CONF.command.bootstrap_username ) self.project_name = ( os.environ.get('OS_BOOTSTRAP_PROJECT_NAME') or CONF.command.bootstrap_project_name ) self.role_name = ( os.environ.get('OS_BOOTSTRAP_ROLE_NAME') or CONF.command.bootstrap_role_name ) self.password = ( os.environ.get('OS_BOOTSTRAP_PASSWORD') or CONF.command.bootstrap_password ) self.service_name = ( os.environ.get('OS_BOOTSTRAP_SERVICE_NAME') or CONF.command.bootstrap_service_name ) self.admin_url = ( os.environ.get('OS_BOOTSTRAP_ADMIN_URL') or CONF.command.bootstrap_admin_url ) self.public_url = ( os.environ.get('OS_BOOTSTRAP_PUBLIC_URL') or CONF.command.bootstrap_public_url ) self.internal_url = ( os.environ.get('OS_BOOTSTRAP_INTERNAL_URL') or CONF.command.bootstrap_internal_url ) self.region_id = ( os.environ.get('OS_BOOTSTRAP_REGION_ID') or CONF.command.bootstrap_region_id ) self.service_id = None self.endpoints = None if self.password is None: print( _( 'ERROR: Either --bootstrap-password argument or ' 'OS_BOOTSTRAP_PASSWORD must be set.' 
) ) sys.exit(1) self.bootstrapper.admin_password = self.password self.bootstrapper.admin_username = self.username self.bootstrapper.project_name = self.project_name self.bootstrapper.admin_role_name = self.role_name self.bootstrapper.service_name = self.service_name self.bootstrapper.service_id = self.service_id self.bootstrapper.admin_url = self.admin_url self.bootstrapper.public_url = self.public_url self.bootstrapper.internal_url = self.internal_url self.bootstrapper.region_id = self.region_id if CONF.command.no_immutable_roles: self.bootstrapper.immutable_roles = False else: self.bootstrapper.immutable_roles = True self.bootstrapper.bootstrap() self.service_role_id = self.bootstrapper.service_role_id self.reader_role_id = self.bootstrapper.reader_role_id self.member_role_id = self.bootstrapper.member_role_id self.manager_role_id = self.bootstrapper.manager_role_id self.role_id = self.bootstrapper.admin_role_id self.project_id = self.bootstrapper.project_id @classmethod def main(cls): klass = cls() klass.do_bootstrap() class ProjectSetup(BaseApp): """Create project with specified UUID.""" name = 'project_setup' def __init__(self): self.identity = idutils.Identity() @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--project-name', default=None, required=True, help='The name of the keystone project being created.', ) parser.add_argument( '--project-id', default=None, help='The UUID of the keystone project being created.', ) return parser def do_project_setup(self): """Create project with specified UUID.""" self.identity.project_name = CONF.command.project_name self.identity.project_id = CONF.command.project_id self.identity.project_setup() @classmethod def main(cls): klass = cls() klass.do_project_setup() class UserSetup(BaseApp): """Create user with specified UUID.""" name = 'user_setup' def __init__(self): self.identity = idutils.Identity() @classmethod def add_argument_parser(cls, 
subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--username', default=None, required=True, help='The username of the keystone user that is being created.', ) parser.add_argument( '--user-password-plain', default=None, required=True, help='The plaintext password for the keystone' ' user that is being created.', ) parser.add_argument( '--user-id', default=None, help='The UUID of the keystone user being created.', ) return parser def do_user_setup(self): """Create user with specified UUID.""" self.identity.user_name = CONF.command.username self.identity.user_password = CONF.command.user_password_plain self.identity.user_id = CONF.command.user_id self.identity.user_setup() @classmethod def main(cls): klass = cls() klass.do_user_setup() class Doctor(BaseApp): """Diagnose common problems with keystone deployments.""" name = 'doctor' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) return parser @staticmethod def main(): # Return a non-zero exit code if we detect any symptoms. raise SystemExit(doctor.diagnose()) class DbSync(BaseApp): """Sync the database.""" name = 'db_sync' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( 'version', default=None, nargs='?', help=( 'Migrate the database up to a specified version. ' 'If not provided, db_sync will migrate the database to the ' 'latest known version. ' 'Schema downgrades are not supported.' ), ) group = parser.add_mutually_exclusive_group() group.add_argument( '--expand', default=False, action='store_true', help=( 'Expand the database schema in preparation for data migration.' ), ) group.add_argument( '--migrate', default=False, action='store_true', help=( 'Copy all data that needs to be migrated within the database ' 'ahead of starting the first keystone node upgraded to the ' 'new release. ' 'This command should be run after the --expand command. 
' 'Once the --migrate command has completed, you can upgrade ' 'all your keystone nodes to the new release and restart them.' ), ) group.add_argument( '--contract', default=False, action='store_true', help=( 'Remove any database tables and columns that are no longer ' 'required. This command should be run after all keystone ' 'nodes are running the new release.' ), ) group.add_argument( '--check', default=False, action='store_true', help=( 'Check for outstanding database actions that still need to be ' 'executed. This command can be used to verify the condition ' 'of the current database state.' ), ) return parser @classmethod def check_db_sync_status(cls): status = 0 try: expand_version = upgrades.get_db_version(branch='expand') except db_exception.DBMigrationError: LOG.info( 'Your database is not currently under version ' 'control or the database is already controlled. ' 'Your first step is to run `keystone-manage db_sync --expand`.' ) return 2 try: contract_version = upgrades.get_db_version(branch='contract') except db_exception.DBMigrationError: contract_version = None heads = upgrades.get_current_heads() if ( upgrades.EXPAND_BRANCH not in heads or heads[upgrades.EXPAND_BRANCH] != expand_version ): LOG.info( 'Your database is not up to date. Your first step is ' 'to run `keystone-manage db_sync --expand`.' ) status = 2 elif ( upgrades.CONTRACT_BRANCH not in heads or heads[upgrades.CONTRACT_BRANCH] != contract_version ): LOG.info( 'Expand version is ahead of contract. Your next ' 'step is to run `keystone-manage db_sync --contract`.' ) status = 4 else: LOG.info( 'All db_sync commands are upgraded to the same ' 'version and up-to-date.' 
) LOG.info( 'Current repository versions:\n' 'Expand: %(expand)s (head: %(expand_head)s)\n' 'Contract: %(contract)s (head: %(contract_head)s)', { 'expand': expand_version, 'expand_head': heads.get(upgrades.EXPAND_BRANCH), 'contract': contract_version, 'contract_head': heads.get(upgrades.CONTRACT_BRANCH), }, ) return status @staticmethod def main(): if CONF.command.check: sys.exit(DbSync.check_db_sync_status()) elif CONF.command.expand: upgrades.expand_schema() elif CONF.command.migrate: upgrades.migrate_data() elif CONF.command.contract: upgrades.contract_schema() else: upgrades.offline_sync_database_to_version(CONF.command.version) class DbVersion(BaseApp): """Print the current migration version of the database.""" name = 'db_version' @staticmethod def main(): print(upgrades.get_db_version()) class ResetLastActive(BaseApp): """Reset null values for all users to current time.""" name = "reset_last_active" @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--force', action='store_true', help='Write to the database without asking for confirmation', ) return parser @staticmethod def main(): if not CONF.command.force: confirm = input( "Security Warning: reset_last_active will update all users\n" "in the database with a NULL value for last_active_at to be\n" "last active at the current time. This includes users that\n" "have never logged in. If your Keystone deployment is\n" "configured to use disable_user_account_days_inactive, these\n" "users will still be enabled and won't be disabled until the\n" "configured amount of time has passed after this command is\n" "run.\n" "Are you sure you want to continue [y/N]? 
" ) if confirm.lower() not in ('y', 'yes'): raise SystemExit('reset_last_active aborted.') LOG.debug( "Resetting null values to current time %s", timeutils.utcnow(), ) drivers = backends.load_backends() identity_api = drivers['identity_api'] identity_api.reset_last_active() class BasePermissionsSetup(BaseApp): """Common user/group setup for file permissions.""" @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) running_as_root = os.geteuid() == 0 parser.add_argument('--keystone-user', required=running_as_root) parser.add_argument('--keystone-group', required=running_as_root) return parser @staticmethod def get_user_group(): keystone_user_id = None keystone_group_id = None try: a = CONF.command.keystone_user if a: keystone_user_id = utils.get_unix_user(a)[0] except KeyError: raise ValueError("Unknown user '%s' in --keystone-user" % a) try: a = CONF.command.keystone_group if a: keystone_group_id = utils.get_unix_group(a)[0] except KeyError: raise ValueError("Unknown group '%s' in --keystone-group" % a) return keystone_user_id, keystone_group_id @classmethod def initialize_fernet_repository( cls, keystone_user_id, keystone_group_id, config_group=None ): conf_group = getattr(CONF, config_group) futils = fernet_utils.FernetUtils( conf_group.key_repository, conf_group.max_active_keys, config_group ) futils.create_key_directory(keystone_user_id, keystone_group_id) if futils.validate_key_repository(requires_write=True): futils.initialize_key_repository( keystone_user_id, keystone_group_id ) @classmethod def rotate_fernet_repository( cls, keystone_user_id, keystone_group_id, config_group=None ): conf_group = getattr(CONF, config_group) futils = fernet_utils.FernetUtils( conf_group.key_repository, conf_group.max_active_keys, config_group ) if futils.validate_key_repository(requires_write=True): futils.rotate_keys(keystone_user_id, keystone_group_id) class FernetSetup(BasePermissionsSetup): """Setup key repositories for 
Fernet tokens and auth receipts. This also creates a primary key used for both creating and validating Fernet tokens and auth receipts. To improve security, you should rotate your keys (using keystone-manage fernet_rotate, for example). """ name = 'fernet_setup' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.initialize_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_tokens' ) if os.path.abspath( CONF.fernet_tokens.key_repository ) != os.path.abspath(CONF.fernet_receipts.key_repository): cls.initialize_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_receipts' ) elif ( CONF.fernet_tokens.max_active_keys != CONF.fernet_receipts.max_active_keys ): # WARNING(adriant): If the directories are the same, # 'max_active_keys' is ignored from fernet_receipts in favor of # fernet_tokens to avoid a potential mismatch. Only if the # directories are different do we create a different one for # receipts, and then respect 'max_active_keys' for receipts. LOG.warning( "Receipt and Token fernet key directories are the same " "but `max_active_keys` is different. Receipt " "`max_active_keys` will be ignored in favor of Token " "`max_active_keys`." ) class FernetRotate(BasePermissionsSetup): """Rotate Fernet encryption keys. This assumes you have already run keystone-manage fernet_setup. A new primary key is placed into rotation, which is used for new tokens. The old primary key is demoted to secondary, which can then still be used for validating tokens. Excess secondary keys (beyond [fernet_tokens] max_active_keys) are revoked. Revoked keys are permanently deleted. A new staged key will be created and used to validate tokens. The next time key rotation takes place, the staged key will be put into rotation as the primary key. Rotating keys too frequently, or with [fernet_tokens] max_active_keys set too low, will cause tokens to become invalid prior to their expiration. 
""" name = 'fernet_rotate' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.rotate_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_tokens' ) if os.path.abspath( CONF.fernet_tokens.key_repository ) != os.path.abspath(CONF.fernet_receipts.key_repository): cls.rotate_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_receipts' ) class CreateJWSKeyPair(BasePermissionsSetup): """Create a key pair for signing and validating JWS tokens. This command creates a public and private key pair to use for signing and validating JWS token signatures. The key pair is written to the directory where the command is invoked. """ name = 'create_jws_keypair' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--force', action='store_true', help=('Forcibly overwrite keys if they already exist'), ) return parser @classmethod def main(cls): current_directory = os.getcwd() private_key_path = os.path.join(current_directory, 'private.pem') public_key_path = os.path.join(current_directory, 'public.pem') if os.path.isfile(private_key_path) and not CONF.command.force: raise SystemExit( _('Private key %(path)s already exists') % {'path': private_key_path} ) if os.path.isfile(public_key_path) and not CONF.command.force: raise SystemExit( _('Public key %(path)s already exists') % {'path': public_key_path} ) jwt_utils.create_jws_keypair(private_key_path, public_key_path) class TokenSetup(BasePermissionsSetup): """Setup a key repository for tokens. This also creates a primary key used for both creating and validating tokens. To improve security, you should rotate your keys (using keystone-manage token_rotate, for example). 
""" name = 'token_setup' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.initialize_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_tokens' ) class TokenRotate(BasePermissionsSetup): """Rotate token encryption keys. This assumes you have already run keystone-manage token_setup. A new primary key is placed into rotation, which is used for new tokens. The old primary key is demoted to secondary, which can then still be used for validating tokens. Excess secondary keys (beyond [token] max_active_keys) are revoked. Revoked keys are permanently deleted. A new staged key will be created and used to validate tokens. The next time key rotation takes place, the staged key will be put into rotation as the primary key. Rotating keys too frequently, or with [token] max_active_keys set too low, will cause tokens to become invalid prior to their expiration. """ name = 'token_rotate' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.rotate_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_tokens' ) class ReceiptSetup(BasePermissionsSetup): """Setup a key repository for auth receipts. This also creates a primary key used for both creating and validating receipts. To improve security, you should rotate your keys (using keystone-manage receipt_rotate, for example). """ name = 'receipt_setup' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.initialize_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_receipts' ) class ReceiptRotate(BasePermissionsSetup): """Rotate auth receipts encryption keys. This assumes you have already run keystone-manage receipt_setup. A new primary key is placed into rotation, which is used for new receipts. The old primary key is demoted to secondary, which can then still be used for validating receipts. Excess secondary keys (beyond [receipt] max_active_keys) are revoked. 
Revoked keys are permanently deleted. A new staged key will be created and used to validate receipts. The next time key rotation takes place, the staged key will be put into rotation as the primary key. Rotating keys too frequently, or with [receipt] max_active_keys set too low, will cause receipts to become invalid prior to their expiration. """ name = 'receipt_rotate' @classmethod def main(cls): keystone_user_id, keystone_group_id = cls.get_user_group() cls.rotate_fernet_repository( keystone_user_id, keystone_group_id, 'fernet_receipts' ) class CredentialSetup(BasePermissionsSetup): """Setup a Fernet key repository for credential encryption. The purpose of this command is very similar to `keystone-manage fernet_setup` only the keys included in this repository are for encrypting and decrypting credential secrets instead of token payloads. Keys can be rotated using `keystone-manage credential_rotate`. """ name = 'credential_setup' @classmethod def main(cls): futils = fernet_utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) keystone_user_id, keystone_group_id = cls.get_user_group() futils.create_key_directory(keystone_user_id, keystone_group_id) if futils.validate_key_repository(requires_write=True): futils.initialize_key_repository( keystone_user_id, keystone_group_id ) class CredentialRotate(BasePermissionsSetup): """Rotate Fernet encryption keys for credential encryption. This assumes you have already run `keystone-manage credential_setup`. A new primary key is placed into rotation only if all credentials are encrypted with the current primary key. If any credentials are encrypted with a secondary key the rotation will abort. This protects against removing a key that is still required to decrypt credentials. Once a key is removed from the repository, it is impossible to recover the original data without restoring from a backup external to keystone (more on backups below). 
To make sure all credentials are encrypted with the latest primary key, please see the `keystone-manage credential_migrate` command. Since the maximum number of keys in the credential repository is 3, once all credentials are encrypted with the latest primary key we can safely introduce a new primary key. All credentials will still be decryptable since they are all encrypted with the only secondary key in the repository. It is imperitive to understand the importance of backing up keys used to encrypt credentials. In the event keys are overrotated, applying a key repository from backup can help recover otherwise useless credentials. Persisting snapshots of the key repository in secure and encrypted source control, or a dedicated key management system are good examples of encryption key backups. The `keystone-manage credential_rotate` and `keystone-manage credential_migrate` commands are intended to be done in sequence. After performing a rotation, a migration must be done before performing another rotation. This ensures we don't over-rotate encryption keys. """ name = 'credential_rotate' def __init__(self): drivers = backends.load_backends() self.credential_provider_api = drivers['credential_provider_api'] self.credential_api = drivers['credential_api'] def validate_primary_key(self): crypto, keys = credential_fernet.get_multi_fernet_keys() primary_key_hash = credential_fernet.primary_key_hash(keys) credentials = self.credential_api.driver.list_credentials( driver_hints.Hints() ) for credential in credentials: if credential['key_hash'] != primary_key_hash: msg = _( 'Unable to rotate credential keys because not all ' 'credentials are encrypted with the primary key. ' 'Please make sure all credentials have been encrypted ' 'with the primary key using `keystone-manage ' 'credential_migrate`.' 
) raise SystemExit(msg) @classmethod def main(cls): futils = fernet_utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) keystone_user_id, keystone_group_id = cls.get_user_group() if futils.validate_key_repository(requires_write=True): klass = cls() klass.validate_primary_key() futils.rotate_keys(keystone_user_id, keystone_group_id) class CredentialMigrate(BasePermissionsSetup): """Provides the ability to encrypt credentials using a new primary key. This assumes that there is already a credential key repository in place and that the database backend has been upgraded to at least the Newton schema. If the credential repository doesn't exist yet, you can use ``keystone-manage credential_setup`` to create one. """ name = 'credential_migrate' def __init__(self): drivers = backends.load_backends() self.credential_provider_api = drivers['credential_provider_api'] self.credential_api = drivers['credential_api'] def migrate_credentials(self): crypto, keys = credential_fernet.get_multi_fernet_keys() primary_key_hash = credential_fernet.primary_key_hash(keys) # FIXME(lbragstad): We *should* be able to use Hints() to ask only for # credentials that have a key_hash equal to a secondary key hash or # None, but Hints() doesn't seem to honor None values. See # https://bugs.launchpad.net/keystone/+bug/1614154. As a workaround - # we have to ask for *all* credentials and filter them ourselves. credentials = self.credential_api.driver.list_credentials( driver_hints.Hints() ) for credential in credentials: if credential['key_hash'] != primary_key_hash: # If the key_hash isn't None but doesn't match the # primary_key_hash, then we know the credential was encrypted # with a secondary key. Let's decrypt it, and send it through # the update path to re-encrypt it with the new primary key. 
decrypted_blob = self.credential_provider_api.decrypt( credential['encrypted_blob'] ) cred = {'blob': decrypted_blob} self.credential_api.update_credential(credential['id'], cred) @classmethod def main(cls): # Check to make sure we have a repository that works... futils = fernet_utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) futils.validate_key_repository(requires_write=True) klass = cls() klass.migrate_credentials() class TrustFlush(BaseApp): """Flush expired and non-expired soft deleted trusts from the backend.""" name = 'trust_flush' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--project-id', default=None, help=( 'The id of the project of which the ' 'expired or non-expired soft-deleted ' 'trusts is to be purged' ), ) parser.add_argument( '--trustor-user-id', default=None, help=( 'The id of the trustor of which the ' 'expired or non-expired soft-deleted ' 'trusts is to be purged' ), ) parser.add_argument( '--trustee-user-id', default=None, help=( 'The id of the trustee of which the ' 'expired or non-expired soft-deleted ' 'trusts is to be purged' ), ) parser.add_argument( '--date', default=timeutils.utcnow(), help=( 'The date of which the expired or ' 'non-expired soft-deleted trusts older ' 'than that will be purged. The format of ' 'the date to be "DD-MM-YYYY". 
If no date ' 'is supplied keystone-manage will use the ' 'system clock time at runtime' ), ) return parser @classmethod def main(cls): drivers = backends.load_backends() trust_manager = drivers['trust_api'] if CONF.command.date: if not isinstance(CONF.command.date, datetime.datetime): try: CONF.command.date = datetime.datetime.strptime( CONF.command.date, '%d-%m-%Y' ) except KeyError: raise ValueError( "'%s'Invalid input for date, should be DD-MM-YYYY", CONF.command.date, ) else: LOG.info( "No date is supplied, keystone-manage will use the " "system clock time at runtime " ) trust_manager.flush_expired_and_soft_deleted_trusts( project_id=CONF.command.project_id, trustor_user_id=CONF.command.trustor_user_id, trustee_user_id=CONF.command.trustee_user_id, date=CONF.command.date, ) class MappingPurge(BaseApp): """Purge the mapping table.""" name = 'mapping_purge' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--all', default=False, action='store_true', help=('Purge all mappings.'), ) parser.add_argument( '--domain-name', default=None, help=('Purge any mappings for the domain specified.'), ) parser.add_argument( '--public-id', default=None, help=('Purge the mapping for the Public ID specified.'), ) parser.add_argument( '--local-id', default=None, help=('Purge the mappings for the Local ID specified.'), ) parser.add_argument( '--type', default=None, choices=['user', 'group'], help=('Purge any mappings for the type specified.'), ) return parser @staticmethod def main(): def validate_options(): # NOTE(henry-nash): It would be nice to use the argparse automated # checking for this validation, but the only way I can see doing # that is to make the default (i.e. if no optional parameters # are specified) to purge all mappings - and that sounds too # dangerous as a default. So we use it in a slightly # unconventional way, where all parameters are optional, but you # must specify at least one. 
if ( CONF.command.all is False and CONF.command.domain_name is None and CONF.command.public_id is None and CONF.command.local_id is None and CONF.command.type is None ): raise ValueError(_('At least one option must be provided')) if CONF.command.all is True and ( CONF.command.domain_name is not None or CONF.command.public_id is not None or CONF.command.local_id is not None or CONF.command.type is not None ): raise ValueError( _('--all option cannot be mixed with other options') ) def get_domain_id(name): try: return resource_manager.get_domain_by_name(name)['id'] except KeyError: raise ValueError( _( "Unknown domain '%(name)s' specified by " "--domain-name" ) % {'name': name} ) validate_options() drivers = backends.load_backends() resource_manager = drivers['resource_api'] mapping_manager = drivers['id_mapping_api'] # Now that we have validated the options, we know that at least one # option has been specified, and if it was the --all option then this # was the only option specified. # # The mapping dict is used to filter which mappings are purged, so # leaving it empty means purge them all mapping = {} if CONF.command.domain_name is not None: mapping['domain_id'] = get_domain_id(CONF.command.domain_name) if CONF.command.public_id is not None: mapping['public_id'] = CONF.command.public_id if CONF.command.local_id is not None: mapping['local_id'] = CONF.command.local_id if CONF.command.type is not None: mapping['entity_type'] = CONF.command.type mapping_manager.purge_mappings(mapping) DOMAIN_CONF_FHEAD = 'keystone.' DOMAIN_CONF_FTAIL = '.conf' def _domain_config_finder(conf_dir): """Return a generator of all domain config files found in a directory. Domain configs match the filename pattern of 'keystone..conf'. 
:returns: generator yielding (filename, domain_name) tuples """ LOG.info('Scanning %r for domain config files', conf_dir) for r, d, f in os.walk(conf_dir): for fname in f: if fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith( DOMAIN_CONF_FTAIL ): if fname.count('.') >= 2: domain_name = fname[ len(DOMAIN_CONF_FHEAD) : -len(DOMAIN_CONF_FTAIL) ] yield (os.path.join(r, fname), domain_name) continue LOG.warning( 'Ignoring file (%s) while scanning domain config directory', fname, ) class DomainConfigUploadFiles: def __init__(self, domain_config_finder=_domain_config_finder): super().__init__() self.load_backends() self._domain_config_finder = domain_config_finder def load_backends(self): drivers = backends.load_backends() self.resource_manager = drivers['resource_api'] self.domain_config_manager = drivers['domain_config_api'] def valid_options(self): """Validate the options, returning True if they are indeed valid. It would be nice to use the argparse automated checking for this validation, but the only way I can see doing that is to make the default (i.e. if no optional parameters are specified) to upload all configuration files - and that sounds too dangerous as a default. So we use it in a slightly unconventional way, where all parameters are optional, but you must specify at least one. """ if CONF.command.all is False and CONF.command.domain_name is None: print( _( 'At least one option must be provided, use either ' '--all or --domain-name' ) ) return False if CONF.command.all is True and CONF.command.domain_name is not None: print( _( 'The --all option cannot be used with ' 'the --domain-name option' ) ) return False return True def _upload_config_to_database(self, file_name, domain_name): """Upload a single config file to the database. 
:param file_name: the file containing the config options :param domain_name: the domain name :returns: a boolean indicating if the upload succeeded """ try: domain_ref = self.resource_manager.get_domain_by_name(domain_name) except exception.DomainNotFound: print( _( 'Invalid domain name: %(domain)s found in config file ' 'name: %(file)s - ignoring this file.' ) % {'domain': domain_name, 'file': file_name} ) return False if self.domain_config_manager.get_config_with_sensitive_info( domain_ref['id'] ): print( _( 'Domain: %(domain)s already has a configuration ' 'defined - ignoring file: %(file)s.' ) % {'domain': domain_name, 'file': file_name} ) return False sections = {} try: parser = cfg.ConfigParser(file_name, sections) parser.parse() except Exception: # We explicitly don't try and differentiate the error cases, in # order to keep the code in this tool more robust as oslo.config # changes. print( _( 'Error parsing configuration file for domain: %(domain)s, ' 'file: %(file)s.' ) % {'domain': domain_name, 'file': file_name} ) return False try: for group in sections: for option in sections[group]: sections[group][option] = sections[group][option][0] self.domain_config_manager.create_config( domain_ref['id'], sections ) return True except Exception as e: msg = ( 'Error processing config file for domain: ' '%(domain_name)s, file: %(filename)s, error: %(error)s' ) LOG.error( msg, { 'domain_name': domain_name, 'filename': file_name, 'error': e, }, exc_info=True, ) return False def read_domain_configs_from_files(self): """Read configs from file(s) and load into database. The command line parameters have already been parsed and the CONF command option will have been set. It is either set to the name of an explicit domain, or it's None to indicate that we want all domain config files. 
""" domain_name = CONF.command.domain_name conf_dir = CONF.identity.domain_config_dir if not os.path.exists(conf_dir): print(_('Unable to locate domain config directory: %s') % conf_dir) raise ValueError if domain_name: # Request is to upload the configs for just one domain fname = DOMAIN_CONF_FHEAD + domain_name + DOMAIN_CONF_FTAIL if not self._upload_config_to_database( os.path.join(conf_dir, fname), domain_name ): return False return True success_cnt = 0 failure_cnt = 0 for filename, domain_name in self._domain_config_finder(conf_dir): if self._upload_config_to_database(filename, domain_name): success_cnt += 1 LOG.info('Successfully uploaded domain config %r', filename) else: failure_cnt += 1 if success_cnt == 0: LOG.warning('No domain configs uploaded from %r', conf_dir) if failure_cnt: return False return True def run(self): # First off, let's just check we can talk to the domain database try: self.resource_manager.list_domains(driver_hints.Hints()) except Exception: # It is likely that there is some SQL or other backend error # related to set up print( _( 'Unable to access the keystone database, please check it ' 'is configured correctly.' ) ) raise if not self.valid_options(): return 1 if not self.read_domain_configs_from_files(): return 1 class DomainConfigUpload(BaseApp): """Upload the domain specific configuration files to the database.""" name = 'domain_config_upload' @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--all', default=False, action='store_true', help='Upload contents of all domain specific ' 'configuration files. Either use this option ' 'or use the --domain-name option to choose a ' 'specific domain.', ) parser.add_argument( '--domain-name', default=None, help='Upload contents of the specific ' 'configuration file for the given domain. 
' 'Either use this option or use the --all ' 'option to upload contents for all domains.', ) return parser @staticmethod def main(): dcu = DomainConfigUploadFiles() status = dcu.run() if status is not None: sys.exit(status) class SamlIdentityProviderMetadata(BaseApp): """Generate Identity Provider metadata.""" name = 'saml_idp_metadata' @staticmethod def main(): metadata = idp.MetadataGenerator().generate_metadata() print(metadata) class MappingEngineTester(BaseApp): """Execute mapping engine locally.""" name = 'mapping_engine' def __init__(self): super().__init__() self.mapping_id = uuid.uuid4().hex self.rules_pathname = None self.rules = None self.assertion_pathname = None self.assertion = None def read_rules(self, path): self.rules_pathname = path try: with open(path, "rb") as file: self.rules = jsonutils.load(file) except ValueError as e: raise SystemExit( _('Error while parsing rules %(path)s: %(err)s') % {'path': path, 'err': e} ) def read_assertion(self, path): self.assertion_pathname = path try: with open(path) as file: self.assertion = file.read().strip() except OSError as e: raise SystemExit( _("Error while opening file %(path)s: %(err)s") % {'path': path, 'err': e} ) LOG.debug("Assertions loaded: [%s].", self.assertion) def normalize_assertion(self): def split(line, line_num): try: k, v = line.split(':', 1) return k.strip(), v.strip() except ValueError: msg = _( "assertion file %(pathname)s at line %(line_num)d " "expected 'key: value' but found '%(line)s' " "see help for file format" ) raise SystemExit( msg % { 'pathname': self.assertion_pathname, 'line_num': line_num, 'line': line, } ) assertion = self.assertion.splitlines() assertion_dict = {} prefix = CONF.command.prefix for line_num, line in enumerate(assertion, 1): line = line.strip() if line == '': continue k, v = split(line, line_num) if prefix: if k.startswith(prefix): assertion_dict[k] = v else: assertion_dict[k] = v self.assertion = assertion_dict def normalize_rules(self): if 
isinstance(self.rules, list): self.rules = {'rules': self.rules} @classmethod def main(cls): if CONF.command.engine_debug: mapping_engine.LOG.logger.setLevel('DEBUG') LOG.logger.setLevel('DEBUG') LOG.debug("Debug log level enabled!") else: mapping_engine.LOG.logger.setLevel('WARN') tester = cls() tester.read_rules(CONF.command.rules) tester.normalize_rules() attribute_mapping = tester.rules.copy() if CONF.command.mapping_schema_version: attribute_mapping['schema_version'] = ( CONF.command.mapping_schema_version ) if not attribute_mapping.get('schema_version'): default_schema_version = '1.0' LOG.warning( 'No schema version defined in rules [%s]. Therefore,' 'we will use the default as [%s].', attribute_mapping, default_schema_version, ) attribute_mapping['schema_version'] = default_schema_version LOG.info("Validating Attribute mapping rules [%s].", attribute_mapping) mapping_engine.validate_mapping_structure(attribute_mapping) LOG.info("Attribute mapping rules are valid.") tester.read_assertion(CONF.command.input) tester.normalize_assertion() if CONF.command.engine_debug: print( "Using Rules:\n%s" % (jsonutils.dumps(tester.rules, indent=2)) ) print( "Using Assertion:\n%s" % (jsonutils.dumps(tester.assertion, indent=2)) ) rp = mapping_engine.RuleProcessor( tester.mapping_id, tester.rules['rules'] ) mapped = rp.process(tester.assertion) LOG.info("Result of the attribute mapping processing.") print(jsonutils.dumps(mapped, indent=2)) @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.formatter_class = argparse.RawTextHelpFormatter parser.add_argument( '--rules', default=None, required=True, help=( "Path to the file with " "rules to be executed. " "Content must be\na proper JSON structure, " "with a top-level key 'rules' and\n" "corresponding value being a list." ), ) parser.add_argument( '--input', default=None, required=True, help=( "Path to the file with input attributes. 
" "The content\nconsists of ':' separated " "parameter names and their values.\nThere " "is only one key-value pair per line. " "A ';' in the\nvalue is a separator and " "then a value is treated as a list.\n" "Example:\n" "\tEMAIL: me@example.com\n" "\tLOGIN: me\n" "\tGROUPS: group1;group2;group3" ), ) parser.add_argument( '--prefix', default=None, help=( "A prefix used for each environment " "variable in the\nassertion. For example, " "all environment variables may have\nthe " "prefix ASDF_." ), ) parser.add_argument( '--engine-debug', default=False, action="store_true", help=("Enable debug messages from the mapping engine."), ) parser.add_argument( '--mapping-schema-version', default=None, required=False, help=( "The override for the schema version of " "the rules that are loaded in the 'rules' " "option of the test CLI." ), ) class MappingPopulate(BaseApp): """Pre-populate entries from domain-specific backends. Running this command is not required. It should only be run right after the LDAP was configured, when many new users were added, or when "mapping_purge" is run. This command will take a while to run. It is perfectly fine for it to run more than several minutes. 
""" name = "mapping_populate" @classmethod def load_backends(cls): drivers = backends.load_backends() cls.identity_api = drivers['identity_api'] cls.resource_api = drivers['resource_api'] @classmethod def add_argument_parser(cls, subparsers): parser = super().add_argument_parser(subparsers) parser.add_argument( '--domain-name', default=None, required=True, help=( "Name of the domain configured to use " "domain-specific backend" ), ) return parser @classmethod def main(cls): """Process entries for id_mapping_api.""" cls.load_backends() domain_name = CONF.command.domain_name try: domain_id = cls.resource_api.get_domain_by_name(domain_name)['id'] except exception.DomainNotFound: print( _('Invalid domain name: %(domain)s') % {'domain': domain_name} ) return False # We don't actually need to tackle id_mapping_api in order to get # entries there, because list_users does this anyway. That's why it # will be enough to just make the call below. cls.identity_api.list_users(domain_scope=domain_id) CMDS = [ BootStrap, CredentialMigrate, CredentialRotate, CredentialSetup, DbSync, DbVersion, Doctor, DomainConfigUpload, FernetRotate, FernetSetup, CreateJWSKeyPair, MappingPopulate, MappingPurge, MappingEngineTester, ProjectSetup, ReceiptRotate, ReceiptSetup, ResetLastActive, SamlIdentityProviderMetadata, TokenRotate, TokenSetup, TrustFlush, UserSetup, ] def add_command_parsers(subparsers): for cmd in CMDS: cmd.add_argument_parser(subparsers) command_opt = cfg.SubCommandOpt( 'command', title='Commands', help='Available commands', handler=add_command_parsers, ) def main(argv=None, developer_config_file=None): """Main entry point into the keystone-manage CLI utility. :param argv: Arguments supplied via the command line using the ``sys`` standard library. :type argv: list :param developer_config_file: The location of a configuration file normally found in development environments. 
:type developer_config_file: string """ CONF.register_cli_opt(command_opt) keystone.conf.configure() sql.initialize() keystone.conf.set_default_for_default_log_levels() user_supplied_config_file = False if argv: for argument in argv: if argument == '--config-file': user_supplied_config_file = True if developer_config_file: developer_config_file = [developer_config_file] # NOTE(lbragstad): At this point in processing, the first element of argv # is the binary location of keystone-manage, which oslo.config doesn't need # and is keystone specific. Only pass a list of arguments so that # oslo.config can determine configuration file locations based on user # provided arguments, if present. CONF( args=argv[1:], project='keystone', version=pbr.version.VersionInfo('keystone').version_string(), usage='%(prog)s [' + '|'.join([cmd.name for cmd in CMDS]) + ']', default_config_files=developer_config_file, ) if not CONF.default_config_files and not user_supplied_config_file: LOG.warning('Config file not found, using default configs.') keystone.conf.setup_logging() CONF.command.cmd_class.main() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.506114 keystone-26.0.0/keystone/cmd/doctor/0000775000175000017500000000000000000000000017361 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/__init__.py0000664000175000017500000000534000000000000021474 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from keystone.cmd.doctor import caching from keystone.cmd.doctor import credential from keystone.cmd.doctor import database from keystone.cmd.doctor import debug from keystone.cmd.doctor import federation from keystone.cmd.doctor import ldap from keystone.cmd.doctor import security_compliance from keystone.cmd.doctor import tokens from keystone.cmd.doctor import tokens_fernet import keystone.conf from keystone.i18n import _ CONF = keystone.conf.CONF SYMPTOM_PREFIX = 'symptom_' SYMPTOM_MODULES = [ caching, credential, database, debug, federation, ldap, security_compliance, tokens, tokens_fernet, ] def diagnose(): """Report diagnosis for any symptoms we find. Returns true when any symptoms are found, false otherwise. """ symptoms_found = False for symptom in gather_symptoms(): if CONF.debug: # Some symptoms may take a long time to check, so let's keep # curious users posted on our progress as we go. print( 'Checking for %s...' % symptom.__name__[len(SYMPTOM_PREFIX) :].replace('_', ' ') ) # All symptoms are just callables that return true when they match the # condition that they're looking for. When that happens, we need to # inform the user by providing some helpful documentation. if symptom(): # We use this to keep track of our exit condition symptoms_found = True # Ignore 'H701: empty localization string' because we're definitely # passing a string here. Also, we include a line break here to # visually separate the symptom's description from any other # checks -- it provides a better user experience. print( _('\nWARNING: %s') % _(symptom.__doc__) ) # noqa: See comment above. 
return symptoms_found def gather_symptoms(): """Gather all of the objects in this module that are named symptom_*.""" symptoms = [] for module in SYMPTOM_MODULES: for name in dir(module): if name.startswith(SYMPTOM_PREFIX): symptoms.append(getattr(module, name)) return symptoms ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/caching.py0000664000175000017500000000402200000000000021325 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import cache import keystone.conf CONF = keystone.conf.CONF def symptom_caching_disabled(): """`keystone.conf [cache] enabled` is not enabled. Caching greatly improves the performance of keystone, and it is highly recommended that you enable it. """ return not CONF.cache.enabled def symptom_caching_enabled_without_a_backend(): """Caching is not completely configured. Although caching is enabled in `keystone.conf [cache] enabled`, the default backend is still set to the no-op backend. Instead, configure keystone to point to a real caching backend like memcached. """ return CONF.cache.enabled and CONF.cache.backend == 'dogpile.cache.null' def symptom_connection_to_memcached(): """Memcached isn't reachable. Caching is enabled and the `keystone.conf [cache] backend` option is configured but one or more Memcached servers are not reachable or marked as dead. 
Please ensure `keystone.conf [cache] memcache_servers` is configured properly. """ memcached_drivers = ['dogpile.cache.memcached', 'oslo_cache.memcache_pool'] if CONF.cache.enabled and CONF.cache.backend in memcached_drivers: cache.configure_cache() cache_stats = cache.CACHE_REGION.actual_backend.client.get_stats() memcached_server_count = len(CONF.cache.memcache_servers) if len(cache_stats) != memcached_server_count: return True else: return False else: return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/credential.py0000664000175000017500000000552100000000000022050 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import fernet_utils as utils import keystone.conf from keystone.credential.providers import fernet as credential_fernet CONF = keystone.conf.CONF def symptom_unique_key_repositories(): """Key repositories for encryption should be unique. Even though credentials are encrypted using the same mechanism as Fernet tokens, they should have key repository locations that are independent of one another. Using the same repository to encrypt credentials and tokens can be considered a security vulnerability because ciphertext from the keys used to encrypt credentials is exposed as the token ID. Sharing a key repository can also lead to premature key removal during key rotation. 
This could result in indecipherable credentials, rendering them completely useless, or early token invalidation because the key that was used to encrypt the entity has been deleted. Ensure `keystone.conf [credential] key_repository` and `keystone.conf [fernet_tokens] key_repository` are not pointing to the same location. """ return CONF.credential.key_repository == CONF.fernet_tokens.key_repository def symptom_usability_of_credential_fernet_key_repository(): """Credential key repository is not setup correctly. The credential Fernet key repository is expected to be readable by the user running keystone, but not world-readable, because it contains security sensitive secrets. """ fernet_utils = utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) return ( 'fernet' in CONF.credential.provider and not fernet_utils.validate_key_repository() ) def symptom_keys_in_credential_fernet_key_repository(): """Credential key repository is empty. After configuring keystone to use the Fernet credential provider, you should use `keystone-manage credential_setup` to initially populate your key repository with keys, and periodically rotate your keys with `keystone-manage credential_rotate`. """ fernet_utils = utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) return ( 'fernet' in CONF.credential.provider and not fernet_utils.load_keys() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/database.py0000664000175000017500000000211100000000000021472 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import keystone.conf CONF = keystone.conf.CONF def symptom_database_connection_is_not_SQLite(): """SQLite is not recommended for production deployments. SQLite does not enforce type checking and has limited support for migrations, making it unsuitable for use in keystone. Please change your `keystone.conf [database] connection` value to point to a supported database driver, such as MySQL. """ # noqa: D403 return ( CONF.database.connection is not None and 'sqlite' in CONF.database.connection ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/debug.py0000664000175000017500000000164500000000000021027 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import keystone.conf CONF = keystone.conf.CONF def symptom_debug_mode_is_enabled(): """Debug mode should be set to False. Debug mode can be used to get more information back when trying to isolate a problem, but it is not recommended to be enabled when running a production environment. 
Ensure `keystone.conf debug` is set to False """ return CONF.debug ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/federation.py0000664000175000017500000000243000000000000022052 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import keystone.conf CONF = keystone.conf.CONF def symptom_comma_in_SAML_public_certificate_path(): """`[saml] certfile` should not contain a comma (`,`). Because a comma is part of the API between keystone and the external xmlsec1 binary which utilizes the certificate, keystone cannot include a comma in the path to the public certificate file. """ return ',' in CONF.saml.certfile def symptom_comma_in_SAML_private_key_file_path(): """`[saml] certfile` should not contain a comma (`,`). Because a comma is part of the API between keystone and the external xmlsec1 binary which utilizes the key, keystone cannot include a comma in the path to the private key file. """ return ',' in CONF.saml.keyfile ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/ldap.py0000664000175000017500000001303400000000000020654 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import configparser import os import re import keystone.conf CONF = keystone.conf.CONF CONFIG_REGEX = r'^keystone\..*?\.conf$' def symptom_LDAP_user_enabled_emulation_dn_ignored(): """`[ldap] user_enabled_emulation_dn` is being ignored. There is no reason to set this value unless `keystone.conf [ldap] user_enabled_emulation` is also enabled. """ return ( not CONF.ldap.user_enabled_emulation and CONF.ldap.user_enabled_emulation_dn is not None ) def symptom_LDAP_user_enabled_emulation_use_group_config_ignored(): """`[ldap] user_enabled_emulation_use_group_config` is being ignored. There is no reason to set this value unless `keystone.conf [ldap] user_enabled_emulation` is also enabled. """ return ( not CONF.ldap.user_enabled_emulation and CONF.ldap.user_enabled_emulation_use_group_config ) def symptom_LDAP_group_members_are_ids_disabled(): """`[ldap] group_members_are_ids` is not enabled. Because you've set `keystone.conf [ldap] group_objectclass = posixGroup`, we would have also expected you to enable set `keystone.conf [ldap] group_members_are_ids` because we suspect you're using Open Directory, which would contain user ID's in a `posixGroup` rather than LDAP DNs, as other object classes typically would. """ return ( CONF.ldap.group_objectclass == 'posixGroup' and not CONF.ldap.group_members_are_ids ) def symptom_LDAP_file_based_domain_specific_configs(): """Domain specific driver directory is invalid or contains invalid files. 
If `keystone.conf [identity] domain_specific_drivers_enabled` is set to `true`, then support is enabled for individual domains to have their own identity drivers. The configurations for these can either be stored in a config file or in the database. The case we handle in this symptom is when they are stored in config files, which is indicated by `keystone.conf [identity] domain_configurations_from_database` being set to `false`. """ if ( not CONF.identity.domain_specific_drivers_enabled or CONF.identity.domain_configurations_from_database ): return False invalid_files = [] filedir = CONF.identity.domain_config_dir if os.path.isdir(filedir): for filename in os.listdir(filedir): if not re.match(CONFIG_REGEX, filename): invalid_files.append(filename) if invalid_files: invalid_str = ', '.join(invalid_files) print( 'Warning: The following non-config files were found: %s\n' 'If they are intended to be config files then rename them ' 'to the form of `keystone..conf`. ' 'Otherwise, ignore this warning' % invalid_str ) return True else: print('Could not find directory ', filedir) return True return False def symptom_LDAP_file_based_domain_specific_configs_formatted_correctly(): """LDAP domain specific configuration files are not formatted correctly. If `keystone.conf [identity] domain_specific_drivers_enabled` is set to `true`, then support is enabled for individual domains to have their own identity drivers. The configurations for these can either be stored in a config file or in the database. The case we handle in this symptom is when they are stored in config files, which is indicated by `keystone.conf [identity] domain_configurations_from_database` being set to false. 
The config files located in the directory specified by `keystone.conf [identity] domain_config_dir` should be in the form of `keystone..conf` and their contents should look something like this: [ldap] url = ldap://ldapservice.thecustomer.com query_scope = sub user_tree_dn = ou=Users,dc=openstack,dc=org user_objectclass = MyOrgPerson user_id_attribute = uid ... """ filedir = CONF.identity.domain_config_dir # NOTE(gagehugo): If domain_specific_drivers_enabled = false or # the value set in domain_config_dir is nonexistent/invalid, then # there is no point in continuing with this check. # symptom_LDAP_file_based_domain_specific_config will catch and # report this issue. if ( not CONF.identity.domain_specific_drivers_enabled or CONF.identity.domain_configurations_from_database or not os.path.isdir(filedir) ): return False invalid_files = [] for filename in os.listdir(filedir): if re.match(CONFIG_REGEX, filename): try: parser = configparser.ConfigParser() parser.read(os.path.join(filedir, filename)) except configparser.Error: invalid_files.append(filename) if invalid_files: invalid_str = ', '.join(invalid_files) print( 'Error: The following config files are formatted incorrectly: ', invalid_str, ) return True return False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/security_compliance.py0000664000175000017500000000456000000000000024001 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import re import keystone.conf CONF = keystone.conf.CONF def symptom_minimum_password_age_greater_than_expires_days(): """Minimum password age should be less than the password expires days. If the minimum password age is greater than or equal to the password expires days, then users would not be able to change their passwords before they expire. Ensure `[security_compliance] minimum_password_age` is less than the `[security_compliance] password_expires_days`. """ min_age = CONF.security_compliance.minimum_password_age expires = CONF.security_compliance.password_expires_days return (min_age >= expires) if (min_age > 0 and expires > 0) else False def symptom_invalid_password_regular_expression(): """Invalid password regular expression. The password regular expression is invalid and users will not be able to make password changes until this has been corrected. Ensure `[security_compliance] password_regex` is a valid regular expression. """ try: if CONF.security_compliance.password_regex: re.match(CONF.security_compliance.password_regex, 'password') return False except re.error: return True def symptom_password_regular_expression_description_not_set(): """Password regular expression description is not set. The password regular expression is set, but the description is not. Thus, if a user fails the password regular expression, they will not receive a message to explain why their requested password was insufficient. Ensure `[security_compliance] password_regex_description` is set with a description of your password regular expression in a language for humans. 
""" return ( CONF.security_compliance.password_regex and not CONF.security_compliance.password_regex_description ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/tokens.py0000664000175000017500000000301100000000000021231 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import keystone.conf CONF = keystone.conf.CONF def symptom_unreasonable_max_token_size(): """`keystone.conf [DEFAULT] max_token_size` should be adjusted. This option is intended to protect keystone from unreasonably sized tokens, where "reasonable" is mostly dependent on the `keystone.conf [token] provider` that you're using. If you're using one of the following token providers, then you should set `keystone.conf [DEFAULT] max_token_size` accordingly: - For Fernet, set `keystone.conf [DEFAULT] max_token_size = 255`, because Fernet tokens should never exceed this length in most deployments. However, if you are also using `keystone.conf [identity] driver = ldap`, Fernet tokens may not be built using an efficient packing method, depending on the IDs returned from LDAP, resulting in longer Fernet tokens (adjust your `max_token_size` accordingly). 
""" return 'fernet' in CONF.token.provider and CONF.max_token_size > 255 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/doctor/tokens_fernet.py0000664000175000017500000000335200000000000022604 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import fernet_utils as utils import keystone.conf CONF = keystone.conf.CONF def symptom_usability_of_Fernet_key_repository(): """Fernet key repository is not setup correctly. The Fernet key repository is expected to be readable by the user running keystone, but not world-readable, because it contains security-sensitive secrets. """ fernet_utils = utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) return ( 'fernet' in CONF.token.provider and not fernet_utils.validate_key_repository() ) def symptom_keys_in_Fernet_key_repository(): """Fernet key repository is empty. After configuring keystone to use the Fernet token provider, you should use `keystone-manage fernet_setup` to initially populate your key repository with keys, and periodically rotate your keys with `keystone-manage fernet_rotate`. 
""" fernet_utils = utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) return 'fernet' in CONF.token.provider and not fernet_utils.load_keys() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/idutils.py0000664000175000017500000001401700000000000020121 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_log import log from keystone.common import provider_api from keystone.common.validation import validators import keystone.conf from keystone import exception from keystone.identity.mapping_backends import mapping from keystone import notifications from keystone.server import backends CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class Identity: def __init__(self): backends.load_backends() self.user_id = None self.user_name = None self.user_password = None self.project_id = None self.project_name = None self.default_domain_id = CONF.identity.default_domain_id def project_setup(self): try: project_id = self.project_id if project_id is None: project_id = uuid.uuid4().hex project = { 'enabled': True, 'id': project_id, 'domain_id': self.default_domain_id, 'description': 'Bootstrap project for initializing the cloud.', 'name': self.project_name, } PROVIDERS.resource_api.create_project(project_id, project) LOG.info('Created project %s', self.project_name) 
except exception.Conflict: LOG.info( 'Project %s already exists, skipping creation.', self.project_name, ) project = PROVIDERS.resource_api.get_project_by_name( self.project_name, self.default_domain_id ) self.project_id = project['id'] def _create_user(self, user_ref, initiator=None): _self = PROVIDERS.identity_api.create_user.__self__ user = user_ref.copy() if 'password' in user: validators.validate_password(user['password']) user['name'] = user['name'].strip() user.setdefault('enabled', True) domain_id = user['domain_id'] PROVIDERS.resource_api.get_domain(domain_id) _self._assert_default_project_id_is_not_domain( user_ref.get('default_project_id') ) # For creating a user, the domain is in the object itself domain_id = user_ref['domain_id'] driver = _self._select_identity_driver(domain_id) user = _self._clear_domain_id_if_domain_unaware(driver, user) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. user['id'] = self.user_id ref = _self._create_user_with_federated_objects(user, driver) notifications.Audit.created(_self._USER, user['id'], initiator) return _self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) def user_setup(self): # NOTE(morganfainberg): Do not create the user if it already exists. try: user = PROVIDERS.identity_api.get_user_by_name( self.user_name, self.default_domain_id ) LOG.info( 'User %s already exists, skipping creation.', self.user_name ) if self.user_id is not None and user['id'] != self.user_id: msg = ( f'user `{self.user_name}` already exists ' f'with `{self.user_id}`' ) raise exception.Conflict(type='user_id', details=msg) # If the user is not enabled, re-enable them. This also helps # provide some useful logging output later. 
update = {} enabled = user['enabled'] if not enabled: update['enabled'] = True try: PROVIDERS.identity_api.driver.authenticate( user['id'], self.user_password ) except AssertionError: # This means that authentication failed and that we need to # update the user's password. This is going to persist a # revocation event that will make all previous tokens for the # user invalid, which is OK because it falls within the scope # of revocation. If a password changes, we shouldn't be able to # use tokens obtained with an old password. update['password'] = self.user_password # Only make a call to update the user if the password has changed # or the user was previously disabled. This allows bootstrap to act # as a recovery tool, without having to create a new user. if update: user = PROVIDERS.identity_api.update_user(user['id'], update) LOG.info('Reset password for user %s.', self.user_name) if not enabled and user['enabled']: # Although we always try to enable the user, this log # message only makes sense if we know that the user was # previously disabled. LOG.info('Enabled user %s.', self.user_name) except exception.UserNotFound: user = self._create_user( user_ref={ 'name': self.user_name, 'enabled': True, 'domain_id': self.default_domain_id, 'password': self.user_password, } ) LOG.info('Created user %s', self.user_name) self.user_id = user['id'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/manage.py0000664000175000017500000000244000000000000017671 0ustar00zuulzuul00000000000000# # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from keystone.cmd import cli # If ../../keystone/__init__.py exists, add ../../ to Python search path, so # that it will override what happens to be installed in # /usr/(local/)lib/python... possible_topdir = os.path.normpath( os.path.join(os.path.abspath(__file__), os.pardir, os.pardir, os.pardir) ) if os.path.exists(os.path.join(possible_topdir, 'keystone', '__init__.py')): sys.path.insert(0, possible_topdir) # entry point. def main(): developer_config = os.path.join(possible_topdir, 'etc', 'keystone.conf') if not os.path.exists(developer_config): developer_config = None cli.main(argv=sys.argv, developer_config_file=developer_config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/cmd/status.py0000664000175000017500000000772200000000000017774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import _checks from oslo_policy import policy from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from keystone.common import driver_hints from keystone.common import provider_api from keystone.common import rbac_enforcer import keystone.conf from keystone.server import backends CONF = keystone.conf.CONF ENFORCER = rbac_enforcer.RBACEnforcer PROVIDERS = provider_api.ProviderAPIs class Checks(upgradecheck.UpgradeCommands): """Programmable upgrade checks. Each method here should be a programmable check that helps check for things that might cause issues for deployers in the upgrade process. A good example of an upgrade check would be to ensure all roles defined in policies actually exist within the roles backend. """ def check_trust_policies_are_not_empty(self): enforcer = policy.Enforcer(CONF) ENFORCER.register_rules(enforcer) enforcer.load_rules() rules = [ 'identity:list_trusts', 'identity:delete_trust', 'identity:get_trust', 'identity:list_roles_for_trust', 'identity:get_role_for_trust', ] failed_rules = [] for rule in rules: current_rule = enforcer.rules.get(rule) if isinstance(current_rule, _checks.TrueCheck): failed_rules.append(rule) if any(failed_rules): return upgradecheck.Result( upgradecheck.Code.FAILURE, "Policy check string for rules \"%s\" are overridden to " "\"\", \"@\", or []. In the next release, this will cause " "these rules to be fully permissive as hardcoded enforcement " "will be removed. To correct this issue, either stop " "overriding these rules in config to accept the defaults, or " "explicitly set check strings that are not empty." % "\", \"".join(failed_rules), ) return upgradecheck.Result( upgradecheck.Code.SUCCESS, 'Trust policies are safe.' 
) def check_default_roles_are_immutable(self): hints = driver_hints.Hints() hints.add_filter('domain_id', None) # Only check global roles roles = PROVIDERS.role_api.list_roles(hints=hints) default_roles = ( 'admin', 'member', 'reader', ) failed_roles = [] for role in [r for r in roles if r['name'] in default_roles]: if not role.get('options', {}).get('immutable'): failed_roles.append(role['name']) if any(failed_roles): return upgradecheck.Result( upgradecheck.Code.FAILURE, "Roles are not immutable: %s" % ", ".join(failed_roles), ) return upgradecheck.Result( upgradecheck.Code.SUCCESS, "Default roles are immutable." ) _upgrade_checks = ( ( "Check trust policies are not empty", check_trust_policies_are_not_empty, ), ( "Check default roles are immutable", check_default_roles_are_immutable, ), ( "Policy File JSON to YAML Migration", (common_checks.check_policy_json, {'conf': CONF}), ), ) def main(): keystone.conf.configure() backends.load_backends() return upgradecheck.main(CONF, 'keystone', Checks()) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.506114 keystone-26.0.0/keystone/common/0000775000175000017500000000000000000000000016614 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/__init__.py0000664000175000017500000000000000000000000020713 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/authorization.py0000664000175000017500000000265100000000000022072 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 - 2012 Justin Santa Barbara # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # A couple common constants for Auth data # Header used to transmit the auth token AUTH_TOKEN_HEADER = 'X-Auth-Token' # nosec # Header used to transmit the auth receipt AUTH_RECEIPT_HEADER = 'Openstack-Auth-Receipt' # Header used to transmit the subject token SUBJECT_TOKEN_HEADER = 'X-Subject-Token' # nosec # Environment variable used to convey the Keystone auth context, # the user credential used for policy enforcement. AUTH_CONTEXT_ENV = 'KEYSTONE_AUTH_CONTEXT' # Header set by versions of keystonemiddleware that understand application # credential access rules ACCESS_RULES_HEADER = 'OpenStack-Identity-Access-Rules' ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.506114 keystone-26.0.0/keystone/common/cache/0000775000175000017500000000000000000000000017657 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/cache/__init__.py0000664000175000017500000000116000000000000021766 0ustar00zuulzuul00000000000000# Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.cache.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/cache/_context_cache.py0000664000175000017500000000725500000000000023210 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A dogpile.cache proxy that caches objects in the request local cache.""" from dogpile.cache import api from dogpile.cache import proxy from oslo_context import context as oslo_context from oslo_serialization import msgpackutils # Register our new handler. _registry = msgpackutils.default_registry def _register_model_handler(handler_class): """Register a new model handler.""" _registry.frozen = False _registry.register(handler_class(registry=_registry)) _registry.frozen = True class _ResponseCacheProxy(proxy.ProxyBackend): __key_pfx = '_request_cache_%s' def _get_request_context(self): # Return the current context or a new/empty context. 
return oslo_context.get_current() or oslo_context.RequestContext() def _get_request_key(self, key): return self.__key_pfx % key def _set_local_cache(self, key, value): # Set a serialized version of the returned value in local cache for # subsequent calls to the memoized method. ctx = self._get_request_context() serialize = {'payload': value.payload, 'metadata': value.metadata} setattr(ctx, self._get_request_key(key), msgpackutils.dumps(serialize)) def _get_local_cache(self, key): # Return the version from our local request cache if it exists. ctx = self._get_request_context() try: value = getattr(ctx, self._get_request_key(key)) except AttributeError: return api.NO_VALUE value = msgpackutils.loads(value) return api.CachedValue( payload=value['payload'], metadata=value['metadata'] ) def _delete_local_cache(self, key): # On invalidate/delete remove the value from the local request cache ctx = self._get_request_context() try: delattr(ctx, self._get_request_key(key)) except AttributeError: # nosec # NOTE(morganfainberg): We will simply pass here, this value has # not been cached locally in the request. 
pass def get(self, key): value = self._get_local_cache(key) if value is api.NO_VALUE: value = self.proxied.get(key) if value is not api.NO_VALUE: self._set_local_cache(key, value) return value def set(self, key, value): self._set_local_cache(key, value) self.proxied.set(key, value) def delete(self, key): self._delete_local_cache(key) self.proxied.delete(key) def get_multi(self, keys): values = {} for key in keys: v = self._get_local_cache(key) if v is not api.NO_VALUE: values[key] = v query_keys = set(keys).difference(set(values.keys())) values.update( dict(zip(query_keys, self.proxied.get_multi(query_keys))) ) return [values[k] for k in keys] def set_multi(self, mapping): for k, v in mapping.items(): self._set_local_cache(k, v) self.proxied.set_multi(mapping) def delete_multi(self, keys): for k in keys: self._delete_local_cache(k) self.proxied.delete_multi(keys) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/cache/core.py0000664000175000017500000001400300000000000021157 0ustar00zuulzuul00000000000000# Copyright 2013 Metacloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Keystone Caching Layer Implementation.""" import secrets from dogpile.cache import region from dogpile.cache import util from oslo_cache import core as cache from keystone.common.cache import _context_cache import keystone.conf CONF = keystone.conf.CONF class RegionInvalidationManager: REGION_KEY_PREFIX = '<<>>:' def __init__(self, invalidation_region, region_name): self._invalidation_region = invalidation_region self._region_key = self.REGION_KEY_PREFIX + region_name def _generate_new_id(self): return secrets.token_bytes(10) @property def region_id(self): return self._invalidation_region.get_or_create( self._region_key, self._generate_new_id, expiration_time=-1 ) def invalidate_region(self): new_region_id = self._generate_new_id() self._invalidation_region.set(self._region_key, new_region_id) return new_region_id def is_region_key(self, key): return key == self._region_key class DistributedInvalidationStrategy(region.RegionInvalidationStrategy): def __init__(self, region_manager): self._region_manager = region_manager def invalidate(self, hard=None): self._region_manager.invalidate_region() def is_invalidated(self, timestamp): return False def was_hard_invalidated(self): return False def is_hard_invalidated(self, timestamp): return False def was_soft_invalidated(self): return False def is_soft_invalidated(self, timestamp): return False def key_mangler_factory(invalidation_manager, orig_key_mangler): def key_mangler(key): # NOTE(dstanek): Since *all* keys go through the key mangler we # need to make sure the region keys don't get the region_id added. # If it were there would be no way to get to it, making the cache # effectively useless. if not invalidation_manager.is_region_key(key): key = f'{key}:{invalidation_manager.region_id}' if orig_key_mangler: key = orig_key_mangler(key) return key return key_mangler def create_region(name): """Create a dopile region. Wraps oslo_cache.core.create_region. 
This is used to ensure that the Region is properly patched and allows us to more easily specify a region name. :param str name: The region name :returns: The new region. :rtype: :class:`dogpile.cache.region.CacheRegion` """ region = cache.create_region() region.name = name # oslo.cache doesn't allow this yet return region CACHE_REGION = create_region(name='shared default') CACHE_INVALIDATION_REGION = create_region(name='invalidation region') register_model_handler = _context_cache._register_model_handler def configure_cache(region=None): if region is None: region = CACHE_REGION # NOTE(morganfainberg): running cache.configure_cache_region() # sets region.is_configured, this must be captured before # cache.configure_cache_region is called. configured = region.is_configured cache.configure_cache_region(CONF, region) # Only wrap the region if it was not configured. This should be pushed # to oslo_cache lib somehow. if not configured: region.wrap(_context_cache._ResponseCacheProxy) region_manager = RegionInvalidationManager( CACHE_INVALIDATION_REGION, region.name ) region.key_mangler = key_mangler_factory( region_manager, region.key_mangler ) region.region_invalidator = DistributedInvalidationStrategy( region_manager ) def _sha1_mangle_key(key): """Wrapper for dogpile's sha1_mangle_key. dogpile's sha1_mangle_key function expects an encoded string, so we should take steps to properly handle multiple inputs before passing the key through. NOTE(dstanek): this was copied directly from olso_cache """ try: key = key.encode('utf-8', errors='xmlcharrefreplace') except (UnicodeError, AttributeError): # NOTE(stevemar): if encoding fails just continue anyway. pass return util.sha1_mangle_key(key) def configure_invalidation_region(): if CACHE_INVALIDATION_REGION.is_configured: return # NOTE(dstanek): Configuring this region manually so that we control the # expiration and can ensure that the keys don't expire. 
config_dict = cache._build_cache_config(CONF) config_dict['expiration_time'] = None # we don't want an expiration CACHE_INVALIDATION_REGION.configure_from_config( config_dict, '%s.' % CONF.cache.config_prefix ) # NOTE(breton): Wrap the cache invalidation region to avoid excessive # calls to memcached, which would result in poor performance. CACHE_INVALIDATION_REGION.wrap(_context_cache._ResponseCacheProxy) # NOTE(morganfainberg): if the backend requests the use of a # key_mangler, we should respect that key_mangler function. If a # key_mangler is not defined by the backend, use the sha1_mangle_key # mangler provided by dogpile.cache. This ensures we always use a fixed # size cache-key. if CACHE_INVALIDATION_REGION.key_mangler is None: CACHE_INVALIDATION_REGION.key_mangler = _sha1_mangle_key def get_memoization_decorator(group, expiration_group=None, region=None): if region is None: region = CACHE_REGION return cache.get_memoization_decorator( CONF, region, group, expiration_group=expiration_group ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/context.py0000664000175000017500000000546600000000000020665 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_context import context as oslo_context REQUEST_CONTEXT_ENV = 'keystone.oslo_request_context' def _prop(name): return property( lambda x: getattr(x, name), lambda x, y: setattr(x, name, y) ) class RequestContext(oslo_context.RequestContext): def __init__(self, **kwargs): self.username = kwargs.pop('username', None) self.project_tag_name = kwargs.pop('project_tag_name', None) self.is_delegated_auth = kwargs.pop('is_delegated_auth', False) self.trust_id = kwargs.pop('trust_id', None) self.trustor_id = kwargs.pop('trustor_id', None) self.trustee_id = kwargs.pop('trustee_id', None) self.oauth_consumer_id = kwargs.pop('oauth_consumer_id', None) self.oauth_access_token_id = kwargs.pop('oauth_access_token_id', None) self.authenticated = kwargs.pop('authenticated', False) super().__init__(**kwargs) def to_policy_values(self): """Add keystone-specific policy values to policy representation. This method converts generic policy values to a dictionary form using the base implementation from oslo_context.context.RequestContext. Afterwards, it is going to pull keystone-specific values off the context and represent them as items in the policy values dictionary. This is because keystone uses default policies that rely on these values, so we need to guarantee they are present during policy enforcement if they are present on the context object. This method is automatically called in oslo_policy.policy.Enforcer.enforce() if oslo.policy knows it's dealing with a context object. """ # TODO(morgan): Rework this to not need an explicit token render as # this is a generally poorly designed behavior. The enforcer should not # rely on a contract of the token's rendered JSON form. This likely # needs reworking of how we handle the context in oslo.policy. 
Until # this is reworked, it is not possible to merge the token render # function into keystone.api values = super().to_policy_values() values['token'] = self.token_reference['token'] values['domain_id'] = self.domain_id if self.domain_id else None return values ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/driver_hints.py0000664000175000017500000001077400000000000021677 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from keystone import exception from keystone.i18n import _ def truncated(f): """Ensure list truncation is detected in Driver list entity methods. This is designed to wrap Driver list_{entity} methods in order to calculate if the resultant list has been truncated. Provided a limit dict is found in the hints list, we increment the limit by one so as to ask the wrapped function for one more entity than the limit, and then once the list has been generated, we check to see if the original limit has been exceeded, in which case we truncate back to that limit and set the 'truncated' boolean to 'true' in the hints limit dict. 
""" @functools.wraps(f) def wrapper(self, hints, *args, **kwargs): if not hasattr(hints, 'limit'): raise exception.UnexpectedError( _( 'Cannot truncate a driver call without hints list as ' 'first parameter after self ' ) ) if hints.limit is None or hints.filters: return f(self, hints, *args, **kwargs) # A limit is set, so ask for one more entry than we need list_limit = hints.limit['limit'] hints.set_limit(list_limit + 1) ref_list = f(self, hints, *args, **kwargs) # If we got more than the original limit then trim back the list and # mark it truncated. In both cases, make sure we set the limit back # to its original value. if len(ref_list) > list_limit: hints.set_limit(list_limit, truncated=True) return ref_list[:list_limit] else: hints.set_limit(list_limit) return ref_list return wrapper class Hints: """Encapsulate driver hints for listing entities. Hints are modifiers that affect the return of entities from a list_ operation. They are typically passed to a driver to give direction as to what filtering, pagination or list limiting actions are being requested. It is optional for a driver to action some or all of the list hints, but any filters that it does satisfy must be marked as such by calling removing the filter from the list. A Hint object contains filters, which is a list of dicts that can be accessed publicly. Also it contains a dict called limit, which will indicate the amount of data we want to limit our listing to. If the filter is discovered to never match, then `cannot_match` can be set to indicate that there will not be any matches and the backend work can be short-circuited. 
Each filter term consists of: * ``name``: the name of the attribute being matched * ``value``: the value against which it is being matched * ``comparator``: the operation, which can be one of ``equals``, ``contains``, ``startswith`` or ``endswith`` * ``case_sensitive``: whether any comparison should take account of case """ def __init__(self): self.limit = None self.filters = list() self.cannot_match = False def add_filter( self, name, value, comparator='equals', case_sensitive=False ): """Add a filter to the filters list, which is publicly accessible.""" self.filters.append( { 'name': name, 'value': value, 'comparator': comparator, 'case_sensitive': case_sensitive, } ) def get_exact_filter_by_name(self, name): """Return a filter key and value if exact filter exists for name.""" for entry in self.filters: if entry['name'] == name and entry['comparator'] == 'equals': return entry def set_limit(self, limit, truncated=False): """Set a limit to indicate the list should be truncated.""" self.limit = {'limit': limit, 'truncated': truncated} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/common/fernet_utils.py0000664000175000017500000002760300000000000021701 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import os import stat from cryptography import fernet from oslo_log import log from keystone.common import utils import keystone.conf LOG = log.getLogger(__name__) CONF = keystone.conf.CONF # NOTE(lbragstad): In the event there are no encryption keys on disk, let's use # a default one until a proper key repository is set up. This allows operators # to gracefully upgrade from Mitaka to Newton without a key repository, # especially in multi-node deployments. The NULL_KEY is specific to credential # encryption only and has absolutely no beneficial purpose outside of easing # upgrades. NULL_KEY = base64.urlsafe_b64encode(b'\x00' * 32) class FernetUtils: def __init__(self, key_repository, max_active_keys, config_group): self.key_repository = key_repository self.max_active_keys = max_active_keys self.config_group = config_group def validate_key_repository(self, requires_write=False): """Validate permissions on the key repository directory.""" # NOTE(lbragstad): We shouldn't need to check if the directory was # passed in as None because we don't set allow_no_values to True. 
# ensure current user has sufficient access to the key repository is_valid = os.access(self.key_repository, os.R_OK) and os.access( self.key_repository, os.X_OK ) if requires_write: is_valid = is_valid and os.access(self.key_repository, os.W_OK) if not is_valid: LOG.error( 'Either [%(config_group)s] key_repository does not exist ' 'or Keystone does not have sufficient permission to ' 'access it: %(key_repo)s', { 'key_repo': self.key_repository, 'config_group': self.config_group, }, ) else: # ensure the key repository isn't world-readable stat_info = os.stat(self.key_repository) if ( stat_info.st_mode & stat.S_IROTH or stat_info.st_mode & stat.S_IXOTH ): LOG.warning( 'key_repository is world readable: %s', self.key_repository ) return is_valid def create_key_directory( self, keystone_user_id=None, keystone_group_id=None ): """Attempt to create the key directory if it doesn't exist.""" utils.create_directory( self.key_repository, keystone_user_id=keystone_user_id, keystone_group_id=keystone_group_id, ) def _create_new_key(self, keystone_user_id, keystone_group_id): """Securely create a new encryption key. Create a new key that is readable by the Keystone group and Keystone user. To avoid disk write failure, this function will create a tmp key file first, and then rename it as the valid new key. """ self._create_tmp_new_key(keystone_user_id, keystone_group_id) self._become_valid_new_key() def _create_tmp_new_key(self, keystone_user_id, keystone_group_id): """Securely create a new tmp encryption key. This created key is not effective until _become_valid_new_key(). 
""" key = fernet.Fernet.generate_key() # key is bytes # This ensures the key created is not world-readable old_umask = os.umask(0o177) if keystone_user_id and keystone_group_id: old_egid = os.getegid() old_euid = os.geteuid() os.setegid(keystone_group_id) os.seteuid(keystone_user_id) elif keystone_user_id or keystone_group_id: LOG.warning( 'Unable to change the ownership of the new key without a ' 'keystone user ID and keystone group ID both being provided: ' '%s', self.key_repository, ) # Determine the file name of the new key key_file = os.path.join(self.key_repository, '0.tmp') create_success = False try: with open(key_file, 'w') as f: # convert key to str for the file. f.write(key.decode('utf-8')) f.flush() create_success = True except OSError: LOG.error('Failed to create new temporary key: %s', key_file) raise finally: # After writing the key, set the umask back to it's original value. # Do the same with group and user identifiers if a Keystone group # or user was supplied. os.umask(old_umask) if keystone_user_id and keystone_group_id: os.seteuid(old_euid) os.setegid(old_egid) # Deal with the tmp key file if not create_success and os.access(key_file, os.F_OK): os.remove(key_file) LOG.info('Created a new temporary key: %s', key_file) def _become_valid_new_key(self): """Make the tmp new key a valid new key. The tmp new key must be created by _create_tmp_new_key(). 
""" tmp_key_file = os.path.join(self.key_repository, '0.tmp') valid_key_file = os.path.join(self.key_repository, '0') os.rename(tmp_key_file, valid_key_file) LOG.info('Become a valid new key: %s', valid_key_file) def _get_key_files(self, key_repo): key_files = dict() keys = dict() for filename in os.listdir(key_repo): path = os.path.join(key_repo, str(filename)) if os.path.isfile(path): with open(path) as key_file: try: key_id = int(filename) except ValueError: # nosec : name is not a number pass else: key = key_file.read() if len(key) == 0: LOG.warning( 'Ignoring empty key found in key ' 'repository: %s', path, ) continue key_files[key_id] = path keys[key_id] = key return key_files, keys def initialize_key_repository( self, keystone_user_id=None, keystone_group_id=None ): """Create a key repository and bootstrap it with a key. :param keystone_user_id: User ID of the Keystone user. :param keystone_group_id: Group ID of the Keystone user. """ # make sure we have work to do before proceeding if os.access(os.path.join(self.key_repository, '0'), os.F_OK): LOG.info('Key repository is already initialized; aborting.') return # bootstrap an existing key self._create_new_key(keystone_user_id, keystone_group_id) # ensure that we end up with a primary and secondary key self.rotate_keys(keystone_user_id, keystone_group_id) def rotate_keys(self, keystone_user_id=None, keystone_group_id=None): """Create a new primary key and revoke excess active keys. :param keystone_user_id: User ID of the Keystone user. :param keystone_group_id: Group ID of the Keystone user. Key rotation utilizes the following behaviors: - The highest key number is used as the primary key (used for encryption). - All keys can be used for decryption. - New keys are always created as key "0," which serves as a placeholder before promoting it to be the primary key. 
This strategy allows you to safely perform rotation on one node in a cluster, before syncing the results of the rotation to all other nodes (during both key rotation and synchronization, all nodes must recognize all primary keys). """ # read the list of key files key_files, _ = self._get_key_files(self.key_repository) LOG.info( 'Starting key rotation with %(count)s key files: %(list)s', {'count': len(key_files), 'list': list(key_files.values())}, ) # add a tmp new key to the rotation, which will be the *next* primary self._create_tmp_new_key(keystone_user_id, keystone_group_id) # determine the number of the new primary key current_primary_key = max(key_files.keys()) LOG.info('Current primary key is: %s', current_primary_key) new_primary_key = current_primary_key + 1 LOG.info('Next primary key will be: %s', new_primary_key) # promote the next primary key to be the primary os.rename( os.path.join(self.key_repository, '0'), os.path.join(self.key_repository, str(new_primary_key)), ) key_files.pop(0) key_files[new_primary_key] = os.path.join( self.key_repository, str(new_primary_key) ) LOG.info('Promoted key 0 to be the primary: %s', new_primary_key) # rename the tmp key to the real staged key self._become_valid_new_key() max_active_keys = self.max_active_keys # purge excess keys # Note that key_files doesn't contain the new active key that was # created, only the old active keys. keys = sorted(key_files.keys(), reverse=True) while len(keys) > (max_active_keys - 1): index_to_purge = keys.pop() key_to_purge = key_files[index_to_purge] LOG.info('Excess key to purge: %s', key_to_purge) os.remove(key_to_purge) def load_keys(self, use_null_key=False): """Load keys from disk into a list. The first key in the list is the primary key used for encryption. All other keys are active secondary keys that can be used for decrypting tokens. :param use_null_key: If true, a known key containing null bytes will be appended to the list of returned keys. 
""" if not self.validate_key_repository(): if use_null_key: return [NULL_KEY] return [] # build a dictionary of key_number:encryption_key pairs _, keys = self._get_key_files(self.key_repository) if len(keys) != self.max_active_keys: # Once the number of keys matches max_active_keys, this log entry # is too repetitive to be useful. Also note that it only makes # sense to log this message for tokens since credentials doesn't # have a `max_active_key` configuration option. if self.key_repository == CONF.fernet_tokens.key_repository: msg = ( 'Loaded %(count)d Fernet keys from %(dir)s, but ' '`[fernet_tokens] max_active_keys = %(max)d`; perhaps ' 'there have not been enough key rotations to reach ' '`max_active_keys` yet?' ) LOG.debug( msg, { 'count': len(keys), 'max': self.max_active_keys, 'dir': self.key_repository, }, ) # return the encryption_keys, sorted by key number, descending key_list = [keys[x] for x in sorted(keys.keys(), reverse=True)] if use_null_key: key_list.append(NULL_KEY) return key_list ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/json_home.py0000664000175000017500000001117700000000000021156 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import typing as ty from oslo_serialization import jsonutils from keystone import exception from keystone.i18n import _ def build_v3_resource_relation(resource_name): return ( 'https://docs.openstack.org/api/openstack-identity/3/rel/%s' % resource_name ) def build_v3_extension_resource_relation( extension_name, extension_version, resource_name ): return ( 'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel/' '%s' % (extension_name, extension_version, resource_name) ) def build_v3_parameter_relation(parameter_name): return ( 'https://docs.openstack.org/api/openstack-identity/3/param/%s' % parameter_name ) def build_v3_extension_parameter_relation( extension_name, extension_version, parameter_name ): return ( 'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/param/' '%s' % (extension_name, extension_version, parameter_name) ) class Parameters: """Relationships for Common parameters.""" DOMAIN_ID = build_v3_parameter_relation('domain_id') ENDPOINT_ID = build_v3_parameter_relation('endpoint_id') GROUP_ID = build_v3_parameter_relation('group_id') POLICY_ID = build_v3_parameter_relation('policy_id') PROJECT_ID = build_v3_parameter_relation('project_id') REGION_ID = build_v3_parameter_relation('region_id') ROLE_ID = build_v3_parameter_relation('role_id') SERVICE_ID = build_v3_parameter_relation('service_id') USER_ID = build_v3_parameter_relation('user_id') TAG_VALUE = build_v3_parameter_relation('tag_value') REGISTERED_LIMIT_ID = build_v3_parameter_relation('registered_limit_id') LIMIT_ID = build_v3_parameter_relation('limit_id') APPLICATION_CRED_ID = build_v3_parameter_relation( 'application_credential_id' ) ACCESS_RULE_ID = build_v3_parameter_relation('access_rule_id') class Status: """Status values supported.""" DEPRECATED = 'deprecated' EXPERIMENTAL = 'experimental' STABLE = 'stable' @classmethod def update_resource_data(cls, resource_data, status): if status is cls.STABLE: # We currently do not add a status if the resource is stable, the # 
absence of the status property can be taken as meaning that the # resource is stable. return if status is cls.DEPRECATED or status is cls.EXPERIMENTAL: resource_data['hints'] = {'status': status} return raise exception.Error( message=_('Unexpected status requested for JSON Home response, %s') % status ) class JsonHomeResources: """JSON Home resource data.""" __resources: dict = {} __serialized_resource_data: ty.Optional[str] = None @classmethod def _reset(cls): # NOTE(morgan): this will reset all json home resource definitions. # This is only used for testing. cls.__resources.clear() cls.__serialized_resource_data = None @classmethod def append_resource(cls, rel, data): cls.__resources[rel] = data cls.__serialized_resource_data = None @classmethod def resources(cls): # NOTE(morgan): We use a serialized form of the resource data to # ensure that the raw data is not changed by processing, this method # simply populates the serialized store if it is not already populated. # Any changes to this class storage object will result in clearing # the serialized data value. if cls.__serialized_resource_data is None: cls.__serialized_resource_data = jsonutils.dumps(cls.__resources) return {'resources': jsonutils.loads(cls.__serialized_resource_data)} def translate_urls(json_home, new_prefix): """Given a JSON Home document, sticks new_prefix on each of the urls.""" for dummy_rel, resource in json_home['resources'].items(): if 'href' in resource: resource['href'] = new_prefix + resource['href'] elif 'href-template' in resource: resource['href-template'] = new_prefix + resource['href-template'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/jwt_utils.py0000664000175000017500000000316500000000000021217 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from cryptography.hazmat.backends import default_backend from cryptography.hazmat.primitives.asymmetric import ec from cryptography.hazmat.primitives import serialization def create_jws_keypair(private_key_path, public_key_path): """Create an ECDSA key pair using an secp256r1, or NIST P-256, curve. :param private_key_path: location to save the private key :param public_key_path: location to save the public key """ private_key = ec.generate_private_key(ec.SECP256R1(), default_backend()) with open(private_key_path, 'wb') as f: f.write( private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.PKCS8, encryption_algorithm=serialization.NoEncryption(), ) ) public_key = private_key.public_key() with open(public_key_path, 'wb') as f: f.write( public_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.SubjectPublicKeyInfo, ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/manager.py0000664000175000017500000001677300000000000020616 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import inspect import time import types from oslo_log import log import stevedore from keystone.common import provider_api from keystone.i18n import _ LOG = log.getLogger(__name__) def response_truncated(f): """Truncate the list returned by the wrapped function. This is designed to wrap Manager list_{entity} methods to ensure that any list limits that are defined are passed to the driver layer. If a hints list is provided, the wrapper will insert the relevant limit into the hints so that the underlying driver call can try and honor it. If the driver does truncate the response, it will update the 'truncated' attribute in the 'limit' entry in the hints list, which enables the caller of this function to know if truncation has taken place. If, however, the driver layer is unable to perform truncation, the 'limit' entry is simply left in the hints list for the caller to handle. A _get_list_limit() method is required to be present in the object class hierarchy, which returns the limit for this backend to which we will truncate. If a hints list is not provided in the arguments of the wrapped call then any limits set in the config file are ignored. This allows internal use of such wrapped methods where the entire data set is needed as input for the calculations of some other API (e.g. get role assignments for a given project). 
""" @functools.wraps(f) def wrapper(self, *args, **kwargs): if kwargs.get('hints') is None: return f(self, *args, **kwargs) list_limit = self.driver._get_list_limit() if list_limit: kwargs['hints'].set_limit(list_limit) return f(self, *args, **kwargs) return wrapper def load_driver(namespace, driver_name, *args): try: driver_manager = stevedore.DriverManager( namespace, driver_name, invoke_on_load=True, invoke_args=args ) return driver_manager.driver except stevedore.exception.NoMatches: msg = _('Unable to find %(name)r driver in %(namespace)r.') raise ImportError(msg % {'name': driver_name, 'namespace': namespace}) class _TraceMeta(type): """A metaclass that, in trace mode, will log entry and exit of methods. This metaclass automatically wraps all methods on the class when instantiated with a decorator that will log entry/exit from a method when keystone is run in Trace log level. """ @staticmethod def wrapper(__f, __classname): __argspec = inspect.getfullargspec(__f) __fn_info = '{module}.{classname}.{funcname}'.format( module=inspect.getmodule(__f).__name__, classname=__classname, funcname=__f.__name__, ) # NOTE(morganfainberg): Omit "cls" and "self" when printing trace logs # the index can be calculated at wrap time rather than at runtime. 
if __argspec.args and __argspec.args[0] in ('self', 'cls'): __arg_idx = 1 else: __arg_idx = 0 @functools.wraps(__f) def wrapped(*args, **kwargs): __exc = None __t = time.time() __do_trace = LOG.logger.getEffectiveLevel() <= log.TRACE __ret_val = None try: if __do_trace: LOG.trace('CALL => %s', __fn_info) __ret_val = __f(*args, **kwargs) except Exception as e: # nosec __exc = e raise finally: if __do_trace: __subst = { 'run_time': (time.time() - __t), 'passed_args': ', '.join( [ ', '.join([repr(a) for a in args[__arg_idx:]]), ', '.join( [f'{k}={v!r}' for k, v in kwargs.items()] ), ] ), 'function': __fn_info, 'exception': __exc, 'ret_val': __ret_val, } if __exc is not None: __msg = ( '[%(run_time)ss] %(function)s ' '(%(passed_args)s) => raised ' '%(exception)r' ) else: # TODO(morganfainberg): find a way to indicate if this # was a cache hit or cache miss. __msg = ( '[%(run_time)ss] %(function)s' '(%(passed_args)s) => %(ret_val)r' ) LOG.trace(__msg, __subst) return __ret_val return wrapped def __new__(meta, classname, bases, class_dict): final_cls_dict = {} for attr_name, attr in class_dict.items(): # NOTE(morganfainberg): only wrap public instances and methods. if isinstance( attr, types.FunctionType ) and not attr_name.startswith('_'): attr = _TraceMeta.wrapper(attr, classname) final_cls_dict[attr_name] = attr return type.__new__(meta, classname, bases, final_cls_dict) class Manager(metaclass=_TraceMeta): """Base class for intermediary request layer. The Manager layer exists to support additional logic that applies to all or some of the methods exposed by a service that are not specific to the HTTP interface. It also provides a stable entry point to dynamic backends. An example of a probable use case is logging all the calls. """ driver_namespace: str _provides_api: str def __init__(self, driver_name): if self._provides_api is None: raise ValueError( 'Programming Error: All managers must provide an ' 'API that can be referenced by other components ' 'of Keystone.' 
) if driver_name is not None: self.driver = load_driver(self.driver_namespace, driver_name) self.__register_provider_api() def __register_provider_api(self): provider_api.ProviderAPIs._register_provider_api( name=self._provides_api, obj=self ) def __getattr__(self, name): """Forward calls to the underlying driver. This method checks for a provider api before forwarding. """ try: return getattr(provider_api.ProviderAPIs, name) except AttributeError: # NOTE(morgan): We didn't find a provider api, move on and # forward to the driver as expected. pass f = getattr(self.driver, name) if callable(f): # NOTE(dstanek): only if this is callable (class or function) # cache this setattr(self, name, f) return f ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/password_hashing.py0000664000175000017500000001336000000000000022534 0ustar00zuulzuul00000000000000# Copyright 2017 Red Hat # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from oslo_log import log import passlib.hash import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) SUPPORTED_HASHERS = frozenset( [ passlib.hash.bcrypt, passlib.hash.bcrypt_sha256, passlib.hash.scrypt, passlib.hash.pbkdf2_sha512, passlib.hash.sha512_crypt, ] ) _HASHER_NAME_MAP = {hasher.name: hasher for hasher in SUPPORTED_HASHERS} # NOTE(notmorgan): Build the list of prefixes. This comprehension builds # a dictionary where the keys are the prefix (all hashedpasswords are # '$$$') so we can do a fast-lookup on the hasher to # use. If has hasher has multiple ident options it is encoded in the # .ident_values attribute whereas hashers that have a single option # ( ) only has the .ident attribute. # NOTE(noonedeadpunk): Though bcrypt_sha256 does define as part of # the metadata, actual indent is represented with a instead. def _get_hash_ident(hashers): for hasher in hashers: if hasattr(hasher, 'prefix'): ident = (getattr(hasher, 'prefix'),) elif hasattr(hasher, 'ident_values'): ident = getattr(hasher, 'ident_values') else: ident = (getattr(hasher, 'ident'),) yield (hasher, ident) _HASHER_IDENT_MAP = { prefix: module for module, prefix in itertools.chain( *[ zip([mod] * len(ident), ident) for mod, ident in _get_hash_ident(SUPPORTED_HASHERS) ] ) } def _get_hasher_from_ident(hashed): try: return _HASHER_IDENT_MAP[hashed[0 : hashed.index('$', 1) + 1]] except KeyError: raise ValueError( _('Unsupported password hashing algorithm ident: %s') % hashed[0 : hashed.index('$', 1) + 1] ) def verify_length_and_trunc_password(password): """Verify and truncate the provided password to the max_password_length. We also need to check that the configured password hashing algorithm does not silently truncate the password. 
For example, passlib.hash.bcrypt does this: https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#security-issues """ # When using bcrypt, we limit the password length to 54 to ensure all # bytes are fully mixed. See: # https://passlib.readthedocs.io/en/stable/lib/passlib.hash.bcrypt.html#security-issues BCRYPT_MAX_LENGTH = 72 if ( CONF.identity.password_hash_algorithm == 'bcrypt' # nosec: B105 and CONF.identity.max_password_length > BCRYPT_MAX_LENGTH ): msg = "Truncating password to algorithm specific maximum length %d characters." LOG.warning(msg, BCRYPT_MAX_LENGTH) max_length = BCRYPT_MAX_LENGTH else: max_length = CONF.identity.max_password_length try: password_utf8 = password.encode('utf-8') if len(password_utf8) > max_length: if CONF.strict_password_check: raise exception.PasswordVerificationError(size=max_length) else: msg = "Truncating user password to %d characters." LOG.warning(msg, max_length) return password_utf8[:max_length] else: return password_utf8 except AttributeError: raise exception.ValidationError(attribute='string', target='password') def check_password(password, hashed): """Check that a plaintext password matches hashed. hashpw returns the salt value concatenated with the actual hash value. It extracts the actual salt if this value is then passed as the salt. """ if password is None or hashed is None: return False password_utf8 = verify_length_and_trunc_password(password) hasher = _get_hasher_from_ident(hashed) return hasher.verify(password_utf8, hashed) def hash_user_password(user): """Hash a user dict's password without modifying the passed-in dict.""" password = user.get('password') if password is None: return user return dict(user, password=hash_password(password)) def hash_password(password): """Hash a password. 
Harder.""" params = {} password_utf8 = verify_length_and_trunc_password(password) conf_hasher = CONF.identity.password_hash_algorithm hasher = _HASHER_NAME_MAP.get(conf_hasher) if hasher is None: raise RuntimeError( _('Password Hash Algorithm %s not found') % CONF.identity.password_hash_algorithm ) if CONF.identity.password_hash_rounds: params['rounds'] = CONF.identity.password_hash_rounds if hasher is passlib.hash.scrypt: if CONF.identity.scrypt_block_size: params['block_size'] = CONF.identity.scrypt_block_size if CONF.identity.scrypt_parallelism: params['parallelism'] = CONF.identity.scrypt_parallelism if CONF.identity.salt_bytesize: params['salt_size'] = CONF.identity.salt_bytesize if hasher is passlib.hash.pbkdf2_sha512: if CONF.identity.salt_bytesize: params['salt_size'] = CONF.identity.salt_bytesize return hasher.using(**params).hash(password_utf8) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.510114 keystone-26.0.0/keystone/common/policies/0000775000175000017500000000000000000000000020423 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/__init__.py0000664000175000017500000000645000000000000022541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from keystone.common.policies import access_rule from keystone.common.policies import access_token from keystone.common.policies import application_credential from keystone.common.policies import auth from keystone.common.policies import base from keystone.common.policies import consumer from keystone.common.policies import credential from keystone.common.policies import domain from keystone.common.policies import domain_config from keystone.common.policies import ec2_credential from keystone.common.policies import endpoint from keystone.common.policies import endpoint_group from keystone.common.policies import grant from keystone.common.policies import group from keystone.common.policies import identity_provider from keystone.common.policies import implied_role from keystone.common.policies import limit from keystone.common.policies import mapping from keystone.common.policies import policy from keystone.common.policies import policy_association from keystone.common.policies import project from keystone.common.policies import project_endpoint from keystone.common.policies import protocol from keystone.common.policies import region from keystone.common.policies import registered_limit from keystone.common.policies import revoke_event from keystone.common.policies import role from keystone.common.policies import role_assignment from keystone.common.policies import service from keystone.common.policies import service_provider from keystone.common.policies import token from keystone.common.policies import token_revocation from keystone.common.policies import trust from keystone.common.policies import user def list_rules(): return itertools.chain( base.list_rules(), access_rule.list_rules(), access_token.list_rules(), application_credential.list_rules(), auth.list_rules(), consumer.list_rules(), credential.list_rules(), domain.list_rules(), domain_config.list_rules(), ec2_credential.list_rules(), endpoint.list_rules(), endpoint_group.list_rules(), 
grant.list_rules(), group.list_rules(), identity_provider.list_rules(), implied_role.list_rules(), limit.list_rules(), mapping.list_rules(), policy.list_rules(), policy_association.list_rules(), project.list_rules(), project_endpoint.list_rules(), protocol.list_rules(), region.list_rules(), registered_limit.list_rules(), revoke_event.list_rules(), role.list_rules(), role_assignment.list_rules(), service.list_rules(), service_provider.list_rules(), token_revocation.list_rules(), token.list_rules(), trust.list_rules(), user.list_rules(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/access_rule.py0000664000175000017500000000400200000000000023261 0ustar00zuulzuul00000000000000# Copyright 2019 SUSE LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from keystone.common.policies import base collection_path = '/v3/users/{user_id}/access_rules' resource_path = collection_path + '/{access_rule_id}' SYSTEM_READER_OR_OWNER = ( '(' + base.SYSTEM_READER + ') or user_id:%(target.user.id)s' ) SYSTEM_ADMIN_OR_OWNER = ( '(' + base.SYSTEM_ADMIN + ') or user_id:%(target.user.id)s' ) access_rule_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_access_rule', check_str=SYSTEM_READER_OR_OWNER, scope_types=['system', 'project'], description='Show access rule details.', operations=[ {'path': resource_path, 'method': 'GET'}, {'path': resource_path, 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_access_rules', check_str=SYSTEM_READER_OR_OWNER, scope_types=['system', 'project'], description='List access rules for a user.', operations=[ {'path': collection_path, 'method': 'GET'}, {'path': collection_path, 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_access_rule', check_str=SYSTEM_ADMIN_OR_OWNER, scope_types=['system', 'project'], description='Delete an access_rule.', operations=[{'path': resource_path, 'method': 'DELETE'}], ), ] def list_rules(): return access_rule_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/access_token.py0000664000175000017500000000702700000000000023444 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from keystone.common.policies import base access_token_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'authorize_request_token', check_str=base.RULE_ADMIN_REQUIRED, # Since access tokens require a request token and request tokens # require a project, it makes sense to have a project-scoped token in # order to access these APIs. scope_types=['project'], description='Authorize OAUTH1 request token.', operations=[ { 'path': '/v3/OS-OAUTH1/authorize/{request_token_id}', 'method': 'PUT', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_access_token', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['project'], description='Get OAUTH1 access token for user by access token ID.', operations=[ { 'path': ( '/v3/users/{user_id}/OS-OAUTH1/access_tokens/' '{access_token_id}' ), 'method': 'GET', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_access_token_role', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['project'], description='Get role for user OAUTH1 access token.', operations=[ { 'path': ( '/v3/users/{user_id}/OS-OAUTH1/access_tokens/' '{access_token_id}/roles/{role_id}' ), 'method': 'GET', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_access_tokens', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['project'], description='List OAUTH1 access tokens for user.', operations=[ { 'path': '/v3/users/{user_id}/OS-OAUTH1/access_tokens', 'method': 'GET', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_access_token_roles', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['project'], description='List OAUTH1 access token roles.', operations=[ { 'path': ( '/v3/users/{user_id}/OS-OAUTH1/access_tokens/' '{access_token_id}/roles' ), 'method': 'GET', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_access_token', check_str=base.RULE_ADMIN_REQUIRED, 
scope_types=['project'], description='Delete OAUTH1 access token.', operations=[ { 'path': ( '/v3/users/{user_id}/OS-OAUTH1/access_tokens/' '{access_token_id}' ), 'method': 'DELETE', } ], ), ] def list_rules(): return access_token_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/application_credential.py0000664000175000017500000000656300000000000025504 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base collection_path = '/v3/users/{user_id}/application_credentials' resource_path = collection_path + '/{application_credential_id}' DEPRECATED_REASON = ( "The application credential API is now aware of system scope and default " "roles." 
) deprecated_list_application_credentials_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'list_application_credentials', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_application_credentials_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'get_application_credential', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_application_credentials_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'delete_application_credential', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) application_credential_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_application_credential', check_str=base.ADMIN_OR_SYSTEM_READER_OR_OWNER, scope_types=['system', 'project'], description='Show application credential details.', operations=[ {'path': resource_path, 'method': 'GET'}, {'path': resource_path, 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_application_credentials_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_application_credentials', check_str=base.ADMIN_OR_SYSTEM_READER_OR_OWNER, scope_types=['system', 'project'], description='List application credentials for a user.', operations=[ {'path': collection_path, 'method': 'GET'}, {'path': collection_path, 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_application_credentials_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_application_credential', check_str=base.RULE_OWNER, scope_types=['project'], description='Create an application credential.', operations=[{'path': collection_path, 'method': 'POST'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_application_credential', check_str=base.RULE_ADMIN_OR_OWNER, scope_types=['system', 'project'], description='Delete an 
application credential.', operations=[{'path': resource_path, 'method': 'DELETE'}], deprecated_rule=deprecated_delete_application_credentials_for_user, ), ] def list_rules(): return application_credential_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/auth.py0000664000175000017500000000403400000000000021737 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from keystone.common.policies import base auth_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_auth_catalog', check_str='', description='Get service catalog.', operations=[ {'path': '/v3/auth/catalog', 'method': 'GET'}, {'path': '/v3/auth/catalog', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_auth_projects', check_str='', description=( 'List all projects a user has access to via role assignments.' ), operations=[ {'path': '/v3/auth/projects', 'method': 'GET'}, {'path': '/v3/auth/projects', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_auth_domains', check_str='', description=( 'List all domains a user has access to via role assignments.' 
), operations=[ {'path': '/v3/auth/domains', 'method': 'GET'}, {'path': '/v3/auth/domains', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_auth_system', check_str='', description='List systems a user has access to via role assignments.', operations=[ {'path': '/v3/auth/system', 'method': 'GET'}, {'path': '/v3/auth/system', 'method': 'HEAD'}, ], ), ] def list_rules(): return auth_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/base.py0000664000175000017500000001102000000000000021701 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy IDENTITY = 'identity:%s' RULE_ADMIN_REQUIRED = 'rule:admin_required' RULE_OWNER = 'user_id:%(user_id)s' RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner' RULE_ADMIN_OR_CREDENTIAL_OWNER = ( 'rule:admin_required or ' '(rule:owner and user_id:%(target.credential.user_id)s)' ) RULE_ADMIN_OR_TARGET_DOMAIN = ( 'rule:admin_required or token.project.domain.id:%(target.domain.id)s' ) RULE_ADMIN_OR_TARGET_PROJECT = ( 'rule:admin_required or project_id:%(target.project.id)s' ) RULE_ADMIN_OR_TOKEN_SUBJECT = 'rule:admin_or_token_subject' # nosec RULE_REVOKE_EVENT_OR_ADMIN = 'rule:revoke_event_or_admin' RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT = ( 'rule:service_admin_or_token_subject' # nosec ) RULE_SERVICE_OR_ADMIN = 'rule:service_or_admin' RULE_TRUST_OWNER = 'user_id:%(trust.trustor_user_id)s' # We are explicitly setting system_scope:all in these check strings because # they provide backwards compatibility in the event a deployment sets # ``keystone.conf [oslo_policy] enforce_scope = False``, which the default. # Otherwise, this might open up APIs to be more permissive unintentionally if a # deployment isn't enforcing scope. For example, the identity:get_endpoint # policy might be ``rule:admin_required`` today and eventually ``role:reader`` # enforcing system scoped tokens. Until enforce_scope=True by default, it would # be possible for users with the ``reader`` role on a project to access an API # traditionally reserved for system administrators. Once keystone defaults # ``keystone.conf [oslo_policy] enforce_scope=True``, the ``system_scope:all`` # bits of these check strings can be removed since that will be handled # automatically by scope_types in oslo.policy's RuleDefault objects. 
SYSTEM_READER = 'role:reader and system_scope:all' SYSTEM_ADMIN = 'role:admin and system_scope:all' DOMAIN_READER = 'role:reader and domain_id:%(target.domain_id)s' RULE_SYSTEM_ADMIN_OR_OWNER = '(' + SYSTEM_ADMIN + ') or rule:owner' ADMIN_OR_SYSTEM_READER_OR_OWNER = ( '(' + RULE_ADMIN_REQUIRED + ') or ' '(' + SYSTEM_READER + ') or rule:owner' ) RULE_ADMIN_OR_SYSTEM_READER = 'rule:admin_required or (' + SYSTEM_READER + ')' # Credential and EC2 Credential policies ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER = ( '(' + RULE_ADMIN_REQUIRED + ') or ' '(' + SYSTEM_READER + ') ' 'or user_id:%(target.credential.user_id)s' ) ADMIN_OR_CRED_OWNER = ( '(' + RULE_ADMIN_REQUIRED + ') ' 'or user_id:%(target.credential.user_id)s' ) # This rule template is meant for restricting role assignments done by domain # managers. It is intended to restrict the roles a domain manager can assign or # revoke to a sensible default set while allowing overrides via policy file by # adjusting the corresponding rule definition. # For the default, any roles with higher-level privileges than "manager" (e.g. # "admin") must be omitted to avoid privilege escalation. 
DOMAIN_MANAGER_ALLOWED_ROLES = ( "'manager':%(target.role.name)s or " "'member':%(target.role.name)s or " "'reader':%(target.role.name)s" ) rules = [ policy.RuleDefault( name='admin_required', check_str='role:admin or is_admin:1' ), policy.RuleDefault(name='service_role', check_str='role:service'), policy.RuleDefault( name='service_or_admin', check_str='rule:admin_required or rule:service_role', ), policy.RuleDefault(name='owner', check_str=RULE_OWNER), policy.RuleDefault( name='admin_or_owner', check_str='rule:admin_required or rule:owner' ), policy.RuleDefault( name='token_subject', check_str='user_id:%(target.token.user_id)s' ), policy.RuleDefault( name='admin_or_token_subject', check_str='rule:admin_required or rule:token_subject', ), policy.RuleDefault( name='service_admin_or_token_subject', check_str='rule:service_or_admin or rule:token_subject', ), policy.RuleDefault( name="domain_managed_target_role", check_str=DOMAIN_MANAGER_ALLOWED_ROLES, ), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/consumer.py0000664000175000017500000000755700000000000022646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The OAUTH1 consumer API is now aware of system scope and default roles." 
) deprecated_get_consumer = policy.DeprecatedRule( name=base.IDENTITY % 'get_consumer', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_consumers = policy.DeprecatedRule( name=base.IDENTITY % 'list_consumers', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_consumer = policy.DeprecatedRule( name=base.IDENTITY % 'create_consumer', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_consumer = policy.DeprecatedRule( name=base.IDENTITY % 'update_consumer', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_consumer = policy.DeprecatedRule( name=base.IDENTITY % 'delete_consumer', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) consumer_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_consumer', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Show OAUTH1 consumer details.', operations=[ {'path': '/v3/OS-OAUTH1/consumers/{consumer_id}', 'method': 'GET'} ], deprecated_rule=deprecated_get_consumer, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_consumers', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List OAUTH1 consumers.', operations=[{'path': '/v3/OS-OAUTH1/consumers', 'method': 'GET'}], deprecated_rule=deprecated_list_consumers, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_consumer', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create OAUTH1 consumer.', operations=[{'path': '/v3/OS-OAUTH1/consumers', 'method': 'POST'}], deprecated_rule=deprecated_create_consumer, 
), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_consumer', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update OAUTH1 consumer.', operations=[ { 'path': '/v3/OS-OAUTH1/consumers/{consumer_id}', 'method': 'PATCH', } ], deprecated_rule=deprecated_update_consumer, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_consumer', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete OAUTH1 consumer.', operations=[ { 'path': '/v3/OS-OAUTH1/consumers/{consumer_id}', 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_consumer, ), ] def list_rules(): return consumer_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/credential.py0000664000175000017500000000750200000000000023113 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The credential API is now aware of system scope and default roles." 
) deprecated_get_credential = policy.DeprecatedRule( name=base.IDENTITY % 'get_credential', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_credentials = policy.DeprecatedRule( name=base.IDENTITY % 'list_credentials', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_credential = policy.DeprecatedRule( name=base.IDENTITY % 'create_credential', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_credential = policy.DeprecatedRule( name=base.IDENTITY % 'update_credential', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_credential = policy.DeprecatedRule( name=base.IDENTITY % 'delete_credential', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) credential_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_credential', check_str=base.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER, scope_types=['system', 'domain', 'project'], description='Show credentials details.', operations=[ {'path': '/v3/credentials/{credential_id}', 'method': 'GET'} ], deprecated_rule=deprecated_get_credential, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_credentials', check_str=base.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER, scope_types=['system', 'domain', 'project'], description='List credentials.', operations=[{'path': '/v3/credentials', 'method': 'GET'}], deprecated_rule=deprecated_list_credentials, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_credential', check_str=base.ADMIN_OR_CRED_OWNER, scope_types=['system', 'domain', 'project'], description='Create credential.', operations=[{'path': '/v3/credentials', 'method': 'POST'}], 
deprecated_rule=deprecated_create_credential, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_credential', check_str=base.ADMIN_OR_CRED_OWNER, scope_types=['system', 'domain', 'project'], description='Update credential.', operations=[ {'path': '/v3/credentials/{credential_id}', 'method': 'PATCH'} ], deprecated_rule=deprecated_update_credential, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_credential', check_str=base.ADMIN_OR_CRED_OWNER, scope_types=['system', 'domain', 'project'], description='Delete credential.', operations=[ {'path': '/v3/credentials/{credential_id}', 'method': 'DELETE'} ], deprecated_rule=deprecated_delete_credential, ), ] def list_rules(): return credential_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/domain.py0000664000175000017500000001010600000000000022242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The domain API is now aware of system scope and default roles." 
) deprecated_list_domains = policy.DeprecatedRule( name=base.IDENTITY % 'list_domains', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_get_domain = policy.DeprecatedRule( name=base.IDENTITY % 'get_domain', check_str=base.RULE_ADMIN_OR_TARGET_DOMAIN, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_domain = policy.DeprecatedRule( name=base.IDENTITY % 'update_domain', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_domain = policy.DeprecatedRule( name=base.IDENTITY % 'create_domain', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_domain = policy.DeprecatedRule( name=base.IDENTITY % 'delete_domain', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) ADMIN_OR_SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER = ( base.RULE_ADMIN_REQUIRED + ' or ' '(role:reader and system_scope:all) or ' 'token.domain.id:%(target.domain.id)s or ' 'token.project.domain.id:%(target.domain.id)s' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER = ( base.RULE_ADMIN_OR_SYSTEM_READER + ' or ' '(role:reader and domain_id:%(target.domain.id)s)' ) domain_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_domain', # NOTE(lbragstad): This policy allows system, domain, and # project-scoped tokens. 
check_str=ADMIN_OR_SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER, scope_types=['system', 'domain', 'project'], description='Show domain details.', operations=[{'path': '/v3/domains/{domain_id}', 'method': 'GET'}], deprecated_rule=deprecated_get_domain, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_domains', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List domains.', operations=[{'path': '/v3/domains', 'method': 'GET'}], deprecated_rule=deprecated_list_domains, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_domain', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create domain.', operations=[{'path': '/v3/domains', 'method': 'POST'}], deprecated_rule=deprecated_create_domain, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_domain', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update domain.', operations=[{'path': '/v3/domains/{domain_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_domain, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_domain', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete domain.', operations=[{'path': '/v3/domains/{domain_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_domain, ), ] def list_rules(): return domain_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/domain_config.py0000664000175000017500000001633700000000000023603 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The domain config API is now aware of system scope and default roles." ) deprecated_get_domain_config = policy.DeprecatedRule( name=base.IDENTITY % 'get_domain_config', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_domain_config_default = policy.DeprecatedRule( name=base.IDENTITY % 'get_domain_config_default', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_domain_config = policy.DeprecatedRule( name=base.IDENTITY % 'create_domain_config', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_domain_config = policy.DeprecatedRule( name=base.IDENTITY % 'update_domain_config', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_domain_config = policy.DeprecatedRule( name=base.IDENTITY % 'delete_domain_config', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) domain_config_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_domain_config', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create domain configuration.', operations=[ {'path': 
'/v3/domains/{domain_id}/config', 'method': 'PUT'} ], deprecated_rule=deprecated_create_domain_config, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_domain_config', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description=( 'Get the entire domain configuration for a domain, an ' 'option group within a domain, or a specific ' 'configuration option within a group for a domain.' ), operations=[ {'path': '/v3/domains/{domain_id}/config', 'method': 'GET'}, {'path': '/v3/domains/{domain_id}/config', 'method': 'HEAD'}, { 'path': '/v3/domains/{domain_id}/config/{group}', 'method': 'GET', }, { 'path': '/v3/domains/{domain_id}/config/{group}', 'method': 'HEAD', }, { 'path': '/v3/domains/{domain_id}/config/{group}/{option}', 'method': 'GET', }, { 'path': '/v3/domains/{domain_id}/config/{group}/{option}', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_domain_config, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_security_compliance_domain_config', check_str='', # This should be accessible to anyone with a valid token, regardless of # system-scope or project-scope. scope_types=['system', 'domain', 'project'], description=( 'Get security compliance domain configuration for ' 'either a domain or a specific option in a domain.' ), operations=[ { 'path': '/v3/domains/{domain_id}/config/security_compliance', 'method': 'GET', }, { 'path': '/v3/domains/{domain_id}/config/security_compliance', 'method': 'HEAD', }, { 'path': ( '/v3/domains/{domain_id}/config/' 'security_compliance/{option}' ), 'method': 'GET', }, { 'path': ( '/v3/domains/{domain_id}/config/' 'security_compliance/{option}' ), 'method': 'HEAD', }, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_domain_config', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description=( 'Update domain configuration for either a domain, ' 'specific group or a specific option in a group.' 
), operations=[ {'path': '/v3/domains/{domain_id}/config', 'method': 'PATCH'}, { 'path': '/v3/domains/{domain_id}/config/{group}', 'method': 'PATCH', }, { 'path': '/v3/domains/{domain_id}/config/{group}/{option}', 'method': 'PATCH', }, ], deprecated_rule=deprecated_update_domain_config, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_domain_config', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description=( 'Delete domain configuration for either a domain, ' 'specific group or a specific option in a group.' ), operations=[ {'path': '/v3/domains/{domain_id}/config', 'method': 'DELETE'}, { 'path': '/v3/domains/{domain_id}/config/{group}', 'method': 'DELETE', }, { 'path': '/v3/domains/{domain_id}/config/{group}/{option}', 'method': 'DELETE', }, ], deprecated_rule=deprecated_delete_domain_config, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_domain_config_default', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description=( 'Get domain configuration default for either a domain, ' 'specific group or a specific option in a group.' 
), operations=[ {'path': '/v3/domains/config/default', 'method': 'GET'}, {'path': '/v3/domains/config/default', 'method': 'HEAD'}, {'path': '/v3/domains/config/{group}/default', 'method': 'GET'}, {'path': '/v3/domains/config/{group}/default', 'method': 'HEAD'}, { 'path': '/v3/domains/config/{group}/{option}/default', 'method': 'GET', }, { 'path': '/v3/domains/config/{group}/{option}/default', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_domain_config_default, ), ] def list_rules(): return domain_config_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/ec2_credential.py0000664000175000017500000000717100000000000023646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The EC2 credential API is now aware of system scope and default roles." 
) deprecated_ec2_get_credential = policy.DeprecatedRule( name=base.IDENTITY % 'ec2_get_credential', check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_ec2_list_credentials = policy.DeprecatedRule( name=base.IDENTITY % 'ec2_list_credentials', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_ec2_create_credential = policy.DeprecatedRule( name=base.IDENTITY % 'ec2_create_credential', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_ec2_delete_credential = policy.DeprecatedRule( name=base.IDENTITY % 'ec2_delete_credential', check_str=base.RULE_ADMIN_OR_CREDENTIAL_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) ec2_credential_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'ec2_get_credential', check_str=base.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER, scope_types=['system', 'project'], description='Show ec2 credential details.', operations=[ { 'path': ( '/v3/users/{user_id}/credentials/OS-EC2/{credential_id}' ), 'method': 'GET', } ], deprecated_rule=deprecated_ec2_get_credential, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'ec2_list_credentials', check_str=base.ADMIN_OR_SYSTEM_READER_OR_OWNER, scope_types=['system', 'project'], description='List ec2 credentials.', operations=[ {'path': '/v3/users/{user_id}/credentials/OS-EC2', 'method': 'GET'} ], deprecated_rule=deprecated_ec2_list_credentials, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'ec2_create_credential', check_str=base.RULE_ADMIN_OR_OWNER, scope_types=['system', 'project'], description='Create ec2 credential.', operations=[ { 'path': '/v3/users/{user_id}/credentials/OS-EC2', 'method': 'POST', } ], deprecated_rule=deprecated_ec2_create_credential, ), 
policy.DocumentedRuleDefault( name=base.IDENTITY % 'ec2_delete_credential', check_str=base.ADMIN_OR_CRED_OWNER, scope_types=['system', 'project'], description='Delete ec2 credential.', operations=[ { 'path': ( '/v3/users/{user_id}/credentials/OS-EC2/{credential_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_ec2_delete_credential, ), ] def list_rules(): return ec2_credential_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/endpoint.py0000664000175000017500000000723700000000000022626 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The endpoint API is now aware of system scope and default roles." 
) deprecated_get_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'get_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_endpoints = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoints', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'update_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'create_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'delete_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) endpoint_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_endpoint', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Show endpoint details.', operations=[{'path': '/v3/endpoints/{endpoint_id}', 'method': 'GET'}], deprecated_rule=deprecated_get_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoints', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List endpoints.', operations=[{'path': '/v3/endpoints', 'method': 'GET'}], deprecated_rule=deprecated_list_endpoints, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_endpoint', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create endpoint.', operations=[{'path': '/v3/endpoints', 'method': 'POST'}], deprecated_rule=deprecated_create_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY 
% 'update_endpoint', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update endpoint.', operations=[ {'path': '/v3/endpoints/{endpoint_id}', 'method': 'PATCH'} ], deprecated_rule=deprecated_update_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_endpoint', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete endpoint.', operations=[ {'path': '/v3/endpoints/{endpoint_id}', 'method': 'DELETE'} ], deprecated_rule=deprecated_delete_endpoint, ), ] def list_rules(): return endpoint_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/endpoint_group.py0000664000175000017500000002342300000000000024035 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The endpoint groups API is now aware of system scope and default roles." 
) deprecated_list_endpoint_groups = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoint_groups', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_endpoint_group = policy.DeprecatedRule( name=base.IDENTITY % 'get_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_projects_assoc_with_endpoint_group = policy.DeprecatedRule( name=base.IDENTITY % 'list_projects_associated_with_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_endpoints_assoc_with_endpoint_group = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoints_associated_with_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_endpoint_group_in_project = policy.DeprecatedRule( name=base.IDENTITY % 'get_endpoint_group_in_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_endpoint_groups_for_project = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoint_groups_for_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_endpoint_group = policy.DeprecatedRule( name=base.IDENTITY % 'create_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_endpoint_group = policy.DeprecatedRule( name=base.IDENTITY % 'update_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_endpoint_group = policy.DeprecatedRule( 
name=base.IDENTITY % 'delete_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_add_endpoint_group_to_project = policy.DeprecatedRule( name=base.IDENTITY % 'add_endpoint_group_to_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_remove_endpoint_group_from_project = policy.DeprecatedRule( name=base.IDENTITY % 'remove_endpoint_group_from_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) group_endpoint_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create endpoint group.', operations=[ {'path': '/v3/OS-EP-FILTER/endpoint_groups', 'method': 'POST'} ], deprecated_rule=deprecated_create_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoint_groups', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List endpoint groups.', operations=[ {'path': '/v3/OS-EP-FILTER/endpoint_groups', 'method': 'GET'} ], deprecated_rule=deprecated_list_endpoint_groups, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_endpoint_group', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Get endpoint group.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}' ), 'method': 'GET', }, { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update endpoint group.', operations=[ { 'path': ( 
'/v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}' ), 'method': 'PATCH', } ], deprecated_rule=deprecated_update_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_endpoint_group', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete endpoint group.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_projects_associated_with_endpoint_group', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description=( 'List all projects associated with a specific endpoint group.' ), operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects' ), 'method': 'GET', } ], deprecated_rule=deprecated_list_projects_assoc_with_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoints_associated_with_endpoint_group', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List all endpoints associated with an endpoint group.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/endpoints' ), 'method': 'GET', } ], deprecated_rule=deprecated_list_endpoints_assoc_with_endpoint_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_endpoint_group_in_project', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description=( 'Check if an endpoint group is associated with a project.' 
), operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects/{project_id}' ), 'method': 'GET', }, { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects/{project_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_endpoint_group_in_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoint_groups_for_project', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List endpoint groups associated with a specific project.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/projects/{project_id}/endpoint_groups' ), 'method': 'GET', } ], deprecated_rule=deprecated_list_endpoint_groups_for_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'add_endpoint_group_to_project', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Allow a project to access an endpoint group.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects/{project_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_add_endpoint_group_to_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'remove_endpoint_group_from_project', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Remove endpoint group from project.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/endpoint_groups/' '{endpoint_group_id}/projects/{project_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_remove_endpoint_group_from_project, ), ] def list_rules(): return group_endpoint_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/grant.py0000664000175000017500000003551300000000000022117 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base # Two of the three portions of this check string are specific to domain # readers. The first catches domain readers who are checking or listing grants # for users. The second does the same for groups. We have to overload the check # string to handle both cases because `identity:check_grant` is used to protect # both user and group grant APIs. If the `identity:check_grant` policy is every # broken apart, we can write specific check strings that are tailored to either # users or groups (e.g., `identity:check_group_grant` or # `identity:check_user_grant`) and prevent overloading like this. 
DOMAIN_MATCHES_USER_DOMAIN = 'domain_id:%(target.user.domain_id)s' DOMAIN_MATCHES_GROUP_DOMAIN = 'domain_id:%(target.group.domain_id)s' DOMAIN_MATCHES_PROJECT_DOMAIN = 'domain_id:%(target.project.domain_id)s' DOMAIN_MATCHES_TARGET_DOMAIN = 'domain_id:%(target.domain.id)s' DOMAIN_MATCHES_ROLE = ( 'domain_id:%(target.role.domain_id)s or None:%(target.role.domain_id)s' ) GRANTS_DOMAIN_READER = ( '(role:reader and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:reader and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ') or ' '(role:reader and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:reader and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ')' ) SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.SYSTEM_READER + ') or ' '(' + GRANTS_DOMAIN_READER + ') and ' '(' + DOMAIN_MATCHES_ROLE + ')' ) SYSTEM_READER_OR_DOMAIN_READER_LIST = ( '(' + base.SYSTEM_READER + ') or ' + GRANTS_DOMAIN_READER ) GRANTS_DOMAIN_ADMIN = ( '(role:admin and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:admin and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ') or ' '(role:admin and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:admin and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ')' ) GRANTS_DOMAIN_MANAGER = ( '(role:manager and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:manager and ' + DOMAIN_MATCHES_USER_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ') or ' '(role:manager and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_PROJECT_DOMAIN + ') or ' '(role:manager and ' + DOMAIN_MATCHES_GROUP_DOMAIN + ' and' ' ' + DOMAIN_MATCHES_TARGET_DOMAIN + ')' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER = ( '(' + 
base.RULE_ADMIN_REQUIRED + ') or ' '(' + SYSTEM_READER_OR_DOMAIN_READER + ')' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_LIST = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' '(' + SYSTEM_READER_OR_DOMAIN_READER_LIST + ')' ) ADMIN_OR_DOMAIN_ADMIN_OR_DOMAIN_MANAGER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' '(' + GRANTS_DOMAIN_ADMIN + ') and ' '(' + DOMAIN_MATCHES_ROLE + ') or ' '(' + GRANTS_DOMAIN_MANAGER + ') and ' 'rule:domain_managed_target_role' ) DEPRECATED_REASON = ( "The assignment API is now aware of system scope and default roles." ) deprecated_check_system_grant_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'check_system_grant_for_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_system_grants_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'list_system_grants_for_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_system_grant_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'create_system_grant_for_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_revoke_system_grant_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'revoke_system_grant_for_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_check_system_grant_for_group = policy.DeprecatedRule( name=base.IDENTITY % 'check_system_grant_for_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_system_grants_for_group = policy.DeprecatedRule( name=base.IDENTITY % 'list_system_grants_for_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) 
deprecated_create_system_grant_for_group = policy.DeprecatedRule( name=base.IDENTITY % 'create_system_grant_for_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_revoke_system_grant_for_group = policy.DeprecatedRule( name=base.IDENTITY % 'revoke_system_grant_for_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_grants = policy.DeprecatedRule( name=base.IDENTITY % 'list_grants', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_check_grant = policy.DeprecatedRule( name=base.IDENTITY % 'check_grant', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_grant = policy.DeprecatedRule( name=base.IDENTITY % 'create_grant', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_revoke_grant = policy.DeprecatedRule( name=base.IDENTITY % 'revoke_grant', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) resource_paths = [ '/projects/{project_id}/users/{user_id}/roles/{role_id}', '/projects/{project_id}/groups/{group_id}/roles/{role_id}', '/domains/{domain_id}/users/{user_id}/roles/{role_id}', '/domains/{domain_id}/groups/{group_id}/roles/{role_id}', ] resource_paths += [ '/OS-INHERIT' + path + '/inherited_to_projects' for path in resource_paths ] collection_paths = [ '/projects/{project_id}/users/{user_id}/roles', '/projects/{project_id}/groups/{group_id}/roles', '/domains/{domain_id}/users/{user_id}/roles', '/domains/{domain_id}/groups/{group_id}/roles', ] inherited_collection_paths = [ ( '/OS-INHERIT/domains/{domain_id}/groups/{group_id}/roles/' 'inherited_to_projects' 
), ( '/OS-INHERIT/domains/{domain_id}/users/{user_id}/roles/' 'inherited_to_projects' ), ] def list_operations(paths, methods): return [ {'path': '/v3' + path, 'method': method} for path in paths for method in methods ] # NOTE(samueldmq): Unlike individual resource paths, collection # paths for the inherited grants do not contain a HEAD API list_grants_operations = list_operations( collection_paths, ['GET', 'HEAD'] ) + list_operations(inherited_collection_paths, ['GET']) grant_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_grant', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description=( 'Check a role grant between a target and an actor. A ' 'target can be either a domain or a project. An actor ' 'can be either a user or a group. These terms also apply ' 'to the OS-INHERIT APIs, where grants on the target ' 'are inherited to all projects in the subtree, if ' 'applicable.' ), operations=list_operations(resource_paths, ['HEAD', 'GET']), deprecated_rule=deprecated_check_grant, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_grants', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_LIST, scope_types=['system', 'domain', 'project'], description=( 'List roles granted to an actor on a target. A target ' 'can be either a domain or a project. An actor can be ' 'either a user or a group. For the OS-INHERIT APIs, it ' 'is possible to list inherited role grants for actors on ' 'domains, where grants are inherited to all projects ' 'in the specified domain.' ), operations=list_grants_operations, deprecated_rule=deprecated_list_grants, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_grant', check_str=ADMIN_OR_DOMAIN_ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description=( 'Create a role grant between a target and an actor. A ' 'target can be either a domain or a project. An actor ' 'can be either a user or a group. 
These terms also apply ' 'to the OS-INHERIT APIs, where grants on the target ' 'are inherited to all projects in the subtree, if ' 'applicable.' ), operations=list_operations(resource_paths, ['PUT']), deprecated_rule=deprecated_create_grant, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'revoke_grant', check_str=ADMIN_OR_DOMAIN_ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description=( 'Revoke a role grant between a target and an actor. A ' 'target can be either a domain or a project. An actor ' 'can be either a user or a group. These terms also apply ' 'to the OS-INHERIT APIs, where grants on the target ' 'are inherited to all projects in the subtree, if ' 'applicable. In that case, revoking the role grant in ' 'the target would remove the logical effect of ' 'inheriting it to the target\'s projects subtree.' ), operations=list_operations(resource_paths, ['DELETE']), deprecated_rule=deprecated_revoke_grant, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_system_grants_for_user', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List all grants a specific user has on the system.', operations=[ { 'path': '/v3/system/users/{user_id}/roles', 'method': ['HEAD', 'GET'], } ], deprecated_rule=deprecated_list_system_grants_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_system_grant_for_user', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check if a user has a role on the system.', operations=[ { 'path': '/v3/system/users/{user_id}/roles/{role_id}', 'method': ['HEAD', 'GET'], } ], deprecated_rule=deprecated_check_system_grant_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_system_grant_for_user', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Grant a user a role on the system.', operations=[ { 'path': '/v3/system/users/{user_id}/roles/{role_id}', 'method': 
['PUT'], } ], deprecated_rule=deprecated_create_system_grant_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'revoke_system_grant_for_user', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Remove a role from a user on the system.', operations=[ { 'path': '/v3/system/users/{user_id}/roles/{role_id}', 'method': ['DELETE'], } ], deprecated_rule=deprecated_revoke_system_grant_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_system_grants_for_group', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List all grants a specific group has on the system.', operations=[ { 'path': '/v3/system/groups/{group_id}/roles', 'method': ['HEAD', 'GET'], } ], deprecated_rule=deprecated_list_system_grants_for_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_system_grant_for_group', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check if a group has a role on the system.', operations=[ { 'path': '/v3/system/groups/{group_id}/roles/{role_id}', 'method': ['HEAD', 'GET'], } ], deprecated_rule=deprecated_check_system_grant_for_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_system_grant_for_group', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Grant a group a role on the system.', operations=[ { 'path': '/v3/system/groups/{group_id}/roles/{role_id}', 'method': ['PUT'], } ], deprecated_rule=deprecated_create_system_grant_for_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'revoke_system_grant_for_group', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Remove a role from a group on the system.', operations=[ { 'path': '/v3/system/groups/{group_id}/roles/{role_id}', 'method': ['DELETE'], } ], deprecated_rule=deprecated_revoke_system_grant_for_group, ), ] def list_rules(): return grant_policies 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/group.py0000664000175000017500000002207400000000000022136 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER = ( '(role:reader and system_scope:all) or ' '(role:reader and domain_id:%(target.user.domain_id)s) or ' 'user_id:%(user_id)s' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_OR_OWNER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER ) SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER = ( '(role:reader and system_scope:all) or ' '(role:reader and ' 'domain_id:%(target.group.domain_id)s and ' 'domain_id:%(target.user.domain_id)s)' ) DOMAIN_MANAGER_FOR_TARGET_GROUP = ( 'role:manager and domain_id:%(target.group.domain_id)s' ) DOMAIN_MANAGER_FOR_TARGET_GROUP_USER = ( 'role:manager and ' 'domain_id:%(target.group.domain_id)s and ' 'domain_id:%(target.user.domain_id)s' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER ) SYSTEM_READER_OR_DOMAIN_READER = ( '(role:reader and system_scope:all) or ' '(role:reader and domain_id:%(target.group.domain_id)s)' ) 
ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER ) SYSTEM_ADMIN_OR_DOMAIN_ADMIN = ( '(role:admin and system_scope:all) or ' '(role:admin and domain_id:%(target.group.domain_id)s)' ) ADMIN_OR_DOMAIN_MANAGER_FOR_GROUPS = ( '(' + base.RULE_ADMIN_REQUIRED + ') or (' + DOMAIN_MANAGER_FOR_TARGET_GROUP + ')' ) ADMIN_OR_DOMAIN_MANAGER_FOR_GROUP_ASSIGNMENTS = ( '(' + base.RULE_ADMIN_REQUIRED + ') or (' + DOMAIN_MANAGER_FOR_TARGET_GROUP_USER + ')' ) DEPRECATED_REASON = ( "The group API is now aware of system scope and default roles." ) deprecated_get_group = policy.DeprecatedRule( name=base.IDENTITY % 'get_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_groups = policy.DeprecatedRule( name=base.IDENTITY % 'list_groups', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_groups_for_user = policy.DeprecatedRule( name=base.IDENTITY % 'list_groups_for_user', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_users_in_group = policy.DeprecatedRule( name=base.IDENTITY % 'list_users_in_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_check_user_in_group = policy.DeprecatedRule( name=base.IDENTITY % 'check_user_in_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_group = policy.DeprecatedRule( name=base.IDENTITY % 'create_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_group = policy.DeprecatedRule( name=base.IDENTITY % 'update_group', 
check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_group = policy.DeprecatedRule( name=base.IDENTITY % 'delete_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_remove_user_from_group = policy.DeprecatedRule( name=base.IDENTITY % 'remove_user_from_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_add_user_to_group = policy.DeprecatedRule( name=base.IDENTITY % 'add_user_to_group', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) group_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_group', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='Show group details.', operations=[ {'path': '/v3/groups/{group_id}', 'method': 'GET'}, {'path': '/v3/groups/{group_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_groups', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List groups.', operations=[ {'path': '/v3/groups', 'method': 'GET'}, {'path': '/v3/groups', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_groups, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_groups_for_user', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_OR_OWNER, scope_types=['system', 'domain', 'project'], description='List groups to which a user belongs.', operations=[ {'path': '/v3/users/{user_id}/groups', 'method': 'GET'}, {'path': '/v3/users/{user_id}/groups', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_groups_for_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_group', 
check_str=ADMIN_OR_DOMAIN_MANAGER_FOR_GROUPS, scope_types=['system', 'domain', 'project'], description='Create group.', operations=[{'path': '/v3/groups', 'method': 'POST'}], deprecated_rule=deprecated_create_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_group', check_str=ADMIN_OR_DOMAIN_MANAGER_FOR_GROUPS, scope_types=['system', 'domain', 'project'], description='Update group.', operations=[{'path': '/v3/groups/{group_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_group', check_str=ADMIN_OR_DOMAIN_MANAGER_FOR_GROUPS, scope_types=['system', 'domain', 'project'], description='Delete group.', operations=[{'path': '/v3/groups/{group_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_users_in_group', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List members of a specific group.', operations=[ {'path': '/v3/groups/{group_id}/users', 'method': 'GET'}, {'path': '/v3/groups/{group_id}/users', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_users_in_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'remove_user_from_group', check_str=ADMIN_OR_DOMAIN_MANAGER_FOR_GROUP_ASSIGNMENTS, scope_types=['system', 'domain', 'project'], description='Remove user from group.', operations=[ { 'path': '/v3/groups/{group_id}/users/{user_id}', 'method': 'DELETE', } ], deprecated_rule=deprecated_remove_user_from_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_user_in_group', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP, scope_types=['system', 'domain', 'project'], description='Check whether a user is a member of a group.', operations=[ { 'path': '/v3/groups/{group_id}/users/{user_id}', 'method': 'HEAD', }, {'path': '/v3/groups/{group_id}/users/{user_id}', 'method': 'GET'}, ], 
deprecated_rule=deprecated_check_user_in_group, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'add_user_to_group', check_str=ADMIN_OR_DOMAIN_MANAGER_FOR_GROUP_ASSIGNMENTS, scope_types=['system', 'domain', 'project'], description='Add user to group.', operations=[ {'path': '/v3/groups/{group_id}/users/{user_id}', 'method': 'PUT'} ], deprecated_rule=deprecated_add_user_to_group, ), ] def list_rules(): return group_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/identity_provider.py0000664000175000017500000001136300000000000024544 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The identity provider API is now aware of system scope and default roles." 
) deprecated_get_idp = policy.DeprecatedRule( name=base.IDENTITY % 'get_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_idp = policy.DeprecatedRule( name=base.IDENTITY % 'list_identity_providers', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_idp = policy.DeprecatedRule( name=base.IDENTITY % 'update_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_idp = policy.DeprecatedRule( name=base.IDENTITY % 'create_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_idp = policy.DeprecatedRule( name=base.IDENTITY % 'delete_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) identity_provider_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, # FIXME(lbragstad): All `scope_types` for identity provider policies # should be updated to include project scope if, or when, it becomes # possible to manage federated identity providers without modifying # configurations outside of keystone (Apache). It makes sense to # associate system scope to identity provider management since it # requires modifying configuration files. 
scope_types=['system', 'project'], description='Create identity provider.', operations=[ { 'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}', 'method': 'PUT', } ], deprecated_rule=deprecated_create_idp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_identity_providers', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List identity providers.', operations=[ {'path': '/v3/OS-FEDERATION/identity_providers', 'method': 'GET'}, {'path': '/v3/OS-FEDERATION/identity_providers', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_idp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_identity_provider', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Get identity provider.', operations=[ { 'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}', 'method': 'GET', }, { 'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_idp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update identity provider.', operations=[ { 'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}', 'method': 'PATCH', } ], deprecated_rule=deprecated_update_idp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_identity_provider', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete identity provider.', operations=[ { 'path': '/v3/OS-FEDERATION/identity_providers/{idp_id}', 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_idp, ), ] def list_rules(): return identity_provider_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/implied_role.py0000664000175000017500000001514400000000000023446 0ustar00zuulzuul00000000000000# Licensed under the Apache 
License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The implied role API is now aware of system scope and default roles." ) deprecated_get_implied_role = policy.DeprecatedRule( name=base.IDENTITY % 'get_implied_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_implied_roles = policy.DeprecatedRule( name=base.IDENTITY % 'list_implied_roles', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_role_inference_rules = policy.DeprecatedRule( name=base.IDENTITY % 'list_role_inference_rules', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_check_implied_role = policy.DeprecatedRule( name=base.IDENTITY % 'check_implied_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_implied_role = policy.DeprecatedRule( name=base.IDENTITY % 'create_implied_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_implied_role = policy.DeprecatedRule( name=base.IDENTITY % 'delete_implied_role', check_str=base.RULE_ADMIN_REQUIRED, 
deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) implied_role_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_implied_role', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, # FIXME(lbragstad) The management of implied roles currently makes # sense as a system-only resource. Once keystone has the ability to # support RBAC solely over the API without having to customize policy # files, scope_types should include 'project'. scope_types=['system', 'project'], description='Get information about an association between two roles. ' 'When a relationship exists between a prior role and an ' 'implied role and the prior role is assigned to a user, ' 'the user also assumes the implied role.', operations=[ { 'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}', 'method': 'GET', } ], deprecated_rule=deprecated_get_implied_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_implied_roles', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List associations between two roles. When a relationship ' 'exists between a prior role and an implied role and the ' 'prior role is assigned to a user, the user also assumes ' 'the implied role. This will return all the implied roles ' 'that would be assumed by the user who gets the specified ' 'prior role.', operations=[ {'path': '/v3/roles/{prior_role_id}/implies', 'method': 'GET'}, {'path': '/v3/roles/{prior_role_id}/implies', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_implied_roles, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_implied_role', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create an association between two roles. 
When a ' 'relationship exists between a prior role and an implied ' 'role and the prior role is assigned to a user, the user ' 'also assumes the implied role.', operations=[ { 'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}', 'method': 'PUT', } ], deprecated_rule=deprecated_create_implied_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_implied_role', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete the association between two roles. When a ' 'relationship exists between a prior role and an implied ' 'role and the prior role is assigned to a user, the user ' 'also assumes the implied role. Removing the association ' 'will cause that effect to be eliminated.', operations=[ { 'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}', 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_implied_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_role_inference_rules', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List all associations between two roles in the system. ' 'When a relationship exists between a prior role and an ' 'implied role and the prior role is assigned to a user, ' 'the user also assumes the implied role.', operations=[ {'path': '/v3/role_inferences', 'method': 'GET'}, {'path': '/v3/role_inferences', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_role_inference_rules, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_implied_role', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check an association between two roles. 
When a ' 'relationship exists between a prior role and an implied ' 'role and the prior role is assigned to a user, the user ' 'also assumes the implied role.', operations=[ { 'path': '/v3/roles/{prior_role_id}/implies/{implied_role_id}', 'method': 'HEAD', } ], deprecated_rule=deprecated_check_implied_role, ), ] def list_rules(): return implied_role_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/limit.py0000664000175000017500000000565600000000000022127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from keystone.common.policies import base ADMIN_OR_SYSTEM_OR_DOMAIN_OR_PROJECT_USER = ( base.RULE_ADMIN_REQUIRED + ' or ' '(' + base.SYSTEM_READER + ') or ' '(' 'domain_id:%(target.limit.domain.id)s or ' 'domain_id:%(target.limit.project.domain_id)s' ') or ' '(' 'project_id:%(target.limit.project_id)s and not ' 'None:%(target.limit.project_id)s' ')' ) limit_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_limit_model', check_str='', scope_types=['system', 'domain', 'project'], description='Get limit enforcement model.', operations=[ {'path': '/v3/limits/model', 'method': 'GET'}, {'path': '/v3/limits/model', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_limit', check_str=ADMIN_OR_SYSTEM_OR_DOMAIN_OR_PROJECT_USER, scope_types=['system', 'domain', 'project'], description='Show limit details.', operations=[ {'path': '/v3/limits/{limit_id}', 'method': 'GET'}, {'path': '/v3/limits/{limit_id}', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_limits', check_str='', scope_types=['system', 'domain', 'project'], description='List limits.', operations=[ {'path': '/v3/limits', 'method': 'GET'}, {'path': '/v3/limits', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_limits', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create limits.', operations=[{'path': '/v3/limits', 'method': 'POST'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_limit', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update limit.', operations=[{'path': '/v3/limits/{limit_id}', 'method': 'PATCH'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_limit', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete limit.', operations=[{'path': '/v3/limits/{limit_id}', 'method': 'DELETE'}], ), ] def list_rules(): return 
limit_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/mapping.py0000664000175000017500000001044000000000000022427 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The federated mapping API is now aware of system scope and default roles." ) deprecated_get_mapping = policy.DeprecatedRule( name=base.IDENTITY % 'get_mapping', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_mappings = policy.DeprecatedRule( name=base.IDENTITY % 'list_mappings', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_mapping = policy.DeprecatedRule( name=base.IDENTITY % 'update_mapping', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_mapping = policy.DeprecatedRule( name=base.IDENTITY % 'create_mapping', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_mapping = policy.DeprecatedRule( name=base.IDENTITY % 'delete_mapping', check_str=base.RULE_ADMIN_REQUIRED, 
deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) mapping_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_mapping', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description=( 'Create a new federated mapping containing one or ' 'more sets of rules.' ), operations=[ { 'path': '/v3/OS-FEDERATION/mappings/{mapping_id}', 'method': 'PUT', } ], deprecated_rule=deprecated_create_mapping, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_mapping', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Get a federated mapping.', operations=[ { 'path': '/v3/OS-FEDERATION/mappings/{mapping_id}', 'method': 'GET', }, { 'path': '/v3/OS-FEDERATION/mappings/{mapping_id}', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_mapping, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_mappings', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List federated mappings.', operations=[ {'path': '/v3/OS-FEDERATION/mappings', 'method': 'GET'}, {'path': '/v3/OS-FEDERATION/mappings', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_mappings, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_mapping', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete a federated mapping.', operations=[ { 'path': '/v3/OS-FEDERATION/mappings/{mapping_id}', 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_mapping, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_mapping', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update a federated mapping.', operations=[ { 'path': '/v3/OS-FEDERATION/mappings/{mapping_id}', 'method': 'PATCH', } ], deprecated_rule=deprecated_update_mapping, ), ] def list_rules(): return mapping_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/policy.py0000664000175000017500000000732600000000000022304 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The policy API is now aware of system scope and default roles." ) deprecated_get_policy = policy.DeprecatedRule( name=base.IDENTITY % 'get_policy', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_policies = policy.DeprecatedRule( name=base.IDENTITY % 'list_policies', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_policy = policy.DeprecatedRule( name=base.IDENTITY % 'update_policy', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_policy = policy.DeprecatedRule( name=base.IDENTITY % 'create_policy', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_policy = policy.DeprecatedRule( name=base.IDENTITY % 'delete_policy', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) policy_policies = [ policy.DocumentedRuleDefault( 
name=base.IDENTITY % 'get_policy', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, # This API isn't really exposed to usable, it's actually deprecated. # More-or-less adding scope_types to be consistent with other policies. scope_types=['system', 'project'], description='Show policy details.', operations=[{'path': '/v3/policies/{policy_id}', 'method': 'GET'}], deprecated_rule=deprecated_get_policy, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_policies', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List policies.', operations=[{'path': '/v3/policies', 'method': 'GET'}], deprecated_rule=deprecated_list_policies, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_policy', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create policy.', operations=[{'path': '/v3/policies', 'method': 'POST'}], deprecated_rule=deprecated_create_policy, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_policy', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update policy.', operations=[{'path': '/v3/policies/{policy_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_policy, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_policy', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete policy.', operations=[{'path': '/v3/policies/{policy_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_policy, ), ] def list_rules(): return policy_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/policy_association.py0000664000175000017500000002623000000000000024673 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base # NOTE(lbragstad): Both endpoints and services are system-level resources. # System-scoped tokens should be required to manage policy associations to # existing system-level resources. DEPRECATED_REASON = ( "The policy association API is now aware of system scope and default " "roles." ) deprecated_check_policy_assoc_for_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'check_policy_association_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_check_policy_assoc_for_service = policy.DeprecatedRule( name=base.IDENTITY % 'check_policy_association_for_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_check_policy_assoc_for_region_and_service = policy.DeprecatedRule( name=base.IDENTITY % 'check_policy_association_for_region_and_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_policy_for_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'get_policy_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_endpoints_for_policy = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoints_for_policy', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, 
deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_policy_assoc_for_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'create_policy_association_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_policy_assoc_for_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'delete_policy_association_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_policy_assoc_for_service = policy.DeprecatedRule( name=base.IDENTITY % 'create_policy_association_for_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_policy_assoc_for_service = policy.DeprecatedRule( name=base.IDENTITY % 'delete_policy_association_for_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_policy_assoc_for_region_and_service = policy.DeprecatedRule( name=base.IDENTITY % 'create_policy_association_for_region_and_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_policy_assoc_for_region_and_service = policy.DeprecatedRule( name=base.IDENTITY % 'delete_policy_association_for_region_and_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) policy_association_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_policy_association_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Associate a policy to a specific endpoint.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}' ), 'method': 'PUT', } ], 
deprecated_rule=deprecated_create_policy_assoc_for_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_policy_association_for_endpoint', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check policy association for endpoint.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}' ), 'method': 'GET', }, { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_check_policy_assoc_for_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_policy_association_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete policy association for endpoint.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_policy_assoc_for_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_policy_association_for_service', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Associate a policy to a specific service.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_create_policy_assoc_for_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_policy_association_for_service', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check policy association for service.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}' ), 'method': 'GET', }, { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_check_policy_assoc_for_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 
'delete_policy_association_for_service', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete policy association for service.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_policy_assoc_for_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % ('create_policy_association_for_region_and_service'), check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description=( 'Associate a policy to a specific region and service ' 'combination.' ), operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_create_policy_assoc_for_region_and_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_policy_association_for_region_and_service', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check policy association for region and service.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}' ), 'method': 'GET', }, { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_check_policy_assoc_for_region_and_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % ('delete_policy_association_for_region_and_service'), check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete policy association for region and service.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_policy_assoc_for_region_and_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_policy_for_endpoint', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, 
scope_types=['system', 'project'], description='Get policy for endpoint.', operations=[ { 'path': ( '/v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy' ), 'method': 'GET', }, { 'path': ( '/v3/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_policy_for_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoints_for_policy', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List endpoints for policy.', operations=[ { 'path': ( '/v3/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints' ), 'method': 'GET', } ], deprecated_rule=deprecated_list_endpoints_for_policy, ), ] def list_rules(): return policy_association_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/project.py0000664000175000017500000002503500000000000022450 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER = ( '(' + base.SYSTEM_READER + ') or ' '(role:reader and domain_id:%(target.project.domain_id)s) or ' 'project_id:%(target.project.id)s' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER ) SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN = ( '(' + base.SYSTEM_ADMIN + ') or ' '(role:admin and domain_id:%(target.project.domain_id)s) or ' '(role:admin and project_id:%(target.project.id)s)' ) # This policy is only written to be used to protect the # /v3/users/{user_id}/projects API. It should not be used to protect # /v3/project APIs because the target information contained in the last check # is specific to user targets from the user id passed in the # /v3/users/{user_id}/project path. SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER = ( # System reader policy '(' + base.SYSTEM_READER + ') or ' # Domain reader policy '(role:reader and domain_id:%(target.user.domain_id)s) or ' # User accessing the API with a token they've obtained, matching # the context user_id to the target user id. 'user_id:%(target.user.id)s' ) ADMIN_SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER ) SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.SYSTEM_READER + ') or ' '(role:reader and domain_id:%(target.domain_id)s)' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER ) ADMIN_OR_DOMAIN_MANAGER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' '(role:manager and domain_id:%(target.project.domain_id)s)' ) DEPRECATED_REASON = ( "The project API is now aware of system scope and default roles." 
) deprecated_list_projects = policy.DeprecatedRule( name=base.IDENTITY % 'list_projects', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_get_project = policy.DeprecatedRule( name=base.IDENTITY % 'get_project', check_str=base.RULE_ADMIN_OR_TARGET_PROJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_user_projects = policy.DeprecatedRule( name=base.IDENTITY % 'list_user_projects', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_project = policy.DeprecatedRule( name=base.IDENTITY % 'create_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_project = policy.DeprecatedRule( name=base.IDENTITY % 'update_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_project = policy.DeprecatedRule( name=base.IDENTITY % 'delete_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_project_tags = policy.DeprecatedRule( name=base.IDENTITY % 'list_project_tags', check_str=base.RULE_ADMIN_OR_TARGET_PROJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_project_tag = policy.DeprecatedRule( name=base.IDENTITY % 'get_project_tag', check_str=base.RULE_ADMIN_OR_TARGET_PROJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_project_tag = policy.DeprecatedRule( name=base.IDENTITY % 'update_project_tags', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_project_tag = 
policy.DeprecatedRule( name=base.IDENTITY % 'create_project_tag', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_project_tag = policy.DeprecatedRule( name=base.IDENTITY % 'delete_project_tag', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_project_tags = policy.DeprecatedRule( name=base.IDENTITY % 'delete_project_tags', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) TAGS_DEPRECATED_REASON = """ As of the Train release, the project tags API understands how to handle system-scoped tokens in addition to project and domain tokens, making the API more accessible to users without compromising security or manageability for administrators. The new default policies for this API account for these changes automatically. """ project_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_project', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER, scope_types=['system', 'domain', 'project'], description='Show project details.', operations=[{'path': '/v3/projects/{project_id}', 'method': 'GET'}], deprecated_rule=deprecated_get_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_projects', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List projects.', operations=[{'path': '/v3/projects', 'method': 'GET'}], deprecated_rule=deprecated_list_projects, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_user_projects', check_str=ADMIN_SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER, scope_types=['system', 'domain', 'project'], description='List projects for user.', operations=[{'path': '/v3/users/{user_id}/projects', 'method': 'GET'}], deprecated_rule=deprecated_list_user_projects, ), policy.DocumentedRuleDefault( 
name=base.IDENTITY % 'create_project', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Create project.', operations=[{'path': '/v3/projects', 'method': 'POST'}], deprecated_rule=deprecated_create_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_project', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Update project.', operations=[{'path': '/v3/projects/{project_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_project', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Delete project.', operations=[{'path': '/v3/projects/{project_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_project_tags', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER, scope_types=['system', 'domain', 'project'], description='List tags for a project.', operations=[ {'path': '/v3/projects/{project_id}/tags', 'method': 'GET'}, {'path': '/v3/projects/{project_id}/tags', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_project_tags, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_project_tag', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER, scope_types=['system', 'domain', 'project'], description='Check if project contains a tag.', operations=[ { 'path': '/v3/projects/{project_id}/tags/{value}', 'method': 'GET', }, { 'path': '/v3/projects/{project_id}/tags/{value}', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_project_tag, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_project_tags', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Replace all tags on a project with the new set of tags.', operations=[ {'path': '/v3/projects/{project_id}/tags', 'method': 'PUT'} ], 
deprecated_rule=deprecated_update_project_tag, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_project_tag', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Add a single tag to a project.', operations=[ {'path': '/v3/projects/{project_id}/tags/{value}', 'method': 'PUT'} ], deprecated_rule=deprecated_create_project_tag, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_project_tags', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Remove all tags from a project.', operations=[ {'path': '/v3/projects/{project_id}/tags', 'method': 'DELETE'} ], deprecated_rule=deprecated_delete_project_tags, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_project_tag', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Delete a specified tag from project.', operations=[ { 'path': '/v3/projects/{project_id}/tags/{value}', 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_project_tag, ), ] def list_rules(): return project_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/project_endpoint.py0000664000175000017500000001245400000000000024351 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = """ As of the Train release, the project endpoint API now understands default roles and system-scoped tokens, making the API more granular by default without compromising security. The new policy defaults account for these changes automatically. Be sure to take these new defaults into consideration if you are relying on overrides in your deployment for the project endpoint API. """ deprecated_list_projects_for_endpoint = policy.DeprecatedRule( name=base.IDENTITY % 'list_projects_for_endpoint', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_add_endpoint_to_project = policy.DeprecatedRule( name=base.IDENTITY % 'add_endpoint_to_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_check_endpoint_in_project = policy.DeprecatedRule( name=base.IDENTITY % 'check_endpoint_in_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_endpoints_for_project = policy.DeprecatedRule( name=base.IDENTITY % 'list_endpoints_for_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_remove_endpoint_from_project = policy.DeprecatedRule( name=base.IDENTITY % 'remove_endpoint_from_project', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) project_endpoint_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_projects_for_endpoint', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List projects allowed to access an endpoint.', operations=[ { 'path': 
('/v3/OS-EP-FILTER/endpoints/{endpoint_id}/projects'), 'method': 'GET', } ], deprecated_rule=deprecated_list_projects_for_endpoint, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'add_endpoint_to_project', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Allow project to access an endpoint.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/projects/{project_id}/' 'endpoints/{endpoint_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_add_endpoint_to_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_endpoint_in_project', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Check if a project is allowed to access an endpoint.', operations=[ { 'path': ( '/v3/OS-EP-FILTER/projects/{project_id}/' 'endpoints/{endpoint_id}' ), 'method': 'GET', }, { 'path': ( '/v3/OS-EP-FILTER/projects/{project_id}/' 'endpoints/{endpoint_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_check_endpoint_in_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_endpoints_for_project', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List the endpoints a project is allowed to access.', operations=[ { 'path': ('/v3/OS-EP-FILTER/projects/{project_id}/endpoints'), 'method': 'GET', } ], deprecated_rule=deprecated_list_endpoints_for_project, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'remove_endpoint_from_project', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description=( 'Remove access to an endpoint from a project that has ' 'previously been given explicit access.' 
), operations=[ { 'path': ( '/v3/OS-EP-FILTER/projects/{project_id}/' 'endpoints/{endpoint_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_remove_endpoint_from_project, ), ] def list_rules(): return project_endpoint_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/protocol.py0000664000175000017500000001104000000000000022632 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The federated protocol API is now aware of system scope and default " "roles." 
) deprecated_get_protocol = policy.DeprecatedRule( name=base.IDENTITY % 'get_protocol', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_protocols = policy.DeprecatedRule( name=base.IDENTITY % 'list_protocols', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_protocol = policy.DeprecatedRule( name=base.IDENTITY % 'update_protocol', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_protocol = policy.DeprecatedRule( name=base.IDENTITY % 'create_protocol', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_protocol = policy.DeprecatedRule( name=base.IDENTITY % 'delete_protocol', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) protocol_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_protocol', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create federated protocol.', operations=[ { 'path': ( '/v3/OS-FEDERATION/identity_providers/{idp_id}/' 'protocols/{protocol_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_create_protocol, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_protocol', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update federated protocol.', operations=[ { 'path': ( '/v3/OS-FEDERATION/identity_providers/{idp_id}/' 'protocols/{protocol_id}' ), 'method': 'PATCH', } ], deprecated_rule=deprecated_update_protocol, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_protocol', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Get federated protocol.', 
operations=[ { 'path': ( '/v3/OS-FEDERATION/identity_providers/{idp_id}/' 'protocols/{protocol_id}' ), 'method': 'GET', } ], deprecated_rule=deprecated_get_protocol, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_protocols', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List federated protocols.', operations=[ { 'path': ( '/v3/OS-FEDERATION/identity_providers/{idp_id}/' 'protocols' ), 'method': 'GET', } ], deprecated_rule=deprecated_list_protocols, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_protocol', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete federated protocol.', operations=[ { 'path': ( '/v3/OS-FEDERATION/identity_providers/{idp_id}/' 'protocols/{protocol_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_protocol, ), ] def list_rules(): return protocol_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/region.py0000664000175000017500000000716100000000000022265 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The region API is now aware of system scope and default roles." 
) deprecated_create_region = policy.DeprecatedRule( name=base.IDENTITY % 'create_region', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_region = policy.DeprecatedRule( name=base.IDENTITY % 'update_region', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_region = policy.DeprecatedRule( name=base.IDENTITY % 'delete_region', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) region_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_region', check_str='', # NOTE(lbragstad): Both get_region and list_regions were accessible # with a valid token. By including both `system` and `project` # scope types, we're ensuring anyone with a valid token can still # pass these policies. Since the administrative policies of regions # require and administrator, it makes sense to isolate those to # `system` scope. 
scope_types=['system', 'domain', 'project'], description='Show region details.', operations=[ {'path': '/v3/regions/{region_id}', 'method': 'GET'}, {'path': '/v3/regions/{region_id}', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_regions', check_str='', scope_types=['system', 'domain', 'project'], description='List regions.', operations=[ {'path': '/v3/regions', 'method': 'GET'}, {'path': '/v3/regions', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_region', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create region.', operations=[ {'path': '/v3/regions', 'method': 'POST'}, {'path': '/v3/regions/{region_id}', 'method': 'PUT'}, ], deprecated_rule=deprecated_create_region, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_region', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update region.', operations=[{'path': '/v3/regions/{region_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_region, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_region', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete region.', operations=[{'path': '/v3/regions/{region_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_region, ), ] def list_rules(): return region_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/registered_limit.py0000664000175000017500000000523400000000000024334 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from keystone.common.policies import base registered_limit_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_registered_limit', check_str='', scope_types=['system', 'domain', 'project'], description='Show registered limit details.', operations=[ { 'path': '/v3/registered_limits/{registered_limit_id}', 'method': 'GET', }, { 'path': '/v3/registered_limits/{registered_limit_id}', 'method': 'HEAD', }, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_registered_limits', check_str='', scope_types=['system', 'domain', 'project'], description='List registered limits.', operations=[ {'path': '/v3/registered_limits', 'method': 'GET'}, {'path': '/v3/registered_limits', 'method': 'HEAD'}, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_registered_limits', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create registered limits.', operations=[{'path': '/v3/registered_limits', 'method': 'POST'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_registered_limit', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update registered limit.', operations=[ { 'path': '/v3/registered_limits/{registered_limit_id}', 'method': 'PATCH', } ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_registered_limit', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete registered limit.', operations=[ { 'path': '/v3/registered_limits/{registered_limit_id}', 'method': 'DELETE', } ], ), ] 
def list_rules(): return registered_limit_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/revoke_event.py0000664000175000017500000000175100000000000023475 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from keystone.common.policies import base revoke_event_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_revoke_events', check_str=base.RULE_SERVICE_OR_ADMIN, scope_types=['system', 'project'], description='List revocation events.', operations=[{'path': '/v3/OS-REVOKE/events', 'method': 'GET'}], ) ] def list_rules(): return revoke_event_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/role.py0000664000175000017500000001636100000000000021745 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base ADMIN_OR_SYSTEM_READER_OR_DOMAIN_MANAGER_ROLE = ( '(' + base.RULE_ADMIN_OR_SYSTEM_READER + ') or ' '(role:manager and rule:domain_managed_target_role)' ) # For the domain manager persona we only check for a domain id in the token # that is not None here to exclude scopes like a project manager. Since most # roles are global we do not have a target domain attribute to match against. ADMIN_OR_SYSTEM_READER_OR_DOMAIN_MANAGER = ( '(' + base.RULE_ADMIN_OR_SYSTEM_READER + ') or ' '(role:manager and not domain_id:None)' ) DEPRECATED_REASON = ( "The role API is now aware of system scope and default roles." ) deprecated_get_role = policy.DeprecatedRule( name=base.IDENTITY % 'get_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_role = policy.DeprecatedRule( name=base.IDENTITY % 'list_roles', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_role = policy.DeprecatedRule( name=base.IDENTITY % 'update_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_role = policy.DeprecatedRule( name=base.IDENTITY % 'create_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_role = policy.DeprecatedRule( name=base.IDENTITY % 'delete_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_get_domain_role = policy.DeprecatedRule( name=base.IDENTITY % 'get_domain_role', check_str=base.RULE_ADMIN_REQUIRED, 
deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_domain_roles = policy.DeprecatedRule( name=base.IDENTITY % 'list_domain_roles', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_update_domain_role = policy.DeprecatedRule( name=base.IDENTITY % 'update_domain_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_create_domain_role = policy.DeprecatedRule( name=base.IDENTITY % 'create_domain_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_domain_role = policy.DeprecatedRule( name=base.IDENTITY % 'delete_domain_role', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) role_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_role', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_MANAGER_ROLE, scope_types=['system', 'domain', 'project'], description='Show role details.', operations=[ {'path': '/v3/roles/{role_id}', 'method': 'GET'}, {'path': '/v3/roles/{role_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_roles', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='List roles.', operations=[ {'path': '/v3/roles', 'method': 'GET'}, {'path': '/v3/roles', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_role', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create role.', operations=[{'path': '/v3/roles', 'method': 'POST'}], deprecated_rule=deprecated_create_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_role', 
check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update role.', operations=[{'path': '/v3/roles/{role_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_role', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete role.', operations=[{'path': '/v3/roles/{role_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_domain_role', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Show domain role.', operations=[ {'path': '/v3/roles/{role_id}', 'method': 'GET'}, {'path': '/v3/roles/{role_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_domain_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_domain_roles', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, description='List domain roles.', scope_types=['system', 'project'], operations=[ {'path': '/v3/roles?domain_id={domain_id}', 'method': 'GET'}, {'path': '/v3/roles?domain_id={domain_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_domain_roles, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_domain_role', check_str=base.RULE_ADMIN_REQUIRED, description='Create domain role.', scope_types=['system', 'project'], operations=[{'path': '/v3/roles', 'method': 'POST'}], deprecated_rule=deprecated_create_domain_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_domain_role', check_str=base.RULE_ADMIN_REQUIRED, description='Update domain role.', scope_types=['system', 'project'], operations=[{'path': '/v3/roles/{role_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_domain_role, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_domain_role', check_str=base.RULE_ADMIN_REQUIRED, description='Delete domain role.', scope_types=['system', 'project'], operations=[{'path': 
'/v3/roles/{role_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_domain_role, ), ] def list_rules(): return role_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/role_assignment.py0000664000175000017500000000552600000000000024176 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.SYSTEM_READER + ') or ' '(role:reader and domain_id:%(target.domain_id)s)' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER ) SYSTEM_READER_OR_PROJECT_DOMAIN_READER_OR_PROJECT_ADMIN = ( '(' + base.SYSTEM_READER + ') or ' '(role:reader and domain_id:%(target.project.domain_id)s) or ' '(role:admin and project_id:%(target.project.id)s)' ) DEPRECATED_REASON = ( "The assignment API is now aware of system scope and default roles." 
) deprecated_list_role_assignments = policy.DeprecatedRule( name=base.IDENTITY % 'list_role_assignments', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_role_assignments_for_tree = policy.DeprecatedRule( name=base.IDENTITY % 'list_role_assignments_for_tree', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) role_assignment_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_role_assignments', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List role assignments.', operations=[ {'path': '/v3/role_assignments', 'method': 'GET'}, {'path': '/v3/role_assignments', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_role_assignments, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_role_assignments_for_tree', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description=( 'List all role assignments for a given tree of ' 'hierarchical projects.' ), operations=[ {'path': '/v3/role_assignments?include_subtree', 'method': 'GET'}, {'path': '/v3/role_assignments?include_subtree', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_role_assignments_for_tree, ), ] def list_rules(): return role_assignment_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/service.py0000664000175000017500000000711500000000000022441 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The service API is now aware of system scope and default roles." ) deprecated_get_service = policy.DeprecatedRule( name=base.IDENTITY % 'get_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_service = policy.DeprecatedRule( name=base.IDENTITY % 'list_services', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_service = policy.DeprecatedRule( name=base.IDENTITY % 'update_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_service = policy.DeprecatedRule( name=base.IDENTITY % 'create_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_service = policy.DeprecatedRule( name=base.IDENTITY % 'delete_service', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) service_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_service', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Show service details.', operations=[{'path': '/v3/services/{service_id}', 'method': 'GET'}], deprecated_rule=deprecated_get_service, ), 
policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_services', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List services.', operations=[{'path': '/v3/services', 'method': 'GET'}], deprecated_rule=deprecated_list_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_service', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create service.', operations=[{'path': '/v3/services', 'method': 'POST'}], deprecated_rule=deprecated_create_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_service', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update service.', operations=[{'path': '/v3/services/{service_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_service, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_service', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete service.', operations=[{'path': '/v3/services/{service_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_service, ), ] def list_rules(): return service_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/service_provider.py0000664000175000017500000001132300000000000024347 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The service provider API is now aware of system scope and default roles." ) deprecated_get_sp = policy.DeprecatedRule( name=base.IDENTITY % 'get_service_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_sp = policy.DeprecatedRule( name=base.IDENTITY % 'list_service_providers', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_sp = policy.DeprecatedRule( name=base.IDENTITY % 'update_service_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_sp = policy.DeprecatedRule( name=base.IDENTITY % 'create_service_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_sp = policy.DeprecatedRule( name=base.IDENTITY % 'delete_service_provider', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) service_provider_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_service_provider', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Create federated service provider.', operations=[ { 'path': ( '/v3/OS-FEDERATION/service_providers/' '{service_provider_id}' ), 'method': 'PUT', } ], deprecated_rule=deprecated_create_sp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_service_providers', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List federated service providers.', operations=[ {'path': '/v3/OS-FEDERATION/service_providers', 'method': 'GET'}, {'path': '/v3/OS-FEDERATION/service_providers', 
'method': 'HEAD'}, ], deprecated_rule=deprecated_list_sp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_service_provider', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='Get federated service provider.', operations=[ { 'path': ( '/v3/OS-FEDERATION/service_providers/' '{service_provider_id}' ), 'method': 'GET', }, { 'path': ( '/v3/OS-FEDERATION/service_providers/' '{service_provider_id}' ), 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_sp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_service_provider', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Update federated service provider.', operations=[ { 'path': ( '/v3/OS-FEDERATION/service_providers/' '{service_provider_id}' ), 'method': 'PATCH', } ], deprecated_rule=deprecated_update_sp, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_service_provider', check_str=base.RULE_ADMIN_REQUIRED, scope_types=['system', 'project'], description='Delete federated service provider.', operations=[ { 'path': ( '/v3/OS-FEDERATION/service_providers/' '{service_provider_id}' ), 'method': 'DELETE', } ], deprecated_rule=deprecated_delete_sp, ), ] def list_rules(): return service_provider_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/token.py0000664000175000017500000000573100000000000022123 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = ( "The token API is now aware of system scope and default roles." ) deprecated_check_token = policy.DeprecatedRule( name=base.IDENTITY % 'check_token', check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_validate_token = policy.DeprecatedRule( name=base.IDENTITY % 'validate_token', check_str=base.RULE_SERVICE_ADMIN_OR_TOKEN_SUBJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_revoke_token = policy.DeprecatedRule( name=base.IDENTITY % 'revoke_token', check_str=base.RULE_ADMIN_OR_TOKEN_SUBJECT, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) ADMIN_OR_TOKEN_SUBJECT = ( base.RULE_ADMIN_REQUIRED + ' or rule:token_subject' # nosec ) ADMIN_OR_SYSTEM_USER_OR_TOKEN_SUBJECT = ( base.RULE_ADMIN_REQUIRED + ' or ' '(role:reader and system_scope:all) or rule:token_subject' # nosec ) ADMIN_OR_SYSTEM_USER_OR_SERVICE_OR_TOKEN_SUBJECT = ( base.RULE_ADMIN_REQUIRED + ' or ' '(role:reader and system_scope:all) ' # nosec 'or rule:service_role or rule:token_subject' # nosec ) token_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'check_token', check_str=ADMIN_OR_SYSTEM_USER_OR_TOKEN_SUBJECT, scope_types=['system', 'domain', 'project'], description='Check a token.', operations=[{'path': '/v3/auth/tokens', 'method': 'HEAD'}], deprecated_rule=deprecated_check_token, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'validate_token', check_str=ADMIN_OR_SYSTEM_USER_OR_SERVICE_OR_TOKEN_SUBJECT, scope_types=['system', 'domain', 'project'], description='Validate a token.', operations=[{'path': '/v3/auth/tokens', 'method': 'GET'}], 
deprecated_rule=deprecated_validate_token, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'revoke_token', check_str=ADMIN_OR_TOKEN_SUBJECT, scope_types=['system', 'domain', 'project'], description='Revoke a token.', operations=[{'path': '/v3/auth/tokens', 'method': 'DELETE'}], deprecated_rule=deprecated_revoke_token, ), ] def list_rules(): return token_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/token_revocation.py0000664000175000017500000000336500000000000024355 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base DEPRECATED_REASON = """ The identity:revocation_list policy isn't used to protect any APIs in keystone now that the revocation list API has been deprecated and only returns a 410 or 403 depending on how keystone is configured. This policy can be safely removed from policy files. """ token_revocation_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'revocation_list', check_str=base.RULE_SERVICE_OR_ADMIN, # NOTE(lbragstad): Documenting scope_types here doesn't really make a # difference since this API is going to return an empty list regardless # of the token scope used in the API call. More-or-less just doing this # for consistency with other policies. 
scope_types=['system', 'project'], description='List revoked PKI tokens.', operations=[ {'path': '/v3/auth/tokens/OS-PKI/revoked', 'method': 'GET'} ], deprecated_for_removal=True, deprecated_since=versionutils.deprecated.TRAIN, deprecated_reason=DEPRECATED_REASON, ) ] def list_rules(): return token_revocation_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/trust.py0000664000175000017500000001551500000000000022165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base RULE_TRUSTOR = 'user_id:%(target.trust.trustor_user_id)s' RULE_TRUSTEE = 'user_id:%(target.trust.trustee_user_id)s' SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE = ( base.SYSTEM_READER + ' or ' + RULE_TRUSTOR + ' or ' + RULE_TRUSTEE ) SYSTEM_READER_OR_TRUSTOR = base.SYSTEM_READER + ' or ' + RULE_TRUSTOR SYSTEM_READER_OR_TRUSTEE = base.SYSTEM_READER + ' or ' + RULE_TRUSTEE SYSTEM_ADMIN_OR_TRUSTOR = base.SYSTEM_ADMIN + ' or ' + RULE_TRUSTOR ADMIN_OR_TRUSTOR = base.RULE_ADMIN_REQUIRED + ' or ' + RULE_TRUSTOR ADMIN_OR_SYSTEM_READER_OR_TRUSTOR = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + '(' + SYSTEM_READER_OR_TRUSTOR + ')' ) ADMIN_OR_SYSTEM_READER_OR_TRUSTEE = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + '(' + SYSTEM_READER_OR_TRUSTEE + ')' ) ADMIN_OR_SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + '(' + SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE + ')' ) DEPRECATED_REASON = ( "The trust API is now aware of system scope and default roles." 
) deprecated_list_trusts = policy.DeprecatedRule( name=base.IDENTITY % 'list_trusts', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_list_roles_for_trust = policy.DeprecatedRule( name=base.IDENTITY % 'list_roles_for_trust', check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_role_for_trust = policy.DeprecatedRule( name=base.IDENTITY % 'get_role_for_trust', check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_delete_trust = policy.DeprecatedRule( name=base.IDENTITY % 'delete_trust', check_str=RULE_TRUSTOR, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) deprecated_get_trust = policy.DeprecatedRule( name=base.IDENTITY % 'get_trust', check_str=RULE_TRUSTOR + ' or ' + RULE_TRUSTEE, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.TRAIN, ) trust_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_trust', check_str=base.RULE_TRUST_OWNER, # FIXME(lbragstad): Trusts have the ability to optionally include a # project, but until trusts deal with system scope it's not really # useful. For now, this should be a project only operation. 
scope_types=['project'], description='Create trust.', operations=[{'path': '/v3/OS-TRUST/trusts', 'method': 'POST'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_trusts', check_str=base.RULE_ADMIN_OR_SYSTEM_READER, scope_types=['system', 'project'], description='List trusts.', operations=[ {'path': '/v3/OS-TRUST/trusts', 'method': 'GET'}, {'path': '/v3/OS-TRUST/trusts', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_trusts, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_trusts_for_trustor', check_str=ADMIN_OR_SYSTEM_READER_OR_TRUSTOR, scope_types=['system', 'project'], description='List trusts for trustor.', operations=[ { 'path': '/v3/OS-TRUST/trusts?' 'trustor_user_id={trustor_user_id}', 'method': 'GET', }, { 'path': '/v3/OS-TRUST/trusts?' 'trustor_user_id={trustor_user_id}', 'method': 'HEAD', }, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_trusts_for_trustee', check_str=ADMIN_OR_SYSTEM_READER_OR_TRUSTEE, scope_types=['system', 'project'], description='List trusts for trustee.', operations=[ { 'path': '/v3/OS-TRUST/trusts?' 'trustee_user_id={trustee_user_id}', 'method': 'GET', }, { 'path': '/v3/OS-TRUST/trusts?' 
'trustee_user_id={trustee_user_id}', 'method': 'HEAD', }, ], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_roles_for_trust', check_str=ADMIN_OR_SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE, scope_types=['system', 'project'], description='List roles delegated by a trust.', operations=[ {'path': '/v3/OS-TRUST/trusts/{trust_id}/roles', 'method': 'GET'}, {'path': '/v3/OS-TRUST/trusts/{trust_id}/roles', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_roles_for_trust, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_role_for_trust', check_str=ADMIN_OR_SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE, scope_types=['system', 'project'], description='Check if trust delegates a particular role.', operations=[ { 'path': '/v3/OS-TRUST/trusts/{trust_id}/roles/{role_id}', 'method': 'GET', }, { 'path': '/v3/OS-TRUST/trusts/{trust_id}/roles/{role_id}', 'method': 'HEAD', }, ], deprecated_rule=deprecated_get_role_for_trust, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_trust', check_str=ADMIN_OR_TRUSTOR, scope_types=['system', 'project'], description='Revoke trust.', operations=[ {'path': '/v3/OS-TRUST/trusts/{trust_id}', 'method': 'DELETE'} ], deprecated_rule=deprecated_delete_trust, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_trust', check_str=ADMIN_OR_SYSTEM_READER_OR_TRUSTOR_OR_TRUSTEE, scope_types=['system', 'project'], description='Get trust.', operations=[ {'path': '/v3/OS-TRUST/trusts/{trust_id}', 'method': 'GET'}, {'path': '/v3/OS-TRUST/trusts/{trust_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_trust, ), ] def list_rules(): return trust_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/policies/user.py0000664000175000017500000001331300000000000021754 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from keystone.common.policies import base SYSTEM_READER_OR_DOMAIN_READER_OR_USER = ( '(' + base.SYSTEM_READER + ') or ' '(role:reader and token.domain.id:%(target.user.domain_id)s) or ' 'user_id:%(target.user.id)s' ) ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_USER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER_OR_USER ) SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.SYSTEM_READER + ') or (' + base.DOMAIN_READER + ')' ) ADMIN_SYSTEM_READER_OR_DOMAIN_READER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' + SYSTEM_READER_OR_DOMAIN_READER ) ADMIN_OR_DOMAIN_MANAGER = ( '(' + base.RULE_ADMIN_REQUIRED + ') or ' '(role:manager and token.domain.id:%(target.user.domain_id)s)' ) DEPRECATED_REASON = ( "The user API is now aware of system scope and default roles." 
) deprecated_get_user = policy.DeprecatedRule( name=base.IDENTITY % 'get_user', check_str=base.RULE_ADMIN_OR_OWNER, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_list_users = policy.DeprecatedRule( name=base.IDENTITY % 'list_users', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_create_user = policy.DeprecatedRule( name=base.IDENTITY % 'create_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_update_user = policy.DeprecatedRule( name=base.IDENTITY % 'update_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) deprecated_delete_user = policy.DeprecatedRule( name=base.IDENTITY % 'delete_user', check_str=base.RULE_ADMIN_REQUIRED, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.STEIN, ) user_policies = [ policy.DocumentedRuleDefault( name=base.IDENTITY % 'get_user', check_str=ADMIN_OR_SYSTEM_READER_OR_DOMAIN_READER_OR_USER, scope_types=['system', 'domain', 'project'], description='Show user details.', operations=[ {'path': '/v3/users/{user_id}', 'method': 'GET'}, {'path': '/v3/users/{user_id}', 'method': 'HEAD'}, ], deprecated_rule=deprecated_get_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_users', check_str=ADMIN_SYSTEM_READER_OR_DOMAIN_READER, scope_types=['system', 'domain', 'project'], description='List users.', operations=[ {'path': '/v3/users', 'method': 'GET'}, {'path': '/v3/users', 'method': 'HEAD'}, ], deprecated_rule=deprecated_list_users, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_projects_for_user', check_str='', # NOTE(lbragstad): We explicitly omit scope_types from this policy # because it's meant to be called with an unscoped token, which doesn't # apply to scope_types or its purpose. 
So long as the user is in the # system and has a valid token, they should be able to generate a list # of projects they have access to. description=( 'List all projects a user has access to via role assignments.' ), operations=[{'path': ' /v3/auth/projects', 'method': 'GET'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'list_domains_for_user', check_str='', # NOTE(lbragstad): We explicitly omit scope_types from this policy # because it's meant to be called with an unscoped token, which doesn't # apply to scope_types or its purpose. So long as the user is in the # system and has a valid token, they should be able to generate a list # of domains they have access to. description=( 'List all domains a user has access to via role assignments.' ), operations=[{'path': '/v3/auth/domains', 'method': 'GET'}], ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'create_user', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Create a user.', operations=[{'path': '/v3/users', 'method': 'POST'}], deprecated_rule=deprecated_create_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'update_user', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Update a user, including administrative password resets.', operations=[{'path': '/v3/users/{user_id}', 'method': 'PATCH'}], deprecated_rule=deprecated_update_user, ), policy.DocumentedRuleDefault( name=base.IDENTITY % 'delete_user', check_str=ADMIN_OR_DOMAIN_MANAGER, scope_types=['system', 'domain', 'project'], description='Delete a user.', operations=[{'path': '/v3/users/{user_id}', 'method': 'DELETE'}], deprecated_rule=deprecated_delete_user, ), ] def list_rules(): return user_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/profiler.py0000664000175000017500000000323300000000000021011 0ustar00zuulzuul00000000000000# Licensed under 
the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log import osprofiler.initializer import keystone.conf CONF = keystone.conf.CONF LOG = log.getLogger(__name__) def setup(name, host='0.0.0.0'): # nosec """Setup OSprofiler notifier and enable profiling. :param name: name of the service that will be profiled :param host: hostname or host IP address that the service will be running on. By default host will be set to 0.0.0.0, but more specified host name / address usage is highly recommended. """ if CONF.profiler.enabled: osprofiler.initializer.init_from_conf( conf=CONF, context={}, project="keystone", service=name, host=host ) LOG.info( "OSProfiler is enabled.\n" "Traces provided from the profiler " "can only be subscribed to using the same HMAC keys that " "are configured in Keystone's configuration file " "under the [profiler] section. \n To disable OSprofiler " "set in /etc/keystone/keystone.conf:\n" "[profiler]\n" "enabled=false" ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/provider_api.py0000664000175000017500000001053200000000000021652 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class ProviderAPIRegistry: __shared_object_state: dict = {} __registry: dict = {} __iter__ = __registry.__iter__ __getitem__ = __registry.__getitem__ locked = False def __init__(self): # NOTE(morgan): This rebinds __dict__ and allows all instances of # the provider API to share a common state. Any changes except # rebinding __dict__ will maintain the same state stored on the class # not the instance. This design pattern is preferable to # full singletons where state sharing is the important "feature" # derived from the "singleton" # # Use "super" to bypass the __setattr__ preventing changes to the # object itself. super().__setattr__('__dict__', self.__shared_object_state) def __getattr__(self, item): """Do attr lookup.""" try: return self.__registry[item] except KeyError: raise AttributeError("'ProviderAPIs' has no attribute %s" % item) def __setattr__(self, key, value): """Do not allow setting values on the registry object.""" raise RuntimeError( 'Programming Error: You may not set values on the ' 'ProviderAPIRegistry objects.' ) def _register_provider_api(self, name, obj): """Register an instance of a class as a provider api.""" if name == 'driver': raise ValueError('A provider may not be named "driver".') if self.locked: raise RuntimeError( 'Programming Error: The provider api registry has been ' 'locked (post configuration). Ensure all provider api ' 'managers are instantiated before locking.' 
) if name in self.__registry: raise DuplicateProviderError( '`%(name)s` has already been registered as an api ' 'provider by `%(prov)r`' % {'name': name, 'prov': self.__registry[name]} ) self.__registry[name] = obj def _clear_registry_instances(self): """ONLY USED FOR TESTING.""" self.__registry.clear() # Use super to allow setting around class implementation of __setattr__ super().__setattr__('locked', False) def lock_provider_registry(self): # Use super to allow setting around class implementation of __setattr__ super().__setattr__('locked', True) def deferred_provider_lookup(self, api, method): """Create descriptor that performs lookup of api and method on demand. For specialized cases, such as the enforcer "get_member_from_driver" which needs to be effectively a "classmethod", this method returns a smart descriptor object that does the lookup at runtime instead of at import time. :param api: The api to use, e.g. "identity_api" :type api: str :param method: the method on the api to return :type method: str """ class DeferredProviderLookup: def __init__(self, api, method): self.__api = api self.__method = method def __get__(self, instance, owner): api = getattr(ProviderAPIs, self.__api) return getattr(api, self.__method) return DeferredProviderLookup(api, method) class DuplicateProviderError(Exception): """Attempting to register a duplicate API provider.""" class ProviderAPIMixin: """Allow referencing provider apis on self via __getattr__. Be sure this class is first in the class definition for inheritance. 
""" def __getattr__(self, item): """Magic getattr method.""" try: return getattr(ProviderAPIs, item) except AttributeError: return self.__getattribute__(item) ProviderAPIs = ProviderAPIRegistry() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.510114 keystone-26.0.0/keystone/common/rbac_enforcer/0000775000175000017500000000000000000000000021406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/rbac_enforcer/__init__.py0000664000175000017500000000121000000000000023511 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.rbac_enforcer.enforcer import RBACEnforcer # noqa __all__ = ('RBACEnforcer',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/rbac_enforcer/enforcer.py0000664000175000017500000005417500000000000023577 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import flask from oslo_log import log from oslo_policy import opts from oslo_policy import policy as common_policy from oslo_utils import strutils from keystone.common import authorization from keystone.common import context from keystone.common import policies from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDER_APIS = provider_api.ProviderAPIs _POSSIBLE_TARGET_ACTIONS = frozenset( [ rule.name for rule in policies.list_rules() if not rule.deprecated_for_removal ] ) _ENFORCEMENT_CHECK_ATTR = 'keystone:RBAC:enforcement_called' # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 DEFAULT_POLICY_FILE = 'policy.yaml' opts.set_defaults(CONF, DEFAULT_POLICY_FILE) class RBACEnforcer: """Enforce RBAC on API calls.""" __shared_state__: dict = {} __ENFORCER = None ACTION_STORE_ATTR = 'keystone:RBAC:action_name' # FOR TESTS ONLY suppress_deprecation_warnings = False def __init__(self): # NOTE(morgan): All Enforcer Instances use the same shared state; # BORG pattern. 
self.__dict__ = self.__shared_state__ def _check_deprecated_rule(self, action): def _name_is_changing(rule): deprecated_rule = rule.deprecated_rule return ( deprecated_rule and deprecated_rule.name != rule.name and deprecated_rule.name in self._enforcer.file_rules ) def _check_str_is_changing(rule): deprecated_rule = rule.deprecated_rule return ( deprecated_rule and deprecated_rule.check_str != rule.check_str and rule.name not in self._enforcer.file_rules ) def _is_deprecated_for_removal(rule): return ( rule.deprecated_for_removal and rule.name in self._enforcer.file_rules ) def _emit_warning(): if not self._enforcer._warning_emitted: LOG.warning( "Deprecated policy rules found. Use " "oslopolicy-policy-generator and " "oslopolicy-policy-upgrade to detect and resolve " "deprecated policies in your configuration." ) self._enforcer._warning_emitted = True registered_rule = self._enforcer.registered_rules.get(action) if not registered_rule: return if ( _name_is_changing(registered_rule) or _check_str_is_changing(registered_rule) or _is_deprecated_for_removal(registered_rule) ): _emit_warning() def _enforce(self, credentials, action, target, do_raise=True): """Verify that the action is valid on the target in this context. This method is for cases that exceed the base enforcer functionality (notably for compatibility with `@protected` style decorators. :param credentials: user credentials :param action: string representing the action to be checked, which should be colon separated for clarity. :param target: dictionary representing the object of the action for object creation this should be a dictionary representing the location of the object e.g. {'project_id': object.project_id} :raises keystone.exception.Forbidden: If verification fails. Actions should be colon separated for clarity. 
For example: * identity:list_users """ # Add the exception arguments if asked to do a raise extra = {} if do_raise: extra.update( exc=exception.ForbiddenAction, action=action, do_raise=do_raise ) try: result = self._enforcer.enforce( rule=action, target=target, creds=credentials, **extra ) self._check_deprecated_rule(action) return result except common_policy.InvalidScope: raise exception.ForbiddenAction(action=action) def _reset(self): # NOTE(morgan): Used for TEST purposes only. self.__ENFORCER = None @property def _enforcer(self): # The raw oslo-policy enforcer object if self.__ENFORCER is None: self.__ENFORCER = common_policy.Enforcer(CONF) # NOTE(cmurphy) when running in the keystone server, suppress # deprecation warnings for individual policy rules. Instead, we log # a single notification at enforcement time indicating the # oslo.policy tools the operator can use to detect and resolve # deprecated policies. If there is no request context here, that # means external tooling such as the oslo.policy tools are running # this code, in which case we do want the full deprecation warnings # emitted for individual polcy rules. 
if flask.has_request_context(): self.__ENFORCER.suppress_deprecation_warnings = True # NOTE(cmurphy) Tests may explicitly disable these warnings to # prevent an explosion of test logs if self.suppress_deprecation_warnings: self.__ENFORCER.suppress_deprecation_warnings = True self.register_rules(self.__ENFORCER) self.__ENFORCER._warning_emitted = False return self.__ENFORCER @staticmethod def _extract_filter_values(filters): """Extract filter data from query params for RBAC enforcement.""" filters = filters or [] target = { i: flask.request.args[i] for i in filters if i in flask.request.args } if target: if LOG.logger.getEffectiveLevel() <= log.DEBUG: LOG.debug( 'RBAC: Adding query filter params (%s)', ', '.join([f'{k}={v}' for k, v in target.items()]), ) return target @staticmethod def _extract_member_target_data(member_target_type, member_target): """Build some useful target data. :param member_target_type: what type of target, e.g. 'user' :type member_target_type: str or None :param member_target: reference of the target data :type member_target: dict or None :returns: constructed target dict or empty dict :rtype: dict """ ret_dict = {} if (member_target is not None and member_target_type is None) or ( member_target is None and member_target_type is not None ): LOG.warning( 'RBAC: Unknown target type or target reference. ' 'Rejecting as unauthorized. ' '(member_target_type=%(target_type)r, ' 'member_target=%(target_ref)r)', { 'target_type': member_target_type, 'target_ref': member_target, }, ) # Fast exit. return ret_dict if member_target is not None and member_target_type is not None: ret_dict['target'] = {member_target_type: member_target} else: # Try and do some magic loading based upon the resource we've # matched in our route. This is mostly so we can have a level of # automatic pulling in the resource; strictly for some added # DRY capabilities. In an ideal world the target is always passed # in explicitly. 
if flask.request.endpoint: # This only works for cases of Flask-RESTful, or carefully # crafted endpoints that live on a class. Ultimately, there # should be more protection against something wonky # here. resource = flask.current_app.view_functions[ flask.request.endpoint ].view_class try: member_name = getattr(resource, 'member_key', None) except ValueError: # NOTE(morgan): In the case that the ResourceBase keystone # class is used, we raise a value error when member_key # has not been set on the class. This is perfectly # normal and acceptable. Set member_name to None as though # it wasn't set. member_name = None func = getattr(resource, 'get_member_from_driver', None) if member_name is not None and callable(func): key = '%s_id' % member_name if key in (flask.request.view_args or {}): # NOTE(morgan): For most correct setup, instantiate the # view_class. There is no current support for passing # extra args to the constructor of the view_class like # .as_view() method would actually do. In this case # perform a simple instantiation to represent the # `self` pass to the unbound method. # # TODO(morgan): add (future) support for passing class # instantiation args. ret_dict['target'] = { member_name: func(flask.request.view_args[key]) } return ret_dict @staticmethod def _extract_policy_check_credentials(): # Pull out the auth context return flask.request.environ.get(authorization.AUTH_CONTEXT_ENV, {}) @classmethod def _extract_subject_token_target_data(cls): ret_dict = {} window_seconds = 0 # NOTE(morgan): Populate the subject token data into # the policy dict at "target.token". In all liklyhood # it is un-interesting to populate this data outside # of the auth paths. 
target = 'token' subject_token = flask.request.headers.get('X-Subject-Token') access_rules_support = flask.request.headers.get( authorization.ACCESS_RULES_HEADER ) if subject_token is not None: allow_expired = strutils.bool_from_string( flask.request.args.get('allow_expired', False), default=False ) if allow_expired: window_seconds = CONF.token.allow_expired_window token = PROVIDER_APIS.token_provider_api.validate_token( subject_token, window_seconds=window_seconds, access_rules_support=access_rules_support, ) # TODO(morgan): Expand extracted data from the subject token. ret_dict[target] = {} ret_dict[target]['user_id'] = token.user_id try: user_domain_id = token.user['domain_id'] except exception.UnexpectedError: user_domain_id = None if user_domain_id: ret_dict[target].setdefault('user', {}) ret_dict[target]['user'].setdefault('domain', {}) ret_dict[target]['user']['domain']['id'] = user_domain_id return ret_dict @staticmethod def _get_oslo_req_context(): return flask.request.environ.get(context.REQUEST_CONTEXT_ENV, None) @classmethod def _assert_is_authenticated(cls): ctx = cls._get_oslo_req_context() if ctx is None: LOG.warning( 'RBAC: Error reading the request context generated by ' 'the Auth Middleware (there is no context). Rejecting ' 'request as unauthorized.' ) raise exception.Unauthorized( _( 'Internal error processing authentication and ' 'authorization.' ) ) if not ctx.authenticated: raise exception.Unauthorized( _('auth_context did not decode anything useful') ) @classmethod def _shared_admin_auth_token_set(cls): ctx = cls._get_oslo_req_context() return getattr(ctx, 'is_admin', False) @classmethod def enforce_call( cls, enforcer=None, action=None, target_attr=None, member_target_type=None, member_target=None, filters=None, build_target=None, ): """Enforce RBAC on the current request. This will do some legwork and then instantiate the Enforcer if an enforcer is not passed in. 
:param enforcer: A pre-instantiated Enforcer object (optional) :type enforcer: :class:`RBACEnforcer` :param action: the name of the rule/policy enforcement to be checked against, e.g. `identity:get_user` (optional may be replaced by decorating the method/function with `policy_enforcer_action`. :type action: str :param target_attr: complete override of the target data. This will replace all other generated target data meaning `member_target_type` and `member_target` are ignored. This will also prevent extraction of data from the X-Subject-Token. The `target` dict should contain a series of key-value pairs such as `{'user': user_ref_dict}`. :type target_attr: dict :param member_target_type: the type of the target, e.g. 'user'. Both this and `member_target` must be passed if either is passed. :type member_target_type: str :param member_target: the (dict form) reference of the member object. Both this and `member_target_type` must be passed if either is passed. :type member_target: dict :param filters: A variable number of optional string filters, these are used to extract values from the query params. The filters are added to the request data that is passed to the enforcer and may be used to determine policy action. In practice these are mainly supplied in the various "list" APIs and are un-used in the default supplied policies. :type filters: iterable :param build_target: A function to build the target for enforcement. This is explicitly done after authentication in order to not leak existance data before auth. :type build_target: function """ # NOTE(morgan) everything in the policy_dict may be used by the policy # DSL to action on RBAC and request information/response data. policy_dict = {} # If "action" has not explicitly been overridden, see if it is set in # Flask.g app-context (per-request thread local) meaning the # @policy_enforcer_action decorator was used. 
action = action or getattr(flask.g, cls.ACTION_STORE_ATTR, None) if action not in _POSSIBLE_TARGET_ACTIONS: LOG.warning( 'RBAC: Unknown enforcement action name `%s`. ' 'Rejecting as Forbidden, this is a programming error ' 'and a bug should be filed with as much information ' 'about the request that caused this as possible.', action, ) # NOTE(morgan): While this is an internal error, a 500 is never # desirable, we have handled the case and the most appropriate # response here is to issue a 403 (FORBIDDEN) to any API calling # enforce_call with an inappropriate action/name to look up the # policy rule. This is simply a short-circuit as the enforcement # code raises a 403 on an unknown action (in keystone) by default. raise exception.Forbidden( message=_( 'Internal RBAC enforcement error, invalid rule (action) ' 'name.' ) ) # Mark flask.g as "enforce_call" has been called. This should occur # before anything except the "is this a valid action" check, ensuring # all proper "after request" checks pass, showing that the API has # enforcement. setattr(flask.g, _ENFORCEMENT_CHECK_ATTR, True) # Assert we are actually authenticated cls._assert_is_authenticated() # Check if "is_admin", this is in support of the old "admin auth token" # middleware with a shared "admin" token for auth if cls._shared_admin_auth_token_set(): LOG.warning('RBAC: Bypassing authorization') return # NOTE(morgan): !!! ORDER OF THESE OPERATIONS IS IMPORTANT !!! # The lowest priority values are set first and the highest priority # values are set last. # Populate the input attributes (view args) directly to the policy # dict. This is to allow the policy engine to have access to the # view args for substitution. This is to mirror the old @protected # mechanism and ensure current policy files continue to work as # expected. policy_dict.update(flask.request.view_args) # Get the Target Data Set. 
if target_attr is None and build_target is None: try: policy_dict.update( cls._extract_member_target_data( member_target_type, member_target ) ) except exception.NotFound: # DEBUG LOG and bubble up the 404 error. This is expected # behavior. This likely should be specific in each API. This # should be revisited in the future and each API should make # the explicit "existence" checks before enforcement. LOG.debug( 'Extracting inferred target data resulted in ' '"NOT FOUND (404)".' ) raise except Exception as e: # nosec # NOTE(morgan): Errors should never bubble up at this point, # if there is an error getting the target, log it and move # on. Raise an explicit 403, we have failed policy checks. LOG.warning( 'Unable to extract inferred target data during ' 'enforcement' ) LOG.debug(e, exc_info=True) raise exception.ForbiddenAction(action=action) # Special Case, extract and add subject_token data. subj_token_target_data = cls._extract_subject_token_target_data() if subj_token_target_data: policy_dict.setdefault('target', {}).update( subj_token_target_data ) else: if target_attr and build_target: raise ValueError( 'Programming Error: A target_attr or ' 'build_target must be provided, but not both' ) policy_dict['target'] = target_attr or build_target() # Pull the data from the submitted json body to generate # appropriate input/target attributes, we take an explicit copy here # to ensure we're not somehow corrupting json_input = flask.request.get_json(force=True, silent=True) or {} policy_dict.update(json_input.copy()) # Generate the filter_attr dataset. 
policy_dict.update(cls._extract_filter_values(filters)) flattened = utils.flatten_dict(policy_dict) if LOG.logger.getEffectiveLevel() <= log.DEBUG: # LOG the Args args_str = ', '.join( [ f'{k}={v}' for k, v in (flask.request.view_args or {}).items() ] ) args_str = strutils.mask_password(args_str) LOG.debug( 'RBAC: Authorizing `%(action)s(%(args)s)`', {'action': action, 'args': args_str}, ) ctxt = cls._get_oslo_req_context() # Instantiate the enforcer object if needed. enforcer_obj = enforcer or cls() enforcer_obj._enforce( credentials=ctxt, action=action, target=flattened ) LOG.debug('RBAC: Authorization granted') @classmethod def policy_enforcer_action(cls, action): """Decorator to set policy enforcement action name.""" if action not in _POSSIBLE_TARGET_ACTIONS: raise ValueError( 'PROGRAMMING ERROR: Action must reference a ' 'valid Keystone policy enforcement name.' ) def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): # Set the action in g on a known attr so we can reference it # later. setattr(flask.g, cls.ACTION_STORE_ATTR, action) return f(*args, **kwargs) return inner return wrapper @staticmethod def register_rules(enforcer): enforcer.register_defaults(policies.list_rules()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/rbac_enforcer/policy.py0000664000175000017500000000234500000000000023263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(morgan): This entire module is to provide compatibility for the old # @protected style decorator enforcement. All new enforcement should directly # reference the Enforcer object itself. from keystone.common.rbac_enforcer import enforcer from keystone import conf CONF = conf.CONF # NOTE(morgan): Shared-state enforcer object _ENFORCER = enforcer.RBACEnforcer() def reset(): _ENFORCER._reset() def get_enforcer(): """Entrypoint that must return the raw oslo.policy enforcer obj. This is utilized by the command-line policy tools. :returns: :class:`oslo_policy.policy.Enforcer` """ CONF(project='keystone') return _ENFORCER._enforcer enforce = _ENFORCER._enforce ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/render_token.py0000664000175000017500000001350300000000000021647 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import provider_api import keystone.conf CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def render_token_response_from_model(token, include_catalog=True): token_reference = { 'token': { 'methods': token.methods, 'user': { 'domain': { 'id': token.user_domain['id'], 'name': token.user_domain['name'], }, 'id': token.user_id, 'name': token.user['name'], 'password_expires_at': token.user['password_expires_at'], }, 'audit_ids': token.audit_ids, 'expires_at': token.expires_at, 'issued_at': token.issued_at, } } if token.system_scoped: token_reference['token']['roles'] = token.roles token_reference['token']['system'] = {'all': True} elif token.domain_scoped: token_reference['token']['domain'] = { 'id': token.domain['id'], 'name': token.domain['name'], } token_reference['token']['roles'] = token.roles elif token.trust_scoped: token_reference['token']['OS-TRUST:trust'] = { 'id': token.trust_id, 'trustor_user': {'id': token.trustor['id']}, 'trustee_user': {'id': token.trustee['id']}, 'impersonation': token.trust['impersonation'], } token_reference['token']['project'] = { 'domain': { 'id': token.project_domain['id'], 'name': token.project_domain['name'], }, 'id': token.trust_project['id'], 'name': token.trust_project['name'], } if token.trust.get('impersonation'): trustor_domain = PROVIDERS.resource_api.get_domain( token.trustor['domain_id'] ) token_reference['token']['user'] = { 'domain': { 'id': trustor_domain['id'], 'name': trustor_domain['name'], }, 'id': token.trustor['id'], 'name': token.trustor['name'], 'password_expires_at': token.trustor['password_expires_at'], } token_reference['token']['roles'] = token.roles elif token.project_scoped: token_reference['token']['project'] = { 'domain': { 'id': token.project_domain['id'], 'name': token.project_domain['name'], }, 'id': token.project['id'], 'name': token.project['name'], } token_reference['token']['is_domain'] = token.project.get( 'is_domain', False ) token_reference['token']['roles'] 
= token.roles ap_name = CONF.resource.admin_project_name ap_domain_name = CONF.resource.admin_project_domain_name if ap_name and ap_domain_name: is_ap = ( token.project['name'] == ap_name and ap_domain_name == token.project_domain['name'] ) token_reference['token']['is_admin_project'] = is_ap if include_catalog and not token.unscoped: user_id = token.user_id if token.trust_id: user_id = token.trust['trustor_user_id'] catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, token.project_id ) token_reference['token']['catalog'] = catalog sps = PROVIDERS.federation_api.get_enabled_service_providers() if sps: token_reference['token']['service_providers'] = sps if token.is_federated: PROVIDERS.federation_api.get_idp(token.identity_provider_id) federated_dict = dict( groups=token.federated_groups, identity_provider={'id': token.identity_provider_id}, protocol={'id': token.protocol_id}, ) token_reference['token']['user']['OS-FEDERATION'] = federated_dict del token_reference['token']['user']['password_expires_at'] if token.access_token_id: token_reference['token']['OS-OAUTH1'] = { 'access_token_id': token.access_token_id, 'consumer_id': token.access_token['consumer_id'], } if token.application_credential_id: key = 'application_credential' token_reference['token'][key] = {} token_reference['token'][key]['id'] = token.application_credential[ 'id' ] token_reference['token'][key]['name'] = token.application_credential[ 'name' ] restricted = not token.application_credential['unrestricted'] token_reference['token'][key]['restricted'] = restricted if token.application_credential.get('access_rules'): token_reference['token'][key]['access_rules'] = ( token.application_credential['access_rules'] ) # NOTE(noonedeadpunk): We are using getattr as previously cached tokens # won't have the attribute and keystone will fail # with AttributeError for TTL of the cache. 
token_oauth2_thumbprint = getattr(token, 'oauth2_thumbprint', None) if token_oauth2_thumbprint: token_reference['token']['oauth2_credential'] = { 'x5t#S256': token.oauth2_thumbprint } return token_reference ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.510114 keystone-26.0.0/keystone/common/resource_options/0000775000175000017500000000000000000000000022216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/resource_options/__init__.py0000664000175000017500000000113100000000000024323 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.resource_options.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/resource_options/core.py0000664000175000017500000002236400000000000023527 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Options specific to resources managed by Keystone (Domain, User, etc).""" from keystone.common import validation from keystone.i18n import _ def _validator(value): return def boolean_validator(value): if value not in (True, False): raise TypeError(_('Expected boolean value, got %r') % type(value)) def ref_mapper_to_dict_options(ref): """Convert the values in _resource_option_mapper to options dict. NOTE: this is to be called from the relevant `to_dict` methods or similar and must be called from within the active session context. :param ref: the DB model ref to extract options from :returns: Dict of options as expected to be returned out of to_dict in the `options` key. """ options = {} for opt in ref._resource_option_mapper.values(): if opt.option_id in ref.resource_options_registry.option_ids: r_opt = ref.resource_options_registry.get_option_by_id( opt.option_id ) if r_opt is not None: options[r_opt.option_name] = opt.option_value return options def get_resource_option(model, option_id): """Get the resource option information from the model's mapper.""" if option_id in model._resource_option_mapper.keys(): return model._resource_option_mapper[option_id] return None def resource_options_ref_to_mapper(ref, option_class): """Convert the _resource_options property-dict to options attr map. The model must have the resource option mapper located in the ``_resource_option_mapper`` attribute. The model must have the resource option registry located in the ``resource_options_registry`` attribute. The option dict with key(opt_id), value(opt_value) will be pulled from ``ref._resource_options``. NOTE: This function MUST be called within the active writer session context! :param ref: The DB model reference that is actually stored to the backend. :param option_class: Class that is used to store the resource option in the DB. 
""" options = getattr(ref, '_resource_options', None) if options is not None: # To ensure everything is clean, no lingering refs. delattr(ref, '_resource_options') else: # _resource_options didn't exist. Work from an empty set. options = {} # NOTE(notmorgan): explicitly use .keys() here as the attribute mapper # has some oddities at times. This guarantees we are working with keys. set_options = set(ref._resource_option_mapper.keys()) # Get any options that are not registered and slate them for removal from # the DB. This will delete unregistered options. clear_options = set_options.difference( ref.resource_options_registry.option_ids ) options.update({x: None for x in clear_options}) # Set the resource options for user in the Attribute Mapping. for r_opt_id, r_opt_value in options.items(): if r_opt_value is None: # Delete any option set explicitly to None, ignore unset # options. ref._resource_option_mapper.pop(r_opt_id, None) else: # Set any options on the user_ref itself. opt_obj = option_class( option_id=r_opt_id, option_value=r_opt_value ) ref._resource_option_mapper[r_opt_id] = opt_obj class ResourceOptionRegistry: def __init__(self, registry_name): self._registered_options = {} self._registry_type = registry_name @property def option_names(self): return {opt.option_name for opt in self.options} @property def options_by_name(self): return { opt.option_name: opt for opt in self._registered_options.values() } @property def options(self): return self._registered_options.values() @property def option_ids(self): return set(self._registered_options.keys()) def get_option_by_id(self, opt_id): return self._registered_options.get(opt_id, None) def get_option_by_name(self, name): for option in self._registered_options.values(): if name == option.option_name: return option return None @property def json_schema(self): schema = { 'type': 'object', 'properties': {}, 'additionalProperties': False, } for opt in self.options: if opt.json_schema is not None: # NOTE(notmorgan): 
All options are nullable. Null indicates # the option should be reset and removed from the DB store. schema['properties'][opt.option_name] = validation.nullable( opt.json_schema ) else: # NOTE(notmorgan): without 'type' being specified, this # can be of any-type. We are simply specifying no interesting # values beyond that the property may exist here. schema['properties'][opt.option_name] = {} return schema def register_option(self, option): if option in self.options: # Re-registering the exact same option does nothing. return if option.option_id in self._registered_options: raise ValueError( _('Option %(option_id)s already defined in %(registry)s.') % { 'option_id': option.option_id, 'registry': self._registry_type, } ) if option.option_name in self.option_names: raise ValueError( _('Option %(option_name)s already defined in %(registry)s') % { 'option_name': option.option_name, 'registry': self._registry_type, } ) self._registered_options[option.option_id] = option class ResourceOption: def __init__( self, option_id, option_name, validator=_validator, json_schema_validation=None, ): """The base object to define the option(s) to be stored in the DB. :param option_id: The ID of the option. This will be used to lookup the option value from the DB and should not be changed once defined as the values will no longer be correctly mapped to the keys in the user_ref when retrieving the data from the DB. :type option_id: str :param option_name: The name of the option. This value will be used to map the value from the user request on a resource update to the correct option id to be stored in the database. This value should not be changed once defined as it will change the resulting keys in the user_ref. :type option_name: str :param validator: A callable that raises TypeError if the value to be persisted is incorrect. A single argument of the value to be persisted will be passed to it. No return value is expected. 
:type validator: callable :param json_schema_validation: Dictionary defining the JSON schema validation for the option itself. This is used to generate the JSON Schema validator(s) used at the API layer :type json_schema_validation: dict """ if not isinstance(option_id, str) and len(option_id) == 4: raise TypeError( _('`option_id` must be a string, got %r') % option_id ) elif len(option_id) != 4: raise ValueError( _('`option_id` must be 4 characters in length. Got %r') % option_id ) if not isinstance(option_name, str): raise TypeError( _('`option_name` must be a string. Got %r') % option_name ) self._option_id = option_id self._option_name = option_name self.validator = validator self._json_schema_validation = json_schema_validation @property def json_schema(self): return self._json_schema_validation or None @property def option_name(self): # NOTE(notmorgan) Option IDs should never be set outside of definition # time. return self._option_name @property def option_id(self): # NOTE(notmorgan) Option IDs should never be set outside of definition # time. return self._option_id ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.510114 keystone-26.0.0/keystone/common/resource_options/options/0000775000175000017500000000000000000000000023711 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/resource_options/options/__init__.py0000664000175000017500000000240400000000000026022 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # All resource options are defined in this module. The individual resource # implementations explicitly register the options that are desired directly # in their individual registry. Each entry is imported from it's own # module directly to allow for custom implementation details as needed. from keystone.common.resource_options.options import immutable __all__ = ( 'IMMUTABLE_OPT', 'check_resource_immutable', 'check_immutable_update', 'check_immutable_delete', ) # Immutable Option and helper functions IMMUTABLE_OPT = immutable.IMMUTABLE_OPT check_resource_immutable = immutable.check_resource_immutable check_immutable_update = immutable.check_immutable_update check_immutable_delete = immutable.check_immutable_delete ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/resource_options/options/immutable.py0000664000175000017500000000551400000000000026247 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Implement the "Immutable" resource option from keystone.common.resource_options import core as ro_core from keystone.common.validation import parameter_types from keystone import exception IMMUTABLE_OPT = ro_core.ResourceOption( option_id='IMMU', option_name='immutable', validator=ro_core.boolean_validator, json_schema_validation=parameter_types.boolean, ) def check_resource_immutable(resource_ref): """Check to see if a resource is immutable. :param resource_ref: a dict reference of a resource to inspect """ return resource_ref.get('options', {}).get( IMMUTABLE_OPT.option_name, False ) def check_immutable_update( original_resource_ref, new_resource_ref, type, resource_id ): """Check if an update is allowed to an immutable resource. Valid cases where an update is allowed: * Resource is not immutable * Resource is immutable, and update to set immutable to False or None :param original_resource_ref: a dict resource reference representing the current resource :param new_resource_ref: a dict reference of the updates to perform :param type: the resource type, e.g. 'project' :param resource_id: the id of the resource (e.g. project['id']), usually a UUID :raises: ResourceUpdateForbidden """ immutable = check_resource_immutable(original_resource_ref) if immutable: new_options = new_resource_ref.get('options', {}) if ( (len(new_resource_ref.keys()) > 1) or (IMMUTABLE_OPT.option_name not in new_options) or (new_options[IMMUTABLE_OPT.option_name] not in (False, None)) ): raise exception.ResourceUpdateForbidden( type=type, resource_id=resource_id ) def check_immutable_delete(resource_ref, resource_type, resource_id): """Check if a delete is allowed on a resource. :param resource_ref: dict reference of the resource :param resource_type: resource type (str) e.g. 'project' :param resource_id: id of the resource (str) e.g. 
project['id'] :raises: ResourceDeleteForbidden """ if check_resource_immutable(resource_ref): raise exception.ResourceDeleteForbidden( type=resource_type, resource_id=resource_id ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.510114 keystone-26.0.0/keystone/common/sql/0000775000175000017500000000000000000000000017413 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/__init__.py0000664000175000017500000000117100000000000021524 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common.sql.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/alembic.ini0000664000175000017500000000516600000000000021520 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = %(here)s/migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # sys.path path, will be prepended to sys.path if present. # defaults to the current working directory. prepend_sys_path = . # timezone to use when rendering the date within the migration file # as well as the filename. 
# If specified, requires the python-dateutil library that can be # installed by adding `alembic[tz]` to the pip requirements # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field # truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; This defaults # to keystone/common/sql/migrations/versions. When using multiple version # directories, initial revisions must be specified with --version-path. # The path separator used here should be the separator specified by "version_path_separator" # version_locations = %(here)s/bar:%(here)s/bat:keystone/common/sql/migrations/versions # version path separator; As mentioned above, this is the character used to split # version_locations. Valid values are: # # version_path_separator = : # version_path_separator = ; # version_path_separator = space # version_path_separator = os # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = sqlite:///keystone.db [post_write_hooks] # post_write_hooks defines scripts or Python functions that are run # on newly generated revision scripts. 
See the documentation for further # detail and examples # format using "black" - use the console_scripts runner, against the "black" entrypoint # hooks = black # black.type = console_scripts # black.entrypoint = black # black.options = -l 79 REVISION_SCRIPT_FILENAME # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/core.py0000664000175000017500000005237100000000000020725 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQL backends for the various services. Before using this module, call initialize(). This has to be done before CONF() because it sets up configuration options. 
""" import datetime import functools from oslo_db import exception as db_exception from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import models from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils from osprofiler import opts as profiler import osprofiler.sqlalchemy import sqlalchemy as sql from sqlalchemy.ext import declarative from sqlalchemy.orm.attributes import flag_modified from sqlalchemy.orm.attributes import InstrumentedAttribute from sqlalchemy import types as sql_types from keystone.common import driver_hints from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) ModelBase = declarative.declarative_base() # For exporting to other modules Column = sql.Column Index = sql.Index String = sql.String Integer = sql.Integer Enum = sql.Enum ForeignKey = sql.ForeignKey DateTime = sql.DateTime Date = sql.Date TIMESTAMP = sql.TIMESTAMP IntegrityError = sql.exc.IntegrityError DBDuplicateEntry = db_exception.DBDuplicateEntry OperationalError = sql.exc.OperationalError NotFound = sql.orm.exc.NoResultFound Boolean = sql.Boolean Text = sql.Text UniqueConstraint = sql.UniqueConstraint PrimaryKeyConstraint = sql.PrimaryKeyConstraint joinedload = sql.orm.joinedload # Suppress flake8's unused import warning for flag_modified: flag_modified = flag_modified Unicode = sql.Unicode def initialize(): """Initialize the module.""" db_options.set_defaults(CONF, connection="sqlite:///keystone.db") # Configure OSprofiler options profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False) def initialize_decorator(init): """Ensure that the length of string field do not exceed the limit. This decorator check the initialize arguments, to make sure the length of string field do not exceed the length limit, or raise a 'StringLengthExceeded' exception. 
Use decorator instead of inheritance, because the metaclass will check the __tablename__, primary key columns, etc. at the class definition. """ def initialize(self, *args, **kwargs): cls = type(self) for k, v in kwargs.items(): if hasattr(cls, k): attr = getattr(cls, k) if isinstance(attr, InstrumentedAttribute): column = attr.property.columns[0] if isinstance(column.type, String): if not isinstance(v, str): v = str(v) if column.type.length and column.type.length < len(v): raise exception.StringLengthExceeded( string=v, type=k, length=column.type.length ) init(self, *args, **kwargs) return initialize ModelBase.__init__ = initialize_decorator(ModelBase.__init__) # Special Fields class JsonBlob(sql_types.TypeDecorator): impl = sql.Text # NOTE(ralonsoh): set to True as any other TypeDecorator in SQLAlchemy # https://docs.sqlalchemy.org/en/14/core/custom_types.html# \ # sqlalchemy.types.TypeDecorator.cache_ok cache_ok = True """This type is safe to cache.""" def process_bind_param(self, value, dialect): return jsonutils.dumps(value) def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class DateTimeInt(sql_types.TypeDecorator): """A column that automatically converts a datetime object to an Int. Keystone relies on accurate (sub-second) datetime objects. In some cases the RDBMS drop sub-second accuracy (some versions of MySQL). This field automatically converts the value to an INT when storing the data and back to a datetime object when it is loaded from the database. NOTE: Any datetime object that has timezone data will be converted to UTC. Any datetime object that has no timezone data will be assumed to be UTC and loaded from the DB as such. 
""" impl = sql.BigInteger epoch = datetime.datetime.fromtimestamp(0, tz=datetime.timezone.utc) # NOTE(ralonsoh): set to True as any other TypeDecorator in SQLAlchemy # https://docs.sqlalchemy.org/en/14/core/custom_types.html# \ # sqlalchemy.types.TypeDecorator.cache_ok cache_ok = True """This type is safe to cache.""" def process_bind_param(self, value, dialect): if value is None: return value else: if not isinstance(value, datetime.datetime): raise ValueError( _( 'Programming Error: value to be stored ' 'must be a datetime object.' ) ) value = timeutils.normalize_time(value) value = value.replace(tzinfo=datetime.timezone.utc) # NOTE(morgan): We are casting this to an int, and ensuring we # preserve microsecond data by moving the decimal. This is easier # than being concerned with the differences in Numeric types in # different SQL backends. return int((value - self.epoch).total_seconds() * 1000000) def process_result_value(self, value, dialect): if value is None: return None else: # Convert from INT to appropriate micro-second float (microseconds # after the decimal) from what was stored to the DB value = float(value) / 1000000 # NOTE(morgan): Explictly use timezone "datetime.timezone.utc" to # ensure we are not adjusting the actual datetime object from what # we stored. dt_obj = datetime.datetime.fromtimestamp( value, tz=datetime.timezone.utc ) # Return non-tz aware datetime object (as keystone expects) return timeutils.normalize_time(dt_obj) class ModelDictMixinWithExtras(models.ModelBase): """Mixin making model behave with dict-like interfaces includes extras. NOTE: DO NOT USE THIS FOR FUTURE SQL MODELS. "Extra" column is a legacy concept that should not be carried forward with new SQL models as the concept of "arbitrary" properties is not in line with the design philosophy of Keystone. """ attributes: list[str] = [] _msg = ( 'Programming Error: Model does not have an "extra" column. 
' 'Unless the model already has an "extra" column and has ' 'existed in a previous released version of keystone with ' 'the extra column included, the model should use ' '"ModelDictMixin" instead.' ) @classmethod def from_dict(cls, d): new_d = d.copy() if not hasattr(cls, 'extra'): # NOTE(notmorgan): No translation here, This is an error for # programmers NOT end users. raise AttributeError(cls._msg) # no qa new_d['extra'] = { k: new_d.pop(k) for k in d.keys() if k not in cls.attributes and k != 'extra' } return cls(**new_d) def to_dict(self, include_extra_dict=False): """Return the model's attributes as a dictionary. If include_extra_dict is True, 'extra' attributes are literally included in the resulting dictionary twice, for backwards-compatibility with a broken implementation. """ if not hasattr(self, 'extra'): # NOTE(notmorgan): No translation here, This is an error for # programmers NOT end users. raise AttributeError(self._msg) # no qa d = self.extra.copy() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) if include_extra_dict: d['extra'] = self.extra.copy() return d def __getitem__(self, key): """Evaluate if key is in extra or not, to return correct item.""" if key in self.extra: return self.extra[key] return getattr(self, key) class ModelDictMixin(models.ModelBase): @classmethod def from_dict(cls, d): """Return a model instance from a dictionary.""" return cls(**d) def to_dict(self): """Return the model's attributes as a dictionary.""" names = (column.name for column in self.__table__.columns) return {name: getattr(self, name) for name in names} _main_context_manager = None def _get_main_context_manager(): global _main_context_manager if not _main_context_manager: _main_context_manager = enginefacade.transaction_context() return _main_context_manager # Now this function is only used for testing FK with sqlite. 
def enable_sqlite_foreign_key(): global _main_context_manager if not _main_context_manager: _main_context_manager = enginefacade.transaction_context() _main_context_manager.configure(sqlite_fk=True) def cleanup(): global _main_context_manager _main_context_manager = None _CONTEXT = None def _get_context(): global _CONTEXT if _CONTEXT is None: # NOTE(dims): Delay the `threading.local` import to allow for # eventlet/gevent monkeypatching to happen import threading _CONTEXT = threading.local() return _CONTEXT # Unit tests set this to True so that oslo.db's global engine is used. # This allows oslo_db.test_base.DbTestCase to override the transaction manager # with its test transaction manager. _TESTING_USE_GLOBAL_CONTEXT_MANAGER = False def session_for_read(): if _TESTING_USE_GLOBAL_CONTEXT_MANAGER: reader = enginefacade.reader else: reader = _get_main_context_manager().reader return _wrap_session(reader.using(_get_context())) def session_for_write(): if _TESTING_USE_GLOBAL_CONTEXT_MANAGER: writer = enginefacade.writer else: writer = _get_main_context_manager().writer return _wrap_session(writer.using(_get_context())) def _wrap_session(sess): if CONF.profiler.enabled and CONF.profiler.trace_sqlalchemy: sess = osprofiler.sqlalchemy.wrap_session(sql, sess) return sess def truncated(f): return driver_hints.truncated(f) class _WontMatch(Exception): """Raised to indicate that the filter won't match. This is raised to short-circuit the computation of the filter as soon as it's discovered that the filter requested isn't going to match anything. A filter isn't going to match anything if the value is too long for the field, for example. """ @classmethod def check(cls, value, col_attr): """Check if the value can match given the column attributes. Raises this class if the value provided can't match any value in the column in the table given the column's attributes. 
For example, if the column is a string and the value is longer than the column then it won't match any value in the column in the table. """ if value is None: return col = col_attr.property.columns[0] if isinstance(col.type, sql.types.Boolean): # The column is a Boolean, we should have already validated input. return if not col.type.length: # The column doesn't have a length so can't validate anymore. return if len(value) > col.type.length: raise cls() # Otherwise the value could match a value in the column. def _filter(model, query, hints): """Apply filtering to a query. :param model: the table model in question :param query: query to apply filters to :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: query updated with any filters satisfied """ def inexact_filter(model, query, filter_, satisfied_filters): """Apply an inexact filter to a query. :param model: the table model in question :param query: query to apply filters to :param dict filter_: describes this filter :param list satisfied_filters: filter_ will be added if it is satisfied. :returns: query updated to add any inexact filters satisfied """ column_attr = getattr(model, filter_['name']) # TODO(henry-nash): Sqlalchemy 0.7 defaults to case insensitivity # so once we find a way of changing that (maybe on a call-by-call # basis), we can add support for the case sensitive versions of # the filters below. For now, these case sensitive versions will # be handled at the controller level. 
if filter_['case_sensitive']: return query if filter_['comparator'] == 'contains': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%%%s%%' % filter_['value']) elif filter_['comparator'] == 'startswith': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%s%%' % filter_['value']) elif filter_['comparator'] == 'endswith': _WontMatch.check(filter_['value'], column_attr) query_term = column_attr.ilike('%%%s' % filter_['value']) else: # It's a filter we don't understand, so let the caller # work out if they need to do something with it. return query satisfied_filters.append(filter_) return query.filter(query_term) def exact_filter(model, query, filter_, satisfied_filters): """Apply an exact filter to a query. :param model: the table model in question :param query: query to apply filters to :param dict filter_: describes this filter :param list satisfied_filters: filter_ will be added if it is satisfied. :returns: query updated to add any exact filters satisfied """ key = filter_['name'] col = getattr(model, key) if isinstance(col.property.columns[0].type, sql.types.Boolean): filter_val = utils.attr_as_boolean(filter_['value']) else: _WontMatch.check(filter_['value'], col) filter_val = filter_['value'] satisfied_filters.append(filter_) return query.filter(col == filter_val) try: satisfied_filters = [] for filter_ in hints.filters: if filter_['name'] not in model.attributes: continue if filter_['comparator'] == 'equals': query = exact_filter(model, query, filter_, satisfied_filters) else: query = inexact_filter( model, query, filter_, satisfied_filters ) # Remove satisfied filters, then the caller will know remaining filters for filter_ in satisfied_filters: hints.filters.remove(filter_) return query except _WontMatch: hints.cannot_match = True return def _limit(query, hints): """Apply a limit to a query. :param query: query to apply filters to :param hints: contains the list of filters and limit details. 
:returns: query updated with any limits satisfied """ # NOTE(henry-nash): If we were to implement pagination, then we # we would expand this method to support pagination and limiting. # If we satisfied all the filters, set an upper limit if supplied if hints.limit: original_len = query.count() limit_query = query.limit(hints.limit['limit']) if limit_query.count() < original_len: hints.limit['truncated'] = True query = limit_query return query def filter_limit_query(model, query, hints): """Apply filtering and limit to a query. :param model: table model :param query: query to apply filters to :param hints: contains the list of filters and limit details. This may be None, indicating that there are no filters or limits to be applied. If it's not None, then any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: query updated with any filters and limits satisfied """ if hints is None: return query # First try and satisfy any filters query = _filter(model, query, hints) if hints.cannot_match: # Nothing's going to match, so don't bother with the query. return [] # NOTE(henry-nash): Any unsatisfied filters will have been left in # the hints list for the controller to handle. We can only try and # limit here if all the filters are already satisfied since, if not, # doing so might mess up the final results. If there are still # unsatisfied filters, we have to leave any limiting to the controller # as well. 
if not hints.filters: return _limit(query, hints) else: return query def handle_conflicts(conflict_type='object'): """Convert select sqlalchemy exceptions into HTTP 409 Conflict.""" _conflict_msg = 'Conflict %(conflict_type)s: %(details)s' def decorator(method): @functools.wraps(method) def wrapper(*args, **kwargs): try: return method(*args, **kwargs) except db_exception.DBDuplicateEntry as e: # LOG the exception for debug purposes, do not send the # exception details out with the raised Conflict exception # as it can contain raw SQL. LOG.debug( _conflict_msg, {'conflict_type': conflict_type, 'details': e}, ) name = None field = None domain_id = None # First element is unnecessary for extracting name and causes # object not iterable error. Remove it. params = args[1:] # We want to store the duplicate objects name in the error # message for the user. If name is not available we use the id. for arg in params: if isinstance(arg, dict): if 'name' in arg: field = 'name' name = arg['name'] elif 'id' in arg: field = 'ID' name = arg['id'] if 'domain_id' in arg: domain_id = arg['domain_id'] msg = _('Duplicate entry') if name and domain_id: msg = _( 'Duplicate entry found with %(field)s %(name)s ' 'at domain ID %(domain_id)s' ) % {'field': field, 'name': name, 'domain_id': domain_id} elif name: msg = _( 'Duplicate entry found with %(field)s %(name)s' ) % {'field': field, 'name': name} elif domain_id: msg = _('Duplicate entry at domain ID %s') % domain_id raise exception.Conflict(type=conflict_type, details=msg) except db_exception.DBError as e: # TODO(blk-u): inspecting inner_exception breaks encapsulation; # oslo_db should provide exception we need. if isinstance(e.inner_exception, IntegrityError): # LOG the exception for debug purposes, do not send the # exception details out with the raised Conflict exception # as it can contain raw SQL. 
LOG.debug( _conflict_msg, {'conflict_type': conflict_type, 'details': e}, ) # NOTE(morganfainberg): This is really a case where the SQL # failed to store the data. This is not something that the # user has done wrong. Example would be a ForeignKey is # missing; the code that is executed before reaching the # SQL writing to the DB should catch the issue. raise exception.UnexpectedError( _( 'An unexpected error occurred when trying to ' 'store %s' ) % conflict_type ) raise return wrapper return decorator ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/0000775000175000017500000000000000000000000021567 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/README.rst0000664000175000017500000000103200000000000023252 0ustar00zuulzuul00000000000000Migrations for the database =========================== This directory contains migrations for the database. These are implemented using `alembic`__, a lightweight database migration tool designed for usage with `SQLAlchemy`__. The best place to start understanding Alembic is with its own `tutorial`__. You can also play around with the :command:`alembic` command:: $ alembic --help .. __: https://alembic.sqlalchemy.org/en/latest/ .. __: https://www.sqlalchemy.org/ .. 
__: https://alembic.sqlalchemy.org/en/latest/tutorial.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/__init__.py0000664000175000017500000000000000000000000023666 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/autogen.py0000664000175000017500000001104700000000000023606 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from alembic.operations import ops from alembic.util import Dispatcher from alembic.util import rev_id as new_rev_id from keystone.common.sql import upgrades from keystone.i18n import _ _ec_dispatcher = Dispatcher() def process_revision_directives(context, revision, directives): directives[:] = list(_assign_directives(context, directives)) def _assign_directives(context, directives, phase=None): for directive in directives: decider = _ec_dispatcher.dispatch(directive) if phase is None: phases = upgrades.MIGRATION_BRANCHES else: phases = (phase,) for phase in phases: decided = decider(context, directive, phase) if decided: yield decided @_ec_dispatcher.dispatch_for(ops.MigrationScript) def _migration_script_ops(context, directive, phase): """Generate a new ops.MigrationScript() for a given phase. E.g. 
given an ops.MigrationScript() directive from a vanilla autogenerate and an expand/contract phase name, produce a new ops.MigrationScript() which contains only those sub-directives appropriate to "expand" or "contract". Also ensure that the branch directory exists and that the correct branch labels/depends_on/head revision are set up. """ autogen_kwargs = {} version_path = upgrades.get_version_branch_path( release=upgrades.CURRENT_RELEASE, branch=phase, ) upgrades.check_bootstrap_new_branch(phase, version_path, autogen_kwargs) op = ops.MigrationScript( new_rev_id(), ops.UpgradeOps( ops=list( _assign_directives(context, directive.upgrade_ops.ops, phase) ) ), ops.DowngradeOps(ops=[]), message=directive.message, **autogen_kwargs ) if not op.upgrade_ops.is_empty(): return op @_ec_dispatcher.dispatch_for(ops.AddConstraintOp) @_ec_dispatcher.dispatch_for(ops.CreateIndexOp) @_ec_dispatcher.dispatch_for(ops.CreateTableOp) @_ec_dispatcher.dispatch_for(ops.AddColumnOp) def _expands(context, directive, phase): if phase == 'expand': return directive else: return None @_ec_dispatcher.dispatch_for(ops.DropConstraintOp) @_ec_dispatcher.dispatch_for(ops.DropIndexOp) @_ec_dispatcher.dispatch_for(ops.DropTableOp) @_ec_dispatcher.dispatch_for(ops.DropColumnOp) def _contracts(context, directive, phase): if phase == 'contract': return directive else: return None @_ec_dispatcher.dispatch_for(ops.AlterColumnOp) def _alter_column(context, directive, phase): is_expand = phase == 'expand' if is_expand and directive.modify_nullable is True: return directive elif not is_expand and directive.modify_nullable is False: return directive else: # TODO(stephenfin): This logic is taken from neutron but I don't think # it's correct. As-is, this prevents us from auto-generating migrations # that change the nullable value of a field since the modify_nullable # value will be either True or False and we run through both expand and # contract phases so it'll fail one of the above checks. 
However, # setting nullable=True is clearly an expand operation (it makes the # database more permissive) and the opposite is also true. As such, # shouldn't we simply emit the directive if we're in the relevant phase # and skip otherwise? This is only left because zzzeek wrote that # neutron code and I'm sure he had good reason for this. msg = _( "Don't know if operation is an expand or contract at the moment: " "%s" ) raise NotImplementedError(msg % directive) @_ec_dispatcher.dispatch_for(ops.ModifyTableOps) def _modify_table_ops(context, directive, phase): op = ops.ModifyTableOps( directive.table_name, ops=list(_assign_directives(context, directive.ops, phase)), schema=directive.schema, ) if not op.is_empty(): return op ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/env.py0000664000175000017500000001464000000000000022736 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging.config import fileConfig from alembic import context from sqlalchemy import engine_from_config from sqlalchemy import pool from keystone.common.sql import core from keystone.common.sql.migrations import autogen # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # interpret the config file for Python logging unless we're told not to; # this line sets up loggers basically. 
if config.attributes.get('configure_logger', True): fileConfig(config.config_file_name) # keystone model MetaData object target_metadata = core.ModelBase.metadata def include_object(object, name, type_, reflected, compare_to): BORKED_COLUMNS = () BORKED_UNIQUE_CONSTRAINTS = () BORKED_FK_CONSTRAINTS = ( # removed fks ('application_credential_access_rule', ['access_rule_id']), ('limit', ['registered_limit_id']), ('registered_limit', ['service_id']), ('registered_limit', ['region_id']), ('endpoint', ['region_id']), # added fks ('application_credential_access_rule', ['access_rule_id']), ('endpoint', ['region_id']), ('assignment', ['role_id']), ) BORKED_INDEXES = ( # removed indexes ('access_rule', ['external_id']), ('access_rule', ['user_id']), ('revocation_event', ['revoked_at']), ('system_assignment', ['actor_id']), ('user', ['default_project_id']), # added indexes ('access_rule', ['external_id']), ('access_rule', ['user_id']), ('access_token', ['consumer_id']), ('endpoint', ['service_id']), ('revocation_event', ['revoked_at']), ('user', ['default_project_id']), ('user_group_membership', ['group_id']), ( 'trust', [ 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', 'expires_at_int', ], ), ) # NOTE(stephenfin): By skipping these items, we skip *all* changes to the # affected item. However, we only want to skip the actual things we know # about untl we have enough time to fix them. These issues are listed in # keystone.tests.unit.common.sql.test_upgrades.KeystoneModelsMigrationsSync # However, this isn't an issue since the test is more specific and will # catch other issues and anyone making changes to the columns and hoping to # autogenerate them would need to fix the latent issue first anyway. 
if type_ == 'column': return (object.table.name, name) not in BORKED_COLUMNS if type_ == 'unique_constraint': columns = [c.name for c in object.columns] return (object.table.name, columns) not in BORKED_UNIQUE_CONSTRAINTS if type_ == 'foreign_key_constraint': columns = [c.name for c in object.columns] return (object.table.name, columns) not in BORKED_FK_CONSTRAINTS if type_ == 'index': columns = [c.name for c in object.columns] return (object.table.name, columns) not in BORKED_INDEXES return True def include_name(name, type_, parent_names): """Determine which tables or columns to skip. This is used where we have migrations that are out-of-sync with the models. """ REMOVED_TABLES = ('token',) if type_ == 'table': return name not in REMOVED_TABLES return True def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, include_object=include_object, literal_binds=True, dialect_opts={"paramstyle": "named"}, ) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. This is modified from the default based on the below, since we want to share an engine when unit testing so in-memory database testing actually works. 
https://alembic.sqlalchemy.org/en/latest/cookbook.html#connection-sharing """ connectable = config.attributes.get('connection', None) if connectable is None: # only create Engine if we don't have a Connection from the outside connectable = engine_from_config( config.get_section(config.config_ini_section), prefix="sqlalchemy.", poolclass=pool.NullPool, ) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, include_object=include_object, process_revision_directives=autogen.process_revision_directives, # noqa: E501 ) with context.begin_transaction(): context.run_migrations() else: context.configure( connection=connectable, target_metadata=target_metadata, render_as_batch=True, include_name=include_name, include_object=include_object, process_revision_directives=autogen.process_revision_directives, ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/manage.py0000664000175000017500000002150200000000000023371 0ustar00zuulzuul00000000000000#!/usr/bin/env python3 # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import sys from alembic import command as alembic_command from alembic import script as alembic_script from alembic import util as alembic_util from oslo_config import cfg from oslo_log import log import pbr.version from keystone.common import sql from keystone.common.sql import upgrades import keystone.conf from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) def import_sql_modules(): # We need to import all of these so the tables are registered. It would be # easier if these were all in a central location :( import keystone.application_credential.backends.sql # noqa: F401 import keystone.assignment.backends.sql # noqa: F401 import keystone.assignment.role_backends.sql_model # noqa: F401 import keystone.catalog.backends.sql # noqa: F401 import keystone.credential.backends.sql # noqa: F401 import keystone.endpoint_policy.backends.sql # noqa: F401 import keystone.federation.backends.sql # noqa: F401 import keystone.identity.backends.sql_model # noqa: F401 import keystone.identity.mapping_backends.sql # noqa: F401 import keystone.limit.backends.sql # noqa: F401 import keystone.oauth1.backends.sql # noqa: F401 import keystone.policy.backends.sql # noqa: F401 import keystone.resource.backends.sql_model # noqa: F401 import keystone.resource.config_backends.sql # noqa: F401 import keystone.revoke.backends.sql # noqa: F401 import keystone.trust.backends.sql # noqa: F401 def do_alembic_command(config, cmd, revision=None, **kwargs): args = [] if revision: args.append(revision) try: getattr(alembic_command, cmd)(config, *args, **kwargs) except alembic_util.CommandError as e: alembic_util.err(str(e)) def do_generic_show(config, cmd): kwargs = {'verbose': CONF.command.verbose} do_alembic_command(config, cmd, **kwargs) def do_validate(config, cmd): do_alembic_command(config, 'branches') # TODO(stephenfin): Implement these # validate_revisions(config) # TODO(stephenfin): Implement these # validate_head_files(config) def 
_find_milestone_revisions(config, milestone, branch=None): """Return the revision(s) for a given milestone.""" script = alembic_script.ScriptDirectory.from_config(config) return [ (m.revision, label) for m in _get_revisions(script) for label in (m.branch_labels or [None]) if milestone in getattr(m.module, 'keystone_milestone', []) and (branch is None or branch in m.branch_labels) ] def _get_revisions(script): return list(script.walk_revisions(base='base', head='heads')) def do_upgrade(config, cmd): branch = None if (CONF.command.revision or CONF.command.delta) and ( CONF.command.expand or CONF.command.contract ): msg = _('Phase upgrade options do not accept revision specification') raise SystemExit(msg) if CONF.command.expand: branch = upgrades.EXPAND_BRANCH revision = f'{upgrades.EXPAND_BRANCH}@head' elif CONF.command.contract: branch = upgrades.CONTRACT_BRANCH revision = f'{upgrades.CONTRACT_BRANCH}@head' elif not CONF.command.revision and not CONF.command.delta: msg = _('You must provide a revision or relative delta') raise SystemExit(msg) else: revision = CONF.command.revision or '' if '-' in revision: msg = _('Negative relative revision (downgrade) not supported') raise SystemExit(msg) delta = CONF.command.delta if delta: if '+' in revision: msg = _('Use either --delta or relative revision, not both') raise SystemExit(msg) if delta < 0: msg = _('Negative delta (downgrade) not supported') raise SystemExit(msg) revision = '%s+%d' % (revision, delta) # leave branchless 'head' revision request backward compatible by # applying all heads in all available branches. 
if revision == 'head': revision = 'heads' if revision in upgrades.MILESTONES: expand_revisions = _find_milestone_revisions( config, revision, upgrades.EXPAND_BRANCH, ) contract_revisions = _find_milestone_revisions( config, revision, upgrades.CONTRACT_BRANCH, ) # Expand revisions must be run before contract revisions revisions = expand_revisions + contract_revisions else: revisions = [(revision, branch)] for revision, branch in revisions: # if not CONF.command.sql: # run_sanity_checks(config, revision) do_alembic_command( config, cmd, revision=revision, sql=CONF.command.sql, ) def do_revision(config, cmd): kwargs = { 'message': CONF.command.message, 'autogenerate': CONF.command.autogenerate, 'sql': CONF.command.sql, } branches = [] if CONF.command.expand: kwargs['head'] = 'expand@head' branches.append(upgrades.EXPAND_BRANCH) elif CONF.command.contract: kwargs['head'] = 'contract@head' branches.append(upgrades.CONTRACT_BRANCH) else: branches = upgrades.MIGRATION_BRANCHES if not CONF.command.autogenerate: for branch in branches: args = copy.copy(kwargs) version_path = upgrades.get_version_branch_path( release=upgrades.CURRENT_RELEASE, branch=branch, ) upgrades.check_bootstrap_new_branch(branch, version_path, args) do_alembic_command(config, cmd, **args) else: # CONF.command.autogenerate # autogeneration code will take care of enforcing proper directories do_alembic_command(config, cmd, **kwargs) # TODO(stephenfin): Implement these # update_head_files(config) def add_branch_options(parser): group = parser.add_mutually_exclusive_group() group.add_argument('--expand', action='store_true') group.add_argument('--contract', action='store_true') return group def add_alembic_subparser(sub, cmd): return sub.add_parser(cmd, help=getattr(alembic_command, cmd).__doc__) def add_command_parsers(subparsers): for name in ['current', 'history', 'branches', 'heads']: parser = add_alembic_subparser(subparsers, name) parser.set_defaults(func=do_generic_show) parser.add_argument( 
'--verbose', action='store_true', help='Display more verbose output for the specified command', ) parser = add_alembic_subparser(subparsers, 'upgrade') parser.add_argument('--delta', type=int) parser.add_argument('--sql', action='store_true') parser.add_argument('revision', nargs='?') add_branch_options(parser) parser.set_defaults(func=do_upgrade) parser = subparsers.add_parser( 'validate', help=alembic_command.branches.__doc__ + ' and validate head file', ) parser.set_defaults(func=do_validate) parser = add_alembic_subparser(subparsers, 'revision') parser.add_argument('-m', '--message') parser.add_argument('--sql', action='store_true') group = add_branch_options(parser) group.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=do_revision) command_opt = cfg.SubCommandOpt( 'command', title='Command', help=_('Available commands'), handler=add_command_parsers, ) def main(argv): CONF.register_cli_opt(command_opt) keystone.conf.configure() sql.initialize() keystone.conf.set_default_for_default_log_levels() user_supplied_config_file = False if argv: for argument in argv: if argument == '--config-file': user_supplied_config_file = True CONF( project='keystone', version=pbr.version.VersionInfo('keystone').version_string(), ) if not CONF.default_config_files and not user_supplied_config_file: LOG.warning('Config file not found, using default configs.') import_sql_modules() config = upgrades.get_alembic_config() return bool(CONF.command.func(config, CONF.command.name)) if __name__ == '__main__': main(sys.argv) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/script.py.mako0000664000175000017500000000172000000000000024373 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/0000775000175000017500000000000000000000000023437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/common/sql/migrations/versions/2024.01/0000775000175000017500000000000000000000000024245 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/2024.01/expand/0000775000175000017500000000000000000000000025524 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000025300000000000011455 xustar0000000000000000149 path=keystone-26.0.0/keystone/common/sql/migrations/versions/2024.01/expand/47147121_add_identity_federation_attribute_mapping_schema_version.py 22 mtime=1727867753.0 
keystone-26.0.0/keystone/common/sql/migrations/versions/2024.01/expand/47147121_add_identity_federat0000664000175000017500000000206000000000000032572 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Add Identity Federation attribute mapping schema version. Revision ID: 47147121 Revises: 11c3b243b4cb Create Date: 2023-12-12 09:00:00 """ from alembic import op from sqlalchemy import Column from sqlalchemy import String # revision identifiers, used by Alembic. revision = '47147121' down_revision = '11c3b243b4cb' branch_labels = None depends_on = None def upgrade(): op.add_column( "mapping", Column( 'schema_version', String(5), nullable=False, server_default="1.0" ), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py0000664000175000017500000011106200000000000031110 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Initial version. Revision ID: 27e647c0fad4 Revises: Create Date: 2021-12-23 11:13:26.305412 """ import textwrap from alembic import op from oslo_log import log from oslo_utils import timeutils import sqlalchemy as sql from keystone.assignment.backends import sql as assignment_sql from keystone.common import sql as ks_sql import keystone.conf from keystone.identity.mapping_backends import mapping as mapping_backend # revision identifiers, used by Alembic. revision = '27e647c0fad4' down_revision = None depends_on = None CONF = keystone.conf.CONF LOG = log.getLogger(__name__) NULL_DOMAIN_ID = '<>' def upgrade(): bind = op.get_bind() if bind.engine.name == 'mysql': # Set default DB charset to UTF8. op.execute( 'ALTER DATABASE %s DEFAULT CHARACTER SET utf8' % bind.engine.url.database ) op.create_table( 'application_credential', sql.Column( 'internal_id', sql.Integer, primary_key=True, nullable=False ), sql.Column('id', sql.String(length=64), nullable=False), sql.Column('name', sql.String(length=255), nullable=False), sql.Column('secret_hash', sql.String(length=255), nullable=False), sql.Column('description', sql.Text), sql.Column('user_id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(64), nullable=True), sql.Column('expires_at', ks_sql.DateTimeInt()), sql.Column('system', sql.String(64), nullable=True), sql.Column('unrestricted', sql.Boolean), sql.UniqueConstraint( 'user_id', 'name', name='duplicate_app_cred_constraint' ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'assignment', sql.Column( 'type', sql.Enum( assignment_sql.AssignmentType.USER_PROJECT, assignment_sql.AssignmentType.GROUP_PROJECT, assignment_sql.AssignmentType.USER_DOMAIN, assignment_sql.AssignmentType.GROUP_DOMAIN, name='type', ), nullable=False, ), sql.Column('actor_id', sql.String(64), nullable=False), sql.Column('target_id', sql.String(64), 
nullable=False), sql.Column('role_id', sql.String(64), nullable=False), sql.Column('inherited', sql.Boolean, default=False, nullable=False), sql.PrimaryKeyConstraint( 'type', 'actor_id', 'target_id', 'role_id', 'inherited', ), sql.Index('ix_actor_id', 'actor_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'access_rule', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column('service', sql.String(64)), sql.Column('path', sql.String(128)), sql.Column('method', sql.String(16)), sql.Column('external_id', sql.String(64)), sql.Column('user_id', sql.String(64)), sql.UniqueConstraint( 'external_id', name='access_rule_external_id_key', ), sql.UniqueConstraint( 'user_id', 'service', 'path', 'method', name='duplicate_access_rule_for_user_constraint', ), sql.Index('user_id', 'user_id'), sql.Index('external_id', 'external_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'config_register', sql.Column('type', sql.String(64), primary_key=True), sql.Column('domain_id', sql.String(64), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'consumer', sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('description', sql.String(64), nullable=True), sql.Column('secret', sql.String(64), nullable=False), sql.Column('extra', sql.Text(), nullable=False), ) op.create_table( 'credential', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('user_id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(length=64)), sql.Column('type', sql.String(length=255), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('key_hash', sql.String(64), nullable=False), sql.Column( 'encrypted_blob', ks_sql.Text, nullable=False, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'group', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('domain_id', sql.String(length=64), nullable=False), 
sql.Column('name', sql.String(length=64), nullable=False), sql.Column('description', sql.Text), sql.Column('extra', ks_sql.JsonBlob.impl), sql.UniqueConstraint( 'domain_id', 'name', name='ixu_group_name_domain_id', ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'id_mapping', sql.Column('public_id', sql.String(64), primary_key=True), sql.Column('domain_id', sql.String(64), nullable=False), sql.Column('local_id', sql.String(255), nullable=False), sql.Column( 'entity_type', sql.Enum( mapping_backend.EntityType.USER, mapping_backend.EntityType.GROUP, name='entity_type', ), nullable=False, ), sql.UniqueConstraint( 'domain_id', 'local_id', 'entity_type', name='domain_id', ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'identity_provider', sql.Column('id', sql.String(64), primary_key=True), sql.Column('enabled', sql.Boolean, nullable=False), sql.Column('description', sql.Text(), nullable=True), sql.Column('domain_id', sql.String(64), nullable=False), sql.Column('authorization_ttl', sql.Integer, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'idp_remote_ids', sql.Column( 'idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), ), sql.Column('remote_id', sql.String(255), primary_key=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'mapping', sql.Column('id', sql.String(64), primary_key=True), sql.Column('rules', sql.Text(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'policy', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('type', sql.String(length=255), nullable=False), sql.Column('blob', ks_sql.JsonBlob, nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'policy_association', sql.Column('id', sql.String(64), primary_key=True), sql.Column('policy_id', sql.String(64), nullable=False), sql.Column('endpoint_id', 
sql.String(64), nullable=True), sql.Column('service_id', sql.String(64), nullable=True), sql.Column('region_id', sql.String(64), nullable=True), sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'project', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=64), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('description', sql.Text), sql.Column('enabled', sql.Boolean), sql.Column( 'domain_id', sql.String(length=64), sql.ForeignKey( 'project.id', name='project_domain_id_fkey', ), nullable=False, ), sql.Column( 'parent_id', sql.String(64), sql.ForeignKey( 'project.id', name='project_parent_id_fkey', ), nullable=True, ), sql.Column( 'is_domain', sql.Boolean, nullable=False, server_default='0', default=False, ), sql.UniqueConstraint( 'domain_id', 'name', name='ixu_project_name_domain_id', ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'project_endpoint', sql.Column( 'endpoint_id', sql.String(64), primary_key=True, nullable=False ), sql.Column( 'project_id', sql.String(64), primary_key=True, nullable=False ), ) op.create_table( 'project_option', sql.Column( 'project_id', sql.String(64), sql.ForeignKey('project.id', ondelete='CASCADE'), nullable=False, primary_key=True, ), sql.Column( 'option_id', sql.String(4), nullable=False, primary_key=True ), sql.Column('option_value', ks_sql.JsonBlob, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) # NOTE(lamt) To allow tag name to be case sensitive for MySQL, the 'name' # column needs to use collation, which is incompatible with Postgresql. 
# Using unicode to mirror nova's server tag: # https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/models.py op.create_table( 'project_tag', sql.Column( 'project_id', sql.String(64), sql.ForeignKey('project.id', ondelete='CASCADE'), nullable=False, primary_key=True, ), sql.Column('name', sql.Unicode(255), nullable=False, primary_key=True), sql.UniqueConstraint('project_id', 'name'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'region', sql.Column('id', sql.String(255), primary_key=True), sql.Column('description', sql.String(255), nullable=False), sql.Column('parent_region_id', sql.String(255), nullable=True), sql.Column('extra', sql.Text()), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'registered_limit', sql.Column('id', sql.String(length=64), nullable=False), sql.Column('service_id', sql.String(255)), sql.Column('region_id', sql.String(64), nullable=True), sql.Column('resource_name', sql.String(255)), sql.Column('default_limit', sql.Integer, nullable=False), sql.Column('description', sql.Text), sql.Column('internal_id', sql.Integer, primary_key=True), # NOTE(stephenfin): Name chosen to preserve backwards compatibility # with names used for primary key unique constraints sql.UniqueConstraint('id', name='registered_limit_id_key'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'request_token', sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('request_secret', sql.String(64), nullable=False), sql.Column('verifier', sql.String(64), nullable=True), sql.Column('authorizing_user_id', sql.String(64), nullable=True), sql.Column('requested_project_id', sql.String(64), nullable=False), sql.Column('role_ids', sql.Text(), nullable=True), sql.Column( 'consumer_id', sql.String(64), sql.ForeignKey('consumer.id'), nullable=False, index=True, ), sql.Column('expires_at', sql.String(64), nullable=True), ) op.create_table( 'revocation_event', sql.Column('id', sql.Integer, 
primary_key=True), sql.Column('domain_id', sql.String(64)), sql.Column('project_id', sql.String(64)), sql.Column('user_id', sql.String(64)), sql.Column('role_id', sql.String(64)), sql.Column('trust_id', sql.String(64)), sql.Column('consumer_id', sql.String(64)), sql.Column('access_token_id', sql.String(64)), sql.Column('issued_before', sql.DateTime(), nullable=False), sql.Column('expires_at', sql.DateTime()), sql.Column('revoked_at', sql.DateTime(), nullable=False), sql.Column('audit_id', sql.String(32), nullable=True), sql.Column('audit_chain_id', sql.String(32), nullable=True), # NOTE(stephenfin): The '_new' suffix here is due to migration 095, # which changed the 'id' column from String(64) to Integer. It did this # by creating a 'revocation_event_new' table and populating it with # data from the 'revocation_event' table before deleting the # 'revocation_event' table and renaming the 'revocation_event_new' # table to 'revocation_event'. Because the 'revoked_at' column had # 'index=True', sqlalchemy automatically generated the index name as # 'ix_{table}_{column}'. However, when intitially created, '{table}' # was 'revocation_event_new' so the index got that name. We may wish to # rename this eventually. 
sql.Index('ix_revocation_event_new_revoked_at', 'revoked_at'), sql.Index('ix_revocation_event_issued_before', 'issued_before'), sql.Index( 'ix_revocation_event_project_id_issued_before', 'project_id', 'issued_before', ), sql.Index( 'ix_revocation_event_user_id_issued_before', 'user_id', 'issued_before', ), sql.Index( 'ix_revocation_event_audit_id_issued_before', 'audit_id', 'issued_before', ), ) op.create_table( 'role', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('name', sql.String(length=255), nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column( 'domain_id', sql.String(64), nullable=False, server_default='<>', ), sql.Column('description', sql.String(255), nullable=True), sql.UniqueConstraint( 'name', 'domain_id', name='ixu_role_name_domain_id', ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'role_option', sql.Column( 'role_id', sql.String(64), sql.ForeignKey('role.id', ondelete='CASCADE'), nullable=False, primary_key=True, ), sql.Column( 'option_id', sql.String(4), nullable=False, primary_key=True ), sql.Column('option_value', ks_sql.JsonBlob, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'sensitive_config', sql.Column('domain_id', sql.String(64), primary_key=True), sql.Column('group', sql.String(255), primary_key=True), sql.Column('option', sql.String(255), primary_key=True), sql.Column('value', ks_sql.JsonBlob.impl, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'service', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('type', sql.String(length=255)), sql.Column( 'enabled', sql.Boolean, nullable=False, default=True, server_default='1', ), sql.Column('extra', ks_sql.JsonBlob.impl), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'service_provider', sql.Column('auth_url', sql.String(256), nullable=False), sql.Column('id', sql.String(64), primary_key=True), sql.Column('enabled', sql.Boolean, 
nullable=False), sql.Column('description', sql.Text(), nullable=True), sql.Column('sp_url', sql.String(256), nullable=False), sql.Column( 'relay_state_prefix', sql.String(256), nullable=False, server_default=CONF.saml.relay_state_prefix, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'system_assignment', sql.Column('type', sql.String(64), nullable=False), sql.Column('actor_id', sql.String(64), nullable=False), sql.Column('target_id', sql.String(64), nullable=False), sql.Column('role_id', sql.String(64), nullable=False), sql.Column('inherited', sql.Boolean, default=False, nullable=False), sql.PrimaryKeyConstraint( 'type', 'actor_id', 'target_id', 'role_id', 'inherited' ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'token', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('expires', sql.DateTime, default=None), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('valid', sql.Boolean, default=True, nullable=False), sql.Column('trust_id', sql.String(length=64)), sql.Column('user_id', sql.String(length=64)), sql.Index('ix_token_expires', 'expires'), sql.Index('ix_token_expires_valid', 'expires', 'valid'), sql.Index('ix_token_user_id', 'user_id'), sql.Index('ix_token_trust_id', 'trust_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'trust', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('trustor_user_id', sql.String(length=64), nullable=False), sql.Column('trustee_user_id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(length=64)), sql.Column('impersonation', sql.Boolean, nullable=False), sql.Column('deleted_at', sql.DateTime), sql.Column('expires_at', sql.DateTime), sql.Column('remaining_uses', sql.Integer, nullable=True), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('expires_at_int', ks_sql.DateTimeInt()), sql.UniqueConstraint( 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', 
'expires_at_int', name='duplicate_trust_constraint_expanded', ), sql.Column( 'redelegated_trust_id', sql.String(64), nullable=True, ), sql.Column( 'redelegation_count', sql.Integer, nullable=True, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'trust_role', sql.Column( 'trust_id', sql.String(length=64), primary_key=True, nullable=False ), sql.Column( 'role_id', sql.String(length=64), primary_key=True, nullable=False ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'user', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column('enabled', sql.Boolean), sql.Column('default_project_id', sql.String(length=64)), sql.Column('created_at', sql.DateTime(), nullable=True), sql.Column('last_active_at', sql.Date(), nullable=True), sql.Column('domain_id', sql.String(64), nullable=False), sql.UniqueConstraint('id', 'domain_id', name='ixu_user_id_domain_id'), sql.Index('ix_default_project_id', 'default_project_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'user_group_membership', sql.Column( 'user_id', sql.String(length=64), sql.ForeignKey( 'user.id', name='fk_user_group_membership_user_id', ), primary_key=True, ), sql.Column( 'group_id', sql.String(length=64), sql.ForeignKey( 'group.id', name='fk_user_group_membership_group_id', ), primary_key=True, ), # NOTE(stevemar): The index was named 'group_id' in # 050_fk_consistent_indexes.py and needs to be preserved sql.Index('group_id', 'group_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'user_option', sql.Column( 'user_id', sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE'), nullable=False, primary_key=True, ), sql.Column( 'option_id', sql.String(4), nullable=False, primary_key=True ), sql.Column('option_value', ks_sql.JsonBlob, nullable=True), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'whitelisted_config', sql.Column('domain_id', sql.String(64), 
primary_key=True), sql.Column('group', sql.String(255), primary_key=True), sql.Column('option', sql.String(255), primary_key=True), sql.Column('value', ks_sql.JsonBlob.impl, nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'access_token', sql.Column('id', sql.String(64), primary_key=True, nullable=False), sql.Column('access_secret', sql.String(64), nullable=False), sql.Column( 'authorizing_user_id', sql.String(64), nullable=False, index=True ), sql.Column('project_id', sql.String(64), nullable=False), sql.Column('role_ids', sql.Text(), nullable=False), sql.Column( 'consumer_id', sql.String(64), sql.ForeignKey('consumer.id'), nullable=False, index=True, ), sql.Column('expires_at', sql.String(64), nullable=True), ) op.create_table( 'application_credential_role', sql.Column( 'application_credential_id', sql.Integer, sql.ForeignKey( 'application_credential.internal_id', ondelete='CASCADE' ), primary_key=True, nullable=False, ), sql.Column( 'role_id', sql.String(length=64), primary_key=True, nullable=False ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'application_credential_access_rule', sql.Column( 'application_credential_id', sql.Integer, sql.ForeignKey( 'application_credential.internal_id', ondelete='CASCADE' ), primary_key=True, nullable=False, ), sql.Column( 'access_rule_id', sql.Integer, sql.ForeignKey('access_rule.id', ondelete='CASCADE'), primary_key=True, nullable=False, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'endpoint', sql.Column('id', sql.String(length=64), primary_key=True), sql.Column('legacy_endpoint_id', sql.String(length=64)), sql.Column('interface', sql.String(length=8), nullable=False), sql.Column( 'service_id', sql.String(length=64), sql.ForeignKey( 'service.id', name='endpoint_service_id_fkey', ), nullable=False, ), sql.Column('url', sql.Text, nullable=False), sql.Column('extra', ks_sql.JsonBlob.impl), sql.Column( 'enabled', sql.Boolean, nullable=False, default=True, 
server_default='1', ), sql.Column( 'region_id', sql.String(length=255), sql.ForeignKey( 'region.id', name='fk_endpoint_region_id', ), nullable=True, ), # NOTE(stevemar): The index was named 'service_id' in # 050_fk_consistent_indexes.py and needs to be preserved sql.Index('service_id', 'service_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'endpoint_group', sql.Column('id', sql.String(64), primary_key=True), sql.Column('name', sql.String(255), nullable=False), sql.Column('description', sql.Text, nullable=True), sql.Column('filters', sql.Text(), nullable=False), ) op.create_table( 'expiring_user_group_membership', sql.Column( 'user_id', sql.String(64), sql.ForeignKey('user.id'), primary_key=True, ), sql.Column( 'group_id', sql.String(64), sql.ForeignKey('group.id'), primary_key=True, ), sql.Column( 'idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True, ), sql.Column('last_verified', sql.DateTime(), nullable=False), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'federation_protocol', sql.Column('id', sql.String(64), primary_key=True), sql.Column( 'idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True, ), sql.Column('mapping_id', sql.String(64), nullable=False), sql.Column('remote_id_attribute', sql.String(64)), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'implied_role', sql.Column( 'prior_role_id', sql.String(length=64), sql.ForeignKey( 'role.id', name='implied_role_prior_role_id_fkey', ondelete='CASCADE', ), primary_key=True, ), sql.Column( 'implied_role_id', sql.String(length=64), sql.ForeignKey( 'role.id', name='implied_role_implied_role_id_fkey', ondelete='CASCADE', ), primary_key=True, ), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'limit', sql.Column('id', sql.String(length=64), nullable=False), sql.Column('project_id', sql.String(64), nullable=True), sql.Column('resource_limit', 
sql.Integer, nullable=False), sql.Column('description', sql.Text), sql.Column('internal_id', sql.Integer, primary_key=True), # FIXME(stephenfin): This should have a foreign key constraint on # registered_limit.id, but sqlalchemy-migrate clearly didn't handle # creating a column with embedded FK info as was attempted in 048 sql.Column( 'registered_limit_id', sql.String(64), ), sql.Column('domain_id', sql.String(64), nullable=True), # NOTE(stephenfin): Name chosen to preserve backwards compatibility # with names used for primary key unique constraints sql.UniqueConstraint('id', name='limit_id_key'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'local_user', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column( 'user_id', sql.String(64), nullable=False, unique=True, ), sql.Column('domain_id', sql.String(64), nullable=False), sql.Column('name', sql.String(255), nullable=False), sql.Column('failed_auth_count', sql.Integer, nullable=True), sql.Column('failed_auth_at', sql.DateTime(), nullable=True), sql.ForeignKeyConstraint( ['user_id', 'domain_id'], ['user.id', 'user.domain_id'], name='local_user_user_id_fkey', onupdate='CASCADE', ondelete='CASCADE', ), sql.UniqueConstraint('domain_id', 'name'), ) op.create_table( 'nonlocal_user', sql.Column('domain_id', sql.String(64), primary_key=True), sql.Column('name', sql.String(255), primary_key=True), sql.Column( 'user_id', sql.String(64), nullable=False, ), sql.ForeignKeyConstraint( ['user_id', 'domain_id'], ['user.id', 'user.domain_id'], name='nonlocal_user_user_id_fkey', onupdate='CASCADE', ondelete='CASCADE', ), sql.UniqueConstraint('user_id', name='ixu_nonlocal_user_user_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) op.create_table( 'password', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column( 'local_user_id', sql.Integer, sql.ForeignKey('local_user.id', ondelete='CASCADE'), nullable=False, ), sql.Column('expires_at', sql.DateTime(), nullable=True), 
sql.Column( 'self_service', sql.Boolean, nullable=False, server_default='0', default=False, ), # NOTE(notmorgan): To support the full range of scrypt and pbkfd # password hash lengths, this should be closer to varchar(1500) instead # of varchar(255). sql.Column('password_hash', sql.String(255), nullable=True), sql.Column( 'created_at_int', ks_sql.DateTimeInt(), nullable=False, default=0, server_default='0', ), sql.Column('expires_at_int', ks_sql.DateTimeInt(), nullable=True), sql.Column( 'created_at', sql.DateTime(), nullable=False, default=timeutils.utcnow, ), ) op.create_table( 'project_endpoint_group', sql.Column( 'endpoint_group_id', sql.String(64), sql.ForeignKey('endpoint_group.id'), nullable=False, ), sql.Column('project_id', sql.String(64), nullable=False), sql.PrimaryKeyConstraint('endpoint_group_id', 'project_id'), ) op.create_table( 'federated_user', sql.Column('id', sql.Integer, primary_key=True, nullable=False), sql.Column( 'user_id', sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE'), nullable=False, ), sql.Column( 'idp_id', sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), nullable=False, ), sql.Column('protocol_id', sql.String(64), nullable=False), sql.Column('unique_id', sql.String(255), nullable=False), sql.Column('display_name', sql.String(255), nullable=True), sql.ForeignKeyConstraint( ['protocol_id', 'idp_id'], ['federation_protocol.id', 'federation_protocol.idp_id'], name='federated_user_protocol_id_fkey', ondelete='CASCADE', ), sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'), mysql_engine='InnoDB', mysql_charset='utf8', ) if bind.engine.name == 'sqlite': # NOTE(stevemar): We need to keep this FK constraint due to 073, but # only for sqlite, once we collapse 073 we can remove this constraint with op.batch_alter_table('assignment') as batch_op: batch_op.create_foreign_key( 'fk_assignment_role_id', 'role', ['role_id'], ['id'], ) # TODO(stephenfin): Remove these procedures in a future contract 
migration if bind.engine.name == 'postgresql': error_message = ( 'Credential migration in progress. Cannot perform ' 'writes to credential table.' ) credential_update_trigger = textwrap.dedent( f""" CREATE OR REPLACE FUNCTION keystone_read_only_update() RETURNS trigger AS $BODY$ BEGIN IF NEW.encrypted_blob IS NULL THEN RAISE EXCEPTION '{error_message}'; END IF; IF NEW.encrypted_blob IS NOT NULL AND OLD.blob IS NULL THEN RAISE EXCEPTION '{error_message}'; END IF; RETURN NEW; END $BODY$ LANGUAGE plpgsql; """ ) op.execute(credential_update_trigger) error_message = ( 'Identity provider migration in progress. Cannot ' 'insert new rows into the identity_provider table at ' 'this time.' ) identity_provider_insert_trigger = textwrap.dedent( f""" CREATE OR REPLACE FUNCTION keystone_read_only_insert() RETURNS trigger AS $BODY$ BEGIN RAISE EXCEPTION '{error_message}'; END $BODY$ LANGUAGE plpgsql; """ ) op.execute(identity_provider_insert_trigger) federated_user_insert_trigger = textwrap.dedent( """ CREATE OR REPLACE FUNCTION update_federated_user_domain_id() RETURNS trigger AS $BODY$ BEGIN UPDATE "user" SET domain_id = ( SELECT domain_id FROM identity_provider WHERE id = NEW.idp_id) WHERE id = NEW.user_id and domain_id IS NULL; RETURN NULL; END $BODY$ LANGUAGE plpgsql; """ ) op.execute(federated_user_insert_trigger) local_user_insert_trigger = textwrap.dedent( """ CREATE OR REPLACE FUNCTION update_user_domain_id() RETURNS trigger AS $BODY$ BEGIN UPDATE "user" SET domain_id = NEW.domain_id WHERE id = NEW.user_id; RETURN NULL; END $BODY$ LANGUAGE plpgsql; """ ) op.execute(local_user_insert_trigger) # FIXME(stephenfin): Remove these indexes. They're left over from attempts # to remove foreign key constraints in past migrations. 
Apparently # sqlalchemy-migrate didn't do the job fully and left behind indexes if bind.engine.name == 'mysql': op.create_index('region_id', 'registered_limit', ['region_id']) # FIXME(stephenfin): This should be dropped when we add the FK # constraint to this column op.create_index( 'registered_limit_id', 'limit', ['registered_limit_id'], ) # FIXME(stephenfin): These are leftover from when we removed a FK # constraint and should probable be dropped op.create_index('domain_id', 'identity_provider', ['domain_id']) op.create_index('domain_id', 'user', ['domain_id']) # data migration def _generate_root_domain_project(): # Generate a project that will act as a root for all domains, in order # for use to be able to use a FK constraint on domain_id. Projects # acting as a domain will not reference this as their parent_id, just # as domain_id. # # This special project is filtered out by the driver, so is never # visible to the manager or API. project_ref = { 'id': NULL_DOMAIN_ID, 'name': NULL_DOMAIN_ID, 'enabled': False, 'description': '', 'domain_id': NULL_DOMAIN_ID, 'is_domain': True, 'parent_id': None, 'extra': '{}', } return project_ref bind = op.get_bind() meta = sql.MetaData() project = sql.Table('project', meta, autoload_with=bind) root_domain_project = _generate_root_domain_project() op.execute(project.insert().values(**root_domain_project)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/CONTRACT_HEAD0000664000175000017500000000001500000000000025354 0ustar00zuulzuul00000000000000c88cdce8f248 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/EXPAND_HEAD0000664000175000017500000000001100000000000025112 0ustar00zuulzuul0000000000000047147121 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 
mtime=1727867786.4221153 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/0000775000175000017500000000000000000000000024671 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/contract/0000775000175000017500000000000000000000000026506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000022100000000000011450 xustar0000000000000000123 path=keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/contract/99de3849d860_fix_incorrect_constraints.py 22 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/contract/99de3849d860_fix_incorrect_c0000664000175000017500000000222600000000000033357 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix incorrect constraints. Revision ID: 99de3849d860 Revises: e25ffa003242 Create Date: 2022-08-02 12:23:25.525035 """ from alembic import op # revision identifiers, used by Alembic. 
revision = '99de3849d860' down_revision = 'e25ffa003242' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('access_rule', schema=None) as batch_op: batch_op.drop_constraint('access_rule_external_id_key', type_='unique') with op.batch_alter_table('trust', schema=None) as batch_op: batch_op.drop_constraint( 'duplicate_trust_constraint_expanded', type_='unique' ) ././@PaxHeader0000000000000000000000000000022400000000000011453 xustar0000000000000000126 path=keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/contract/c88cdce8f248_remove_duplicate_constraints.py 22 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/contract/c88cdce8f248_remove_duplicat0000664000175000017500000000571200000000000033615 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove duplicate constraints. Revision ID: c88cdce8f248 Revises: 99de3849d860 Create Date: 2023-03-15 13:17:44.060715 """ from alembic import op from sqlalchemy.engine import reflection # revision identifiers, used by Alembic. revision = 'c88cdce8f248' down_revision = '99de3849d860' branch_labels = None depends_on = None def upgrade(): bind = op.get_bind() # This only affects MySQL - PostgreSQL and SQLite were smart enough to # ignore the duplicate constraints if bind.engine.name != 'mysql': return # We want to drop a duplicate index on the 'project_tag' table. 
To drop an # index, we would normally just use drop_index like so: # # with op.batch_alter_table('project_tag', schema=None) as batch_op: # batch_op.drop_index(foo) # # However, the index wasn't explicitly named so we're not sure what 'foo' # is. It has two potential names: # # - If it was created by alembic, alembic will have left things up to the # backend, which on MySQL means the index will have the same name as the # first column the index covers [1]. Alternatively, ... # - If it was created by sqlalchemy-migrate then it will be called # '{table_name}_{first_column_name}_key' [2] # # We need to handle both, so we need to first inspect the table to find # which it is. # # Note that unlike MariaDB [3], MySQL [4] doesn't support the 'ALTER TABLE # tbl_name DROP CONSTRAINT IF EXISTS constraint_name' syntax, which would # have allowed us to avoid the inspection. Boo. # # [1] https://dba.stackexchange.com/a/160712 # [2] https://opendev.org/x/sqlalchemy-migrate/src/commit/5d1f322542cd8eb42381612765be4ed9ca8105ec/migrate/changeset/constraint.py#L199 # noqa: E501 # [3] https://mariadb.com/kb/en/alter-table/ # [4] https://dev.mysql.com/doc/refman/8.0/en/alter-table.html inspector = reflection.Inspector.from_engine(bind) indexes = inspector.get_indexes('project_tag') index_name = None for index in indexes: if index['column_names'] == ['project_id', 'name']: index_name = index['name'] break else: # This should never happen *but* we silently ignore it since there's no # need to break user's upgrade flow, even if they've borked something return with op.batch_alter_table('project_tag', schema=None) as batch_op: batch_op.drop_index(index_name) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/expand/0000775000175000017500000000000000000000000026150 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000025000000000000011452 
xustar0000000000000000146 path=keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/expand/11c3b243b4cb_remove_service_provider_relay_state_server_default.py 22 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/expand/11c3b243b4cb_remove_service_pr0000664000175000017500000000204700000000000033471 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Remove service_provider.relay_state_prefix server default. Revision ID: 11c3b243b4cb Revises: b4f8b3f584e0 Create Date: 2023-07-03 12:03:21.649144 """ from alembic import op # revision identifiers, used by Alembic. revision = '11c3b243b4cb' down_revision = 'b4f8b3f584e0' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('service_provider', schema=None) as batch_op: batch_op.alter_column( 'relay_state_prefix', server_default=None, ) ././@PaxHeader0000000000000000000000000000021700000000000011455 xustar0000000000000000121 path=keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/expand/b4f8b3f584e0_fix_incorrect_constraints.py 22 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/bobcat/expand/b4f8b3f584e0_fix_incorrect_con0000664000175000017500000000225700000000000033502 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fix incorrect constraints. Revision ID: b4f8b3f584e0 Revises: 29e87d24a316 Create Date: 2022-08-02 12:23:25.520570 """ from alembic import op # revision identifiers, used by Alembic. revision = 'b4f8b3f584e0' down_revision = '29e87d24a316' branch_labels = None depends_on = None def upgrade(): with op.batch_alter_table('trust', schema=None) as batch_op: batch_op.create_unique_constraint( 'duplicate_trust_constraint', [ 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', ], ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/common/sql/migrations/versions/yoga/0000775000175000017500000000000000000000000024376 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/yoga/contract/0000775000175000017500000000000000000000000026213 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py0000664000175000017500000000157000000000000032024 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial no-op Yoga contract migration. Revision ID: e25ffa003242 Revises: 27e647c0fad4 Create Date: 2022-01-21 00:00:00.000000 """ # revision identifiers, used by Alembic. revision = 'e25ffa003242' down_revision = '27e647c0fad4' branch_labels = ('contract',) # milestone identifier keystone_milestone = ['yoga'] def upgrade(): pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/sql/migrations/versions/yoga/expand/0000775000175000017500000000000000000000000025655 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/migrations/versions/yoga/expand/29e87d24a316_initial.py0000664000175000017500000000156400000000000031431 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Initial no-op Yoga expand migration. Revision ID: 29e87d24a316 Revises: 27e647c0fad4 Create Date: 2022-01-21 00:00:00.000000 """ # revision identifiers, used by Alembic. 
revision = '29e87d24a316' down_revision = '27e647c0fad4' branch_labels = ('expand',) # milestone identifier keystone_milestone = ['yoga'] def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/sql/upgrades.py0000664000175000017500000002446700000000000021614 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from alembic import command as alembic_api from alembic import config as alembic_config from alembic import migration as alembic_migration from alembic import script as alembic_script from oslo_db import exception as db_exception from oslo_log import log as logging from oslo_utils import fileutils from keystone.common import sql import keystone.conf CONF = keystone.conf.CONF LOG = logging.getLogger(__name__) ALEMBIC_INIT_VERSION = '27e647c0fad4' EXPAND_BRANCH = 'expand' DATA_MIGRATION_BRANCH = 'data_migration' CONTRACT_BRANCH = 'contract' RELEASES = ( 'yoga', 'bobcat', '2024.01', ) MILESTONES = ( 'yoga', # Do not add the milestone until the end of the release ) CURRENT_RELEASE = RELEASES[-1] MIGRATION_BRANCHES = (EXPAND_BRANCH, CONTRACT_BRANCH) VERSIONS_PATH = os.path.join( os.path.dirname(sql.__file__), 'migrations', 'versions', ) def get_version_branch_path(release=None, branch=None): """Get the path to a version branch.""" version_path = VERSIONS_PATH if branch and release: return os.path.join(version_path, release, branch) return version_path def check_bootstrap_new_branch(branch, version_path, addn_kwargs): """Bootstrap a new migration branch if it does not exist.""" addn_kwargs['version_path'] = version_path addn_kwargs['head'] = f'{branch}@head' if not os.path.exists(version_path): # Bootstrap initial directory structure fileutils.ensure_tree(version_path, mode=0o755) def _find_alembic_conf(): """Get the project's alembic configuration. 
:returns: An instance of ``alembic.config.Config`` """ path = os.path.join( os.path.abspath(os.path.dirname(__file__)), 'alembic.ini', ) config = alembic_config.Config(os.path.abspath(path)) config.set_main_option('sqlalchemy.url', CONF.database.connection) # we don't want to use the logger configuration from the file, which is # only really intended for the CLI # https://stackoverflow.com/a/42691781/613428 config.attributes['configure_logger'] = False # we want to scan all the versioned subdirectories version_paths = [VERSIONS_PATH] for release in RELEASES: for branch in MIGRATION_BRANCHES: version_path = os.path.join(VERSIONS_PATH, release, branch) version_paths.append(version_path) config.set_main_option('version_locations', ' '.join(version_paths)) return config def get_alembic_config(): return _find_alembic_conf() def _get_current_heads(engine, config): script = alembic_script.ScriptDirectory.from_config(config) with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) heads = context.get_current_heads() heads_map = {} for head in heads: if CONTRACT_BRANCH in script.get_revision(head).branch_labels: heads_map[CONTRACT_BRANCH] = head else: heads_map[EXPAND_BRANCH] = head return heads_map def get_current_heads(): """Get the current head of each the expand and contract branches.""" config = _find_alembic_conf() with sql.session_for_read() as session: engine = session.get_bind() # discard the URL encoded in alembic.ini in favour of the URL # configured for the engine by the database fixtures, casting from # 'sqlalchemy.engine.url.URL' to str in the process. This returns a # RFC-1738 quoted URL, which means that a password like "foo@" will be # turned into "foo%40". This in turns causes a problem for # set_main_option() because that uses ConfigParser.set, which (by # design) uses *python* interpolation to write the string out ... where # "%" is the special python interpolation character! 
Avoid this # mismatch by quoting all %'s for the set below. engine_url = engine.url.render_as_string(hide_password=False).replace( '%', '%%' ) config.set_main_option('sqlalchemy.url', engine_url) heads = _get_current_heads(engine, config) return heads def _is_database_under_alembic_control(engine): with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return bool(context.get_current_heads()) def _upgrade_alembic(engine, config, branch): revision = 'heads' if branch: revision = f'{branch}@head' # re-use the connection rather than creating a new one with engine.begin() as connection: config.attributes['connection'] = connection alembic_api.upgrade(config, revision) def get_db_version(branch=EXPAND_BRANCH, *, engine=None): config = _find_alembic_conf() if engine is None: with sql.session_for_read() as session: engine = session.get_bind() # discard the URL encoded in alembic.ini in favour of the URL # configured for the engine by the database fixtures, casting from # 'sqlalchemy.engine.url.URL' to str in the process. This returns a # RFC-1738 quoted URL, which means that a password like "foo@" will be # turned into "foo%40". This in turns causes a problem for # set_main_option() because that uses ConfigParser.set, which (by # design) uses *python* interpolation to write the string out ... where # "%" is the special python interpolation character! Avoid this # mismatch by quoting all %'s for the set below. 
engine_url = engine.url.render_as_string(hide_password=False).replace( '%', '%%' ) config.set_main_option('sqlalchemy.url', engine_url) # we use '.get' since the particular branch might not have been created alembic_version = _get_current_heads(engine, config).get(branch) return alembic_version def _db_sync(branch=None, *, engine=None): config = _find_alembic_conf() if engine is None: with sql.session_for_write() as session: engine = session.get_bind() # discard the URL encoded in alembic.ini in favour of the URL # configured for the engine by the database fixtures, casting from # 'sqlalchemy.engine.url.URL' to str in the process. This returns a # RFC-1738 quoted URL, which means that a password like "foo@" will be # turned into "foo%40". This in turns causes a problem for # set_main_option() because that uses ConfigParser.set, which (by # design) uses *python* interpolation to write the string out ... where # "%" is the special python interpolation character! Avoid this # mismatch by quoting all %'s for the set below. engine_url = engine.url.render_as_string(hide_password=False).replace( '%', '%%' ) config.set_main_option('sqlalchemy.url', engine_url) _upgrade_alembic(engine, config, branch) def _validate_upgrade_order(branch, *, engine=None): """Validate the upgrade order of the migration branches. This is run before allowing the db_sync command to execute. Ensure the expand steps have been run before the contract steps. :param branch: The name of the branch that the user is trying to upgrade. 
""" if branch == EXPAND_BRANCH: return if branch == DATA_MIGRATION_BRANCH: # this is a no-op in alembic land return config = _find_alembic_conf() if engine is None: with sql.session_for_read() as session: engine = session.get_bind() script = alembic_script.ScriptDirectory.from_config(config) expand_head = None for head in script.get_heads(): if EXPAND_BRANCH in script.get_revision(head).branch_labels: expand_head = head break with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) current_heads = context.get_current_heads() if expand_head not in current_heads: raise db_exception.DBMigrationError( 'You are attempting to upgrade contract ahead of expand. ' 'Please refer to ' 'https://docs.openstack.org/keystone/latest/admin/' 'identity-upgrading.html ' 'to see the proper steps for rolling upgrades.' ) def expand_schema(engine=None): """Expand the database schema ahead of data migration. This is run manually by the keystone-manage command before the first keystone node is migrated to the latest release. """ _validate_upgrade_order(EXPAND_BRANCH, engine=engine) _db_sync(EXPAND_BRANCH, engine=engine) def migrate_data(engine=None): """Migrate data to match the new schema. This is run manually by the keystone-manage command once the keystone schema has been expanded for the new release. """ print( 'Data migrations are no longer supported with alembic. ' 'This is now a no-op.' ) def contract_schema(engine=None): """Contract the database. This is run manually by the keystone-manage command once the keystone nodes have been upgraded to the latest release and will remove any old tables/columns that are no longer required. """ _validate_upgrade_order(CONTRACT_BRANCH, engine=engine) _db_sync(CONTRACT_BRANCH, engine=engine) def offline_sync_database_to_version(version=None, *, engine=None): """Perform and off-line sync of the database. 
Migrate the database up to the latest version, doing the equivalent of the cycle of --expand, --migrate and --contract, for when an offline upgrade is being performed. If a version is specified then only migrate the database up to that version. Downgrading is not supported. If version is specified, then only the main database migration is carried out - and the expand, migration and contract phases will NOT be run. """ if version: raise Exception('Specifying a version is no longer supported') _db_sync(engine=engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/tokenless_auth.py0000664000175000017500000001736200000000000022227 0ustar00zuulzuul00000000000000# Copyright 2015 Hewlett-Packard # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from oslo_log import log from keystone.auth import core from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) class TokenlessAuthHelper(provider_api.ProviderAPIMixin): def __init__(self, env): """A init class for TokenlessAuthHelper. :param env: The HTTP request environment that should contain client certificate attributes. These attributes should match with what the mapping defines. 
Or a user cannot be mapped and results un-authenticated. The following examples are for the attributes that reference to the client certificate's Subject's Common Name and Organization: SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O :type env: dict """ self.env = env def _build_scope_info(self): """Build the token request scope based on the headers. :returns: scope data :rtype: dict """ project_id = self.env.get('HTTP_X_PROJECT_ID') project_name = self.env.get('HTTP_X_PROJECT_NAME') project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID') project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME') domain_id = self.env.get('HTTP_X_DOMAIN_ID') domain_name = self.env.get('HTTP_X_DOMAIN_NAME') scope = {} if project_id: scope['project'] = {'id': project_id} elif project_name: scope['project'] = {'name': project_name} if project_domain_id: scope['project']['domain'] = {'id': project_domain_id} elif project_domain_name: scope['project']['domain'] = {'name': project_domain_name} else: msg = _( 'Neither Project Domain ID nor Project Domain Name ' 'was provided.' ) raise exception.ValidationError(msg) elif domain_id: scope['domain'] = {'id': domain_id} elif domain_name: scope['domain'] = {'name': domain_name} else: raise exception.ValidationError( attribute='project or domain', target='scope' ) return scope def get_scope(self): auth = {} # NOTE(chioleong): Auth methods here are insignificant because # we only care about using auth.controllers.AuthInfo # to validate the scope information. Therefore, # we don't provide any identity. auth['scope'] = self._build_scope_info() # NOTE(chioleong): We'll let AuthInfo validate the scope for us auth_info = core.AuthInfo.create(auth, scope_only=True) return auth_info.get_scope() def get_mapped_user(self, project_id=None, domain_id=None): """Map client certificate to an existing user. 
If user is ephemeral, there is no validation on the user himself; however it will be mapped to a corresponding group(s) and the scope of this ephemeral user is the same as what is assigned to the group. :param project_id: Project scope of the mapped user. :param domain_id: Domain scope of the mapped user. :returns: A dictionary that contains the keys, such as user_id, user_name, domain_id, domain_name :rtype: dict """ idp_id = self._build_idp_id() LOG.debug( 'The IdP Id %s and protocol Id %s are used to look up ' 'the mapping.', idp_id, CONF.tokenless_auth.protocol, ) mapped_properties, mapping_id = self.federation_api.evaluate( idp_id, CONF.tokenless_auth.protocol, self.env ) user = mapped_properties.get('user', {}) user_id = user.get('id') user_name = user.get('name') user_type = user.get('type') if user.get('domain') is not None: user_domain_id = user.get('domain').get('id') user_domain_name = user.get('domain').get('name') else: user_domain_id = None user_domain_name = None # if user is ephemeral type, we don't care if the user exists # or not, but just care if the mapped group(s) is valid. 
if user_type == utils.UserType.EPHEMERAL: user_ref = {'type': utils.UserType.EPHEMERAL} group_ids = mapped_properties['group_ids'] utils.validate_mapped_group_ids( group_ids, mapping_id, self.identity_api ) group_ids.extend( utils.transform_to_group_ids( mapped_properties['group_names'], mapping_id, self.identity_api, self.resource_api, ) ) roles = self.assignment_api.get_roles_for_groups( group_ids, project_id, domain_id ) if roles is not None: role_names = [role['name'] for role in roles] user_ref['roles'] = role_names user_ref['group_ids'] = list(group_ids) user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id user_ref[federation_constants.PROTOCOL] = ( CONF.tokenless_auth.protocol ) return user_ref if user_id: user_ref = self.identity_api.get_user(user_id) elif user_name and (user_domain_name or user_domain_id): if user_domain_name: user_domain = self.resource_api.get_domain_by_name( user_domain_name ) self.resource_api.assert_domain_enabled( user_domain['id'], user_domain ) user_domain_id = user_domain['id'] user_ref = self.identity_api.get_user_by_name( user_name, user_domain_id ) else: msg = _( 'User auth cannot be built due to missing either ' 'user id, or user name with domain id, or user name ' 'with domain name.' ) raise exception.ValidationError(msg) self.identity_api.assert_user_enabled( user_id=user_ref['id'], user=user_ref ) user_ref['type'] = utils.UserType.LOCAL return user_ref def _build_idp_id(self): """Build the IdP name from the given config option issuer_attribute. 
The default issuer attribute SSL_CLIENT_I_DN in the environment is built with the following formula - base64_idp = sha1(env['SSL_CLIENT_I_DN']) :returns: base64_idp like the above example :rtype: str """ idp = self.env.get(CONF.tokenless_auth.issuer_attribute) if idp is None: raise exception.TokenlessAuthConfigError( issuer_attribute=CONF.tokenless_auth.issuer_attribute ) hashed_idp = hashlib.sha256(idp.encode('utf-8')) return hashed_idp.hexdigest() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/utils.py0000664000175000017500000004615100000000000020335 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 - 2012 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import collections.abc import contextlib import grp import hashlib import itertools import os import pwd import urllib import uuid from cryptography import x509 from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import reflection from oslo_utils import strutils from oslo_utils import timeutils from keystone.common import password_hashing import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) WHITELISTED_PROPERTIES = [ 'tenant_id', 'project_id', 'user_id', 'compute_host', 'public_endpoint', ] # NOTE(stevermar): This UUID must stay the same, forever, across # all of keystone to preserve its value as a URN namespace, which is # used for ID transformation. RESOURCE_ID_NAMESPACE = uuid.UUID('4332ecab-770b-4288-a680-b9aca3b1b153') # Compatibilty for password hashing functions. verify_length_and_trunc_password = ( password_hashing.verify_length_and_trunc_password ) # noqa hash_password = password_hashing.hash_password hash_user_password = password_hashing.hash_user_password check_password = password_hashing.check_password # NOTE(hiromu): This dict defines alternative DN string for X.509. When # retriving DN from X.509, converting attributes types that are not listed # in the RFC4514 to a corresponding alternative DN string. ATTR_NAME_OVERRIDES = { x509.NameOID.EMAIL_ADDRESS: "emailAddress", } def resource_uuid(value): """Convert input to valid UUID hex digits.""" try: uuid.UUID(value) return value except ValueError: if len(value) <= 64: return uuid.uuid5(RESOURCE_ID_NAMESPACE, value).hex raise ValueError( _( 'Length of transformable resource id > 64, ' 'which is max allowed characters' ) ) def flatten_dict(d, parent_key=''): """Flatten a nested dictionary. Converts a dictionary with nested values to a single level flat dictionary, with dotted notation for each key. """ items = [] for k, v in d.items(): new_key = parent_key + '.' 
+ k if parent_key else k if isinstance(v, collections.abc.MutableMapping): items.extend(list(flatten_dict(v, new_key).items())) else: items.append((new_key, v)) return dict(items) class SmarterEncoder(jsonutils.json.JSONEncoder): """Help for JSON encoding dict-like objects.""" def default(self, obj): if not isinstance(obj, dict) and hasattr(obj, 'items'): return dict(obj.items()) return super().default(obj) def hash_access_key(access): hash_ = hashlib.sha256() if not isinstance(access, bytes): access = access.encode('utf-8') hash_.update(access) return hash_.hexdigest() def attr_as_boolean(val_attr): """Return the boolean value, decoded from a string. We test explicitly for a value meaning False, which can be one of several formats as specified in oslo strutils.FALSE_STRINGS. All other string values (including an empty string) are treated as meaning True. """ return strutils.bool_from_string(val_attr, default=True) def auth_str_equal(provided, known): """Constant-time string comparison. :params provided: the first string :params known: the second string :returns: True if the strings are equal. This function takes two strings and compares them. It is intended to be used when doing a comparison for authentication purposes to help guard against timing attacks. When using the function for this purpose, always provide the user-provided password as the first argument. The time this function will take is always a factor of the length of this string. 
""" result = 0 p_len = len(provided) k_len = len(known) for i in range(p_len): a = ord(provided[i]) if i < p_len else 0 b = ord(known[i]) if i < k_len else 0 result |= a ^ b return (p_len == k_len) & (result == 0) def setup_remote_pydev_debug(): if CONF.pydev_debug_host and CONF.pydev_debug_port: try: try: from pydev import pydevd except ImportError: import pydevd pydevd.settrace( CONF.pydev_debug_host, port=CONF.pydev_debug_port, stdoutToServer=True, stderrToServer=True, ) return True except Exception: LOG.exception( 'Error setting up the debug environment. Verify that the ' 'option --debug-url has the format : and that a ' 'debugger processes is listening on that port.' ) raise def get_unix_user(user=None): """Get the uid and user name. This is a convenience utility which accepts a variety of input which might represent a unix user. If successful it returns the uid and name. Valid input is: string A string is first considered to be a user name and a lookup is attempted under that name. If no name is found then an attempt is made to convert the string to an integer and perform a lookup as a uid. int An integer is interpreted as a uid. None None is interpreted to mean use the current process's effective user. If the input is a valid type but no user is found a KeyError is raised. If the input is not a valid type a TypeError is raised. :param object user: string, int or None specifying the user to lookup. 
:returns: tuple of (uid, name) """ if isinstance(user, str): try: user_info = pwd.getpwnam(user) except KeyError: try: i = int(user) except ValueError: raise KeyError("user name '%s' not found" % user) try: user_info = pwd.getpwuid(i) except KeyError: raise KeyError("user id %d not found" % i) elif isinstance(user, int): try: user_info = pwd.getpwuid(user) except KeyError: raise KeyError("user id %d not found" % user) elif user is None: user_info = pwd.getpwuid(os.geteuid()) else: user_cls_name = reflection.get_class_name(user, fully_qualified=False) raise TypeError( 'user must be string, int or None; not %s (%r)' % (user_cls_name, user) ) return user_info.pw_uid, user_info.pw_name def get_unix_group(group=None): """Get the gid and group name. This is a convenience utility which accepts a variety of input which might represent a unix group. If successful it returns the gid and name. Valid input is: string A string is first considered to be a group name and a lookup is attempted under that name. If no name is found then an attempt is made to convert the string to an integer and perform a lookup as a gid. int An integer is interpreted as a gid. None None is interpreted to mean use the current process's effective group. If the input is a valid type but no group is found a KeyError is raised. If the input is not a valid type a TypeError is raised. :param object group: string, int or None specifying the group to lookup. :returns: tuple of (gid, name) """ if isinstance(group, str): try: group_info = grp.getgrnam(group) except KeyError: # Was an int passed as a string? # Try converting to int and lookup by id instead. 
try: i = int(group) except ValueError: raise KeyError("group name '%s' not found" % group) try: group_info = grp.getgrgid(i) except KeyError: raise KeyError("group id %d not found" % i) elif isinstance(group, int): try: group_info = grp.getgrgid(group) except KeyError: raise KeyError("group id %d not found" % group) elif group is None: group_info = grp.getgrgid(os.getegid()) else: group_cls_name = reflection.get_class_name( group, fully_qualified=False ) raise TypeError( 'group must be string, int or None; not %s (%r)' % (group_cls_name, group) ) return group_info.gr_gid, group_info.gr_name class WhiteListedItemFilter: def __init__(self, whitelist, data): self._whitelist = set(whitelist or []) self._data = data def __getitem__(self, name): """Evaluation on an item access.""" if name not in self._whitelist: raise KeyError return self._data[name] _ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f' _ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S' def isotime(at=None, subsecond=False): """Stringify time in ISO 8601 format. Python provides a similar instance method for datetime.datetime objects called `isoformat()`. The format of the strings generated by `isoformat()` has a couple of problems: 1) The strings generated by `isotime()` are used in tokens and other public APIs that we can't change without a deprecation period. The strings generated by `isoformat()` are not the same format, so we can't just change to it. 2) The strings generated by `isoformat()` do not include the microseconds if the value happens to be 0. This will likely show up as random failures as parsers may be written to always expect microseconds, and it will parse correctly most of the time. :param at: Optional datetime object to return at a string. If not provided, the time when the function was called will be used. :type at: datetime.datetime :param subsecond: If true, the returned string will represent microsecond precision, but only precise to the second. 
For example, a `datetime.datetime(2016, 9, 14, 14, 1, 13, 970223)` will be returned as `2016-09-14T14:01:13.000000Z`. :type subsecond: bool :returns: A time string represented in ISO 8601 format. :rtype: str """ if not at: at = timeutils.utcnow() # NOTE(lbragstad): Datetime objects are immutable, so reassign the date we # are working with to itself as we drop microsecond precision. at = at.replace(microsecond=0) st = at.strftime( _ISO8601_TIME_FORMAT if not subsecond else _ISO8601_TIME_FORMAT_SUBSECOND ) tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC' # Need to handle either iso8601 or python UTC format st += 'Z' if tz in ['UTC', 'UTC+00:00'] else tz return st def parse_expiration_date(expiration_date): if not expiration_date.endswith('Z'): expiration_date += 'Z' try: expiration_time = timeutils.parse_isotime(expiration_date) except ValueError: raise exception.ValidationTimeStampError() if timeutils.is_older_than(expiration_time, 0): raise exception.ValidationExpirationError() return expiration_time URL_RESERVED_CHARS = ":/?#[]@!$&'()*+,;=" def is_not_url_safe(name): """Check if a string contains any url reserved characters.""" return len(list_url_unsafe_chars(name)) > 0 def list_url_unsafe_chars(name): """Return a list of the reserved characters.""" reserved_chars = '' for i in name: if i in URL_RESERVED_CHARS: reserved_chars += i return reserved_chars def lower_case_hostname(url): """Change the URL's hostname to lowercase.""" # NOTE(gyee): according to # https://www.w3.org/TR/WD-html40-970708/htmlweb.html, the netloc portion # of the URL is case-insensitive parsed = urllib.parse.urlparse(url) # Note: _replace method for named tuples is public and defined in docs replaced = parsed._replace(netloc=parsed.netloc.lower()) return urllib.parse.urlunparse(replaced) def remove_standard_port(url): # remove the default ports specified in RFC2616 and 2818 o = urllib.parse.urlparse(url) separator = ':' (host, separator, port) = o.netloc.partition(separator) if 
o.scheme.lower() == 'http' and port == '80': # NOTE(gyee): _replace() is not a private method. It has # an underscore prefix to prevent conflict with field names. # See https://docs.python.org/2/library/collections.html# # collections.namedtuple o = o._replace(netloc=host) if o.scheme.lower() == 'https' and port == '443': o = o._replace(netloc=host) return urllib.parse.urlunparse(o) def format_url(url, substitutions, silent_keyerror_failures=None): """Format a user-defined URL with the given substitutions. :param string url: the URL to be formatted :param dict substitutions: the dictionary used for substitution :param list silent_keyerror_failures: keys for which we should be silent if there is a KeyError exception on substitution attempt :returns: a formatted URL """ substitutions = WhiteListedItemFilter( WHITELISTED_PROPERTIES, substitutions ) allow_keyerror = silent_keyerror_failures or [] try: result = url.replace('$(', '%(') % substitutions except AttributeError: msg = "Malformed endpoint - %(url)r is not a string" LOG.error(msg, {"url": url}) raise exception.MalformedEndpoint(endpoint=url) except KeyError as e: if not e.args or e.args[0] not in allow_keyerror: msg = "Malformed endpoint %(url)s - unknown key %(keyerror)s" LOG.error(msg, {"url": url, "keyerror": e}) raise exception.MalformedEndpoint(endpoint=url) else: result = None except TypeError as e: msg = ( "Malformed endpoint '%(url)s'. The following type error " "occurred during string substitution: %(typeerror)s" ) LOG.error(msg, {"url": url, "typeerror": e}) raise exception.MalformedEndpoint(endpoint=url) except ValueError: msg = ( "Malformed endpoint %s - incomplete format " "(are you missing a type notifier ?)" ) LOG.error(msg, url) raise exception.MalformedEndpoint(endpoint=url) return result def check_endpoint_url(url): """Check substitution of url. 
The invalid urls are as follows: urls with substitutions that is not in the whitelist Check the substitutions in the URL to make sure they are valid and on the whitelist. :param str url: the URL to validate :rtype: None :raises keystone.exception.URLValidationError: if the URL is invalid """ # check whether the property in the path is exactly the same # with that in the whitelist below substitutions = dict(zip(WHITELISTED_PROPERTIES, itertools.repeat(''))) try: url.replace('$(', '%(') % substitutions except (KeyError, TypeError, ValueError): raise exception.URLValidationError(url=url) def get_certificate_subject_dn(cert_pem): """Get subject DN from the PEM certificate content. :param str cert_pem: the PEM certificate content :rtype: JSON data for subject DN :raises keystone.exception.ValidationError: if the PEM certificate content is invalid """ dn_dict = {} try: cert = x509.load_pem_x509_certificate(cert_pem.encode('utf-8')) for item in cert.subject: name, value = item.rfc4514_string().split('=') if item.oid in ATTR_NAME_OVERRIDES: name = ATTR_NAME_OVERRIDES[item.oid] dn_dict[name] = value except Exception as error: LOG.exception(error) message = _('The certificate content is not PEM format.') raise exception.ValidationError(message=message) return dn_dict def get_certificate_issuer_dn(cert_pem): """Get issuer DN from the PEM certificate content. 
:param str cert_pem: the PEM certificate content :rtype: JSON data for issuer DN :raises keystone.exception.ValidationError: if the PEM certificate content is invalid """ dn_dict = {} try: cert = x509.load_pem_x509_certificate(cert_pem.encode('utf-8')) for item in cert.issuer: name, value = item.rfc4514_string().split('=') if item.oid in ATTR_NAME_OVERRIDES: name = ATTR_NAME_OVERRIDES[item.oid] dn_dict[name] = value except Exception as error: LOG.exception(error) message = _('The certificate content is not PEM format.') raise exception.ValidationError(message=message) return dn_dict def get_certificate_thumbprint(cert_pem): """Get certificate thumbprint from the PEM certificate content. :param str cert_pem: the PEM certificate content :rtype: certificate thumbprint """ thumb_sha256 = hashlib.sha256(cert_pem.encode('ascii')).digest() thumbprint = base64.urlsafe_b64encode(thumb_sha256).decode('ascii') return thumbprint def create_directory(directory, keystone_user_id=None, keystone_group_id=None): """Attempt to create a directory if it doesn't exist. :param directory: string containing the path of the directory to create. :param keystone_user_id: the system ID of the process running keystone. :param keystone_group_id: the system ID of the group running keystone. 
""" if not os.access(directory, os.F_OK): LOG.info( '%s does not appear to exist; attempting to create it', directory ) try: os.makedirs(directory, 0o700) except OSError: LOG.error( 'Failed to create %s: either it already ' 'exists or you don\'t have sufficient permissions to ' 'create it', directory, ) if keystone_user_id and keystone_group_id: os.chown(directory, keystone_user_id, keystone_group_id) elif keystone_user_id or keystone_group_id: LOG.warning( 'Unable to change the ownership of key repository without ' 'a keystone user ID and keystone group ID both being ' 'provided: %s', directory, ) @contextlib.contextmanager def nested_contexts(*contexts): with contextlib.ExitStack() as stack: yield [stack.enter_context(c) for c in contexts] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.514114 keystone-26.0.0/keystone/common/validation/0000775000175000017500000000000000000000000020746 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/validation/__init__.py0000664000175000017500000000445000000000000023062 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Request body validating middleware for OpenStack Identity resources.""" from keystone.common.validation import validators def lazy_validate(request_body_schema, resource_to_validate): """A non-decorator way to validate a request, to be used inline. :param request_body_schema: a schema to validate the resource reference :param resource_to_validate: dictionary to validate :raises keystone.exception.ValidationError: if `resource_to_validate` is None. (see wrapper method below). :raises TypeError: at decoration time when the expected resource to validate isn't found in the decorated method's signature """ schema_validator = validators.SchemaValidator(request_body_schema) schema_validator.validate(resource_to_validate) def nullable(property_schema): """Clone a property schema into one that is nullable. :param dict property_schema: schema to clone into a nullable schema :returns: a new dict schema """ # TODO(dstanek): deal with the case where type is already a list; we don't # do that yet so I'm not wasting time on it new_schema = property_schema.copy() new_schema['type'] = [property_schema['type'], 'null'] # NOTE(kmalloc): If enum is specified (such as our boolean case) ensure we # add null to the enum as well so that null can be passed/validated as # expected. Without adding to the enum, null will not validate as enum is # explicitly listing valid values. According to the JSON Schema # specification, the values must be unique in the enum array. if 'enum' in new_schema and None not in new_schema['enum']: # In the enum the 'null' is NoneType new_schema['enum'].append(None) return new_schema ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/validation/parameter_types.py0000664000175000017500000000376700000000000024541 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common parameter types for validating a request reference.""" boolean = {'type': 'boolean', 'enum': [True, False]} # NOTE(lbragstad): Be mindful of this pattern as it might require changes # once this is used on user names, LDAP-based user names specifically since # commas aren't allowed in the following pattern. Here we are only going to # check the length of the name and ensure that it's a string. Right now we are # not going to validate on a naming pattern for issues with # internationalization. name = { 'type': 'string', 'minLength': 1, 'maxLength': 255, 'pattern': r'[\S]+', } external_id_string = {'type': 'string', 'minLength': 1, 'maxLength': 64} id_string = { 'type': 'string', 'minLength': 1, 'maxLength': 64, # TODO(lbragstad): Find a way to make this configurable such that the end # user chooses how much control they want over id_strings with a regex 'pattern': r'^[a-zA-Z0-9-]+$', } mapping_id_string = { 'type': 'string', 'minLength': 1, 'maxLength': 64, 'pattern': '^[a-zA-Z0-9-_]+$', } description = {'type': 'string'} url = { 'type': 'string', 'minLength': 0, 'maxLength': 225, # NOTE(edmondsw): we could do more to validate per various RFCs, but # decision was made to err on the side of leniency. 
The following is based # on rfc1738 section 2.1 'pattern': '^[a-zA-Z0-9+.-]+:.+', } email = {'type': 'string', 'format': 'email'} integer_min0 = {'type': 'integer', 'minimum': 0} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/common/validation/validators.py0000664000175000017500000000733600000000000023501 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Internal implementation of request body validating middleware.""" import re import jsonschema from oslo_config import cfg from oslo_log import log from keystone import exception from keystone.i18n import _ CONF = cfg.CONF LOG = log.getLogger(__name__) # TODO(rderose): extend schema validation and add this check there def validate_password(password): pattern = CONF.security_compliance.password_regex if pattern: if not isinstance(password, str): detail = _("Password must be a string type") raise exception.PasswordValidationError(detail=detail) try: if not re.match(pattern, password): pattern_desc = ( CONF.security_compliance.password_regex_description ) raise exception.PasswordRequirementsValidationError( detail=pattern_desc ) except re.error: msg = ( "Unable to validate password due to invalid regular " "expression - password_regex: %s" ) LOG.error(msg, pattern) detail = _( "Unable to validate password due to invalid configuration" ) raise exception.PasswordValidationError(detail=detail) class SchemaValidator: 
"""Resource reference validator class.""" validator_org = jsonschema.Draft4Validator def __init__(self, schema): # NOTE(lbragstad): If at some point in the future we want to extend # our validators to include something specific we need to check for, # we can do it here. Nova's V3 API validators extend the validator to # include `self._validate_minimum` and `self._validate_maximum`. This # would be handy if we needed to check for something the jsonschema # didn't by default. See the Nova V3 validator for details on how this # is done. validators = {} validator_cls = jsonschema.validators.extend( self.validator_org, validators ) fc = jsonschema.FormatChecker() self.validator = validator_cls(schema, format_checker=fc) def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: # NOTE: For whole OpenStack message consistency, this error # message has been written in a format consistent with WSME. if ex.path: # NOTE(lbragstad): Here we could think about using iter_errors # as a method of providing invalid parameters back to the # user. # TODO(lbragstad): If the value of a field is confidential or # too long, then we should build the masking in here so that # we don't expose sensitive user information in the event it # fails validation. 
path = '/'.join(map(str, ex.path)) detail = _( "Invalid input for field '%(path)s': %(message)s" ) % {'path': path, 'message': str(ex)} else: detail = str(ex) raise exception.SchemaValidationError(detail=detail) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5181139 keystone-26.0.0/keystone/conf/0000775000175000017500000000000000000000000016251 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/__init__.py0000664000175000017500000001244300000000000020366 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import logging from oslo_cache import core as cache from oslo_config import cfg from oslo_log import log import oslo_messaging from oslo_middleware import cors from oslo_policy import opts as policy_opts from osprofiler import opts as profiler from keystone.conf import application_credential from keystone.conf import assignment from keystone.conf import auth from keystone.conf import catalog from keystone.conf import credential from keystone.conf import default from keystone.conf import domain_config from keystone.conf import endpoint_filter from keystone.conf import endpoint_policy from keystone.conf import federation from keystone.conf import fernet_receipts from keystone.conf import fernet_tokens from keystone.conf import identity from keystone.conf import identity_mapping from keystone.conf import jwt_tokens from keystone.conf import ldap from keystone.conf import oauth1 from keystone.conf import oauth2 from keystone.conf import policy from keystone.conf import receipt from keystone.conf import resource from keystone.conf import revoke from keystone.conf import role from keystone.conf import saml from keystone.conf import security_compliance from keystone.conf import shadow_users from keystone.conf import token from keystone.conf import tokenless_auth from keystone.conf import totp from keystone.conf import trust from keystone.conf import unified_limit from keystone.conf import wsgi CONF = cfg.CONF conf_modules = [ application_credential, assignment, auth, catalog, credential, default, domain_config, endpoint_filter, endpoint_policy, federation, fernet_receipts, fernet_tokens, identity, identity_mapping, jwt_tokens, ldap, oauth1, oauth2, policy, receipt, resource, revoke, role, saml, security_compliance, shadow_users, token, tokenless_auth, totp, trust, unified_limit, wsgi, ] oslo_messaging.set_transport_defaults(control_exchange='keystone') def set_default_for_default_log_levels(): """Set the default for the default_log_levels option for keystone. 
Keystone uses some packages that other OpenStack services don't use that do logging. This will set the default_log_levels default level for those packages. This function needs to be called before CONF(). """ extra_log_level_defaults = [ 'dogpile=INFO', 'routes=INFO', ] log.register_options(CONF) log.set_defaults( default_log_levels=log.get_default_log_levels() + extra_log_level_defaults ) def setup_logging(): """Set up logging for the keystone package.""" log.setup(CONF, 'keystone') logging.captureWarnings(True) def configure(conf=None): if conf is None: conf = CONF for module in conf_modules: module.register_opts(conf) # register any non-default auth methods here (used by extensions, etc) auth.setup_authentication() # add oslo.cache related config options cache.configure(conf) def set_external_opts_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=[ 'X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token', 'X-Project-Id', 'X-Project-Name', 'X-Project-Domain-Id', 'X-Project-Domain-Name', 'X-Domain-Id', 'X-Domain-Name', 'Openstack-Auth-Receipt', ], expose_headers=[ 'X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token', 'Openstack-Auth-Receipt', ], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'], ) # configure OSprofiler options profiler.set_defaults(CONF, enabled=False, trace_sqlalchemy=False) # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://github.com/openstack/oslo.policy/blob/a626ad12fe5a3abd49d70e3e5b95589d279ab578/oslo_policy/opts.py#L49 DEFAULT_POLICY_FILE = 'policy.yaml' policy_opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE) # Oslo.cache is always enabled by default for request-local caching # TODO(morganfainberg): Fix this to not use internal interface when # oslo.cache has proper interface to set defaults added. This is # just a bad way to do this. 
opts = cache._opts.list_opts() for opt_list in opts: if opt_list[0] == 'cache': for o in opt_list[1]: if o.name == 'enabled': o.default = True def set_config_defaults(): """Override all configuration default values for keystone.""" set_default_for_default_log_levels() set_external_opts_defaults() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/application_credential.py0000664000175000017500000000364500000000000023330 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the application credential backend driver in the `keystone.application_credential` namespace. Keystone only provides a `sql` driver, so there is no reason to change this unless you are providing a custom entry point. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for application credential caching. This has no effect unless global caching is enabled. """ ), ) cache_time = cfg.IntOpt( 'cache_time', help=utils.fmt( """ Time to cache application credential data in seconds. This has no effect unless global caching is enabled. """ ), ) user_limit = cfg.IntOpt( 'user_limit', default=-1, help=utils.fmt( """ Maximum number of application credentials a user is permitted to create. A value of -1 means unlimited. 
If a limit is not set, users are permitted to create application credentials at will, which could lead to bloat in the keystone database or open keystone to a DoS attack. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, caching, cache_time, user_limit, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/assignment.py0000664000175000017500000000256400000000000021002 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the assignment backend driver (where role assignments are stored) in the `keystone.assignment` namespace. Only a SQL driver is supplied by keystone itself. Unless you are writing proprietary drivers for keystone, you do not need to set this option. """ ), ) prohibited_implied_role = cfg.ListOpt( 'prohibited_implied_role', default=['admin'], help=utils.fmt( """ A list of role names which are prohibited from being an implied role. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [driver, prohibited_implied_role] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/auth.py0000664000175000017500000001040400000000000017563 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import constants from keystone.conf import utils methods = cfg.ListOpt( 'methods', default=constants._DEFAULT_AUTH_METHODS, help=utils.fmt( """ Allowed authentication methods. Note: You should disable the `external` auth method if you are currently using federation. External auth and federation both use the REMOTE_USER variable. Since both the mapped and external plugin are being invoked to validate attributes in the request environment, it can cause conflicts. """ ), ) password = cfg.StrOpt( # nosec : This is the name of the plugin, not 'password', # a password that needs to be protected. help=utils.fmt( """ Entry point for the password auth plugin module in the `keystone.auth.password` namespace. You do not need to set this unless you are overriding keystone's own password authentication plugin. """ ), ) token = cfg.StrOpt( 'token', help=utils.fmt( """ Entry point for the token auth plugin module in the `keystone.auth.token` namespace. 
You do not need to set this unless you are overriding keystone's own token authentication plugin. """ ), ) # deals with REMOTE_USER authentication external = cfg.StrOpt( 'external', help=utils.fmt( """ Entry point for the external (`REMOTE_USER`) auth plugin module in the `keystone.auth.external` namespace. Supplied drivers are `DefaultDomain` and `Domain`. The default driver is `DefaultDomain`, which assumes that all users identified by the username specified to keystone in the `REMOTE_USER` variable exist within the context of the default domain. The `Domain` option expects an additional environment variable be presented to keystone, `REMOTE_DOMAIN`, containing the domain name of the `REMOTE_USER` (if `REMOTE_DOMAIN` is not set, then the default domain will be used instead). You do not need to set this unless you are taking advantage of "external authentication", where the application server (such as Apache) is handling authentication instead of keystone. """ ), ) oauth1 = cfg.StrOpt( 'oauth1', help=utils.fmt( """ Entry point for the OAuth 1.0a auth plugin module in the `keystone.auth.oauth1` namespace. You do not need to set this unless you are overriding keystone's own `oauth1` authentication plugin. """ ), ) mapped = cfg.StrOpt( 'mapped', help=utils.fmt( """ Entry point for the mapped auth plugin module in the `keystone.auth.mapped` namespace. You do not need to set this unless you are overriding keystone's own `mapped` authentication plugin. """ ), ) application_credential = cfg.StrOpt( 'application_credential', help=utils.fmt( """ Entry point for the application_credential auth plugin module in the `keystone.auth.application_credential` namespace. You do not need to set this unless you are overriding keystone's own `application_credential` authentication plugin. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ methods, password, token, external, oauth1, mapped, application_credential, ] def _register_auth_plugin_opt(conf, option): conf.register_opt(option, group=GROUP_NAME) def setup_authentication(conf=None): """Register non-default auth methods (used by extensions, etc).""" # register any non-default auth methods here (used by extensions, etc) if conf is None: conf = cfg.CONF for method_name in conf.auth.methods: if method_name not in constants._DEFAULT_AUTH_METHODS: option = cfg.StrOpt(method_name) _register_auth_plugin_opt(conf, option) def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) setup_authentication(conf) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/catalog.py0000664000175000017500000000466200000000000020245 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils template_file = cfg.StrOpt( 'template_file', default='default_catalog.templates', help=utils.fmt( """ Absolute path to the file used for the templated catalog backend. This option is only used if the `[catalog] driver` is set to `templated`. """ ), ) driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the catalog driver in the `keystone.catalog` namespace. 
Keystone provides a `sql` option (which supports basic CRUD operations through SQL), a `templated` option (which loads the catalog from a templated catalog file on disk), and a `endpoint_filter.sql` option (which supports arbitrary service catalogs per project). """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for catalog caching. This has no effect unless global caching is enabled. In a typical deployment, there is no reason to disable this. """ ), ) cache_time = cfg.IntOpt( 'cache_time', help=utils.fmt( """ Time to cache catalog data (in seconds). This has no effect unless global and catalog caching are both enabled. Catalog data (services, endpoints, etc.) typically does not change frequently, and so a longer duration than the global default may be desirable. """ ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ Maximum number of entities that will be returned in a catalog collection. There is typically no reason to set this, as it would be unusual for a deployment to have enough services or endpoints to exceed a reasonable limit. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ template_file, driver, caching, cache_time, list_limit, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/constants.py0000664000175000017500000000163700000000000020646 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Constants for use in the keystone.conf package. These constants are shared by more than one module in the keystone.conf package. """ _DEFAULT_AUTH_METHODS = [ 'external', 'password', 'token', 'oauth1', 'mapped', 'application_credential', ] _CERTFILE = '/etc/keystone/ssl/certs/signing_cert.pem' _KEYFILE = '/etc/keystone/ssl/private/signing_key.pem' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/credential.py0000664000175000017500000000604000000000000020735 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the credential backend driver in the `keystone.credential` namespace. Keystone only provides a `sql` driver, so there's no reason to change this unless you are providing a custom entry point. 
""" ), ) provider = cfg.StrOpt( 'provider', default='fernet', help=utils.fmt( """ Entry point for credential encryption and decryption operations in the `keystone.credential.provider` namespace. Keystone only provides a `fernet` driver, so there's no reason to change this unless you are providing a custom entry point to encrypt and decrypt credentials. """ ), ) key_repository = cfg.StrOpt( 'key_repository', default='/etc/keystone/credential-keys/', help=utils.fmt( """ Directory containing Fernet keys used to encrypt and decrypt credentials stored in the credential backend. Fernet keys used to encrypt credentials have no relationship to Fernet keys used to encrypt Fernet tokens. Both sets of keys should be managed separately and require different rotation policies. Do not share this repository with the repository used to manage keys for Fernet tokens. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for caching only on retrieval of user credentials. This has no effect unless global caching is enabled. """ ), ) cache_time = cfg.IntOpt( 'cache_time', help=utils.fmt( """ Time to cache credential data in seconds. This has no effect unless global caching is enabled. """ ), ) auth_ttl = cfg.IntOpt( 'auth_ttl', default=15, help=utils.fmt( """ The length of time in minutes for which a signed EC2 or S3 token request is valid from the timestamp contained in the token request. """ ), ) user_limit = cfg.IntOpt( 'user_limit', default=-1, help=utils.fmt( """ Maximum number of credentials a user is permitted to create. A value of -1 means unlimited. If a limit is not set, users are permitted to create credentials at will, which could lead to bloat in the keystone database or open keystone to a DoS attack. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, provider, key_repository, caching, cache_time, auth_ttl, user_limit, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/conf/default.py0000664000175000017500000001370300000000000020253 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils admin_token = cfg.StrOpt( 'admin_token', secret=True, help=utils.fmt( """ Using this feature is *NOT* recommended. Instead, use the `keystone-manage bootstrap` command. The value of this option is treated as a "shared secret" that can be used to bootstrap Keystone through the API. This "token" does not represent a user (it has no identity), and carries no explicit authorization (it effectively bypasses most authorization checks). If set to `None`, the value is ignored and the `admin_token` middleware is effectively disabled. """ ), ) public_endpoint = cfg.URIOpt( 'public_endpoint', help=utils.fmt( """ The base public endpoint URL for Keystone that is advertised to clients (NOTE: this does NOT affect how Keystone listens for connections). Defaults to the base host URL of the request. 
For example, if keystone receives a request to `http://server:5000/v3/users`, then this will option will be automatically treated as `http://server:5000`. You should only need to set option if either the value of the base URL contains a path that keystone does not automatically infer (`/prefix/v3`), or if the endpoint should be found on a different host. """ ), ) max_project_tree_depth = cfg.IntOpt( 'max_project_tree_depth', default=5, help=utils.fmt( """ Maximum depth of the project hierarchy, excluding the project acting as a domain at the top of the hierarchy. WARNING: Setting it to a large value may adversely impact performance. """ ), ) max_param_size = cfg.IntOpt( 'max_param_size', default=64, help=utils.fmt( """ Limit the sizes of user & project ID/names. """ ), ) # NOTE(breton): 255 is the size of the database columns used for ID fields. # This size is picked so that the tokens can be indexed in-place as opposed to # being entries in a string table. Thus, this is a performance decision. max_token_size = cfg.IntOpt( 'max_token_size', default=255, help=utils.fmt( """ Similar to `[DEFAULT] max_param_size`, but provides an exception for token values. With Fernet tokens, this can be set as low as 255. """ ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ The maximum number of entities that will be returned in a collection. This global limit may be then overridden for a specific driver, by specifying a list_limit in the appropriate section (for example, `[assignment]`). No limit is set by default. In larger deployments, it is recommended that you set this to a reasonable number to prevent operations like listing all users and projects from placing an unnecessary load on the system. """ ), ) strict_password_check = cfg.BoolOpt( 'strict_password_check', default=False, help=utils.fmt( """ If set to true, strict password length checking is performed for password manipulation. 
If a password exceeds the maximum length, the operation will fail with an HTTP 403 Forbidden error. If set to false, passwords are automatically truncated to the maximum length. """ ), ) insecure_debug = cfg.BoolOpt( 'insecure_debug', default=False, help=utils.fmt( """ If set to true, then the server will return information in HTTP responses that may allow an unauthenticated or authenticated user to get more information than normal, such as additional details about why authentication failed. This may be useful for debugging but is insecure. """ ), ) default_publisher_id = cfg.StrOpt( 'default_publisher_id', help=utils.fmt( """ Default `publisher_id` for outgoing notifications. If left undefined, Keystone will default to using the server's host name. """ ), ) notification_format = cfg.StrOpt( 'notification_format', default='cadf', choices=['basic', 'cadf'], help=utils.fmt( """ Define the notification format for identity service events. A `basic` notification only has information about the resource being operated on. A `cadf` notification has the same information, as well as information about the initiator of the event. The `cadf` option is entirely backwards compatible with the `basic` option, but is fully CADF-compliant, and is recommended for auditing use cases. """ ), ) notification_opt_out = cfg.MultiStrOpt( 'notification_opt_out', default=["identity.authenticate.success", "identity.authenticate.pending"], help=utils.fmt( """ You can reduce the number of notifications keystone emits by explicitly opting out. Keystone will not emit notifications that match the patterns expressed in this list. Values are expected to be in the form of `identity..`. By default, all notifications related to authentication are automatically suppressed. This field can be set multiple times in order to opt-out of multiple notification topics. 
For example, the following suppresses notifications describing user creation or successful authentication events: notification_opt_out=identity.user.create notification_opt_out=identity.authenticate.success """ ), ) GROUP_NAME = 'DEFAULT' ALL_OPTS = [ admin_token, public_endpoint, max_project_tree_depth, max_param_size, max_token_size, list_limit, strict_password_check, insecure_debug, default_publisher_id, notification_format, notification_opt_out, ] def register_opts(conf): conf.register_opts(ALL_OPTS) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/domain_config.py0000664000175000017500000000463700000000000021431 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the domain-specific configuration driver in the `keystone.resource.domain_config` namespace. Only a `sql` option is provided by keystone, so there is no reason to set this unless you are providing a custom entry point. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for caching of the domain-specific configuration backend. This has no effect unless global caching is enabled. There is normally no reason to disable this. 
""" ), ) cache_time = cfg.IntOpt( 'cache_time', default=300, help=utils.fmt( """ Time-to-live (TTL, in seconds) to cache domain-specific configuration data. This has no effect unless `[domain_config] caching` is enabled. """ ), ) additional_whitelisted_options = cfg.Opt( 'additional_whitelisted_options', type=cfg.types.Dict(value_type=cfg.types.List(bounds=True)), help=utils.fmt( """ Additional whitelisted domain-specific options for out-of-tree drivers. This is a dictonary of lists with the key being the group name and value a list of group options.""" ), ) additional_sensitive_options = cfg.Opt( 'additional_sensitive_options', type=cfg.types.Dict(value_type=cfg.types.List(bounds=True)), help=utils.fmt( """ Additional sensitive domain-specific options for out-of-tree drivers. This is a dictonary of lists with the key being the group name and value a list of group options.""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, caching, cache_time, additional_whitelisted_options, additional_sensitive_options, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/endpoint_filter.py0000664000175000017500000000312300000000000022007 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the endpoint filter driver in the `keystone.endpoint_filter` namespace. Only a `sql` option is provided by keystone, so there is no reason to set this unless you are providing a custom entry point. """ ), ) return_all_endpoints_if_no_filter = cfg.BoolOpt( 'return_all_endpoints_if_no_filter', default=True, help=utils.fmt( """ This controls keystone's behavior if the configured endpoint filters do not result in any endpoints for a user + project pair (and therefore a potentially empty service catalog). If set to true, keystone will return the entire service catalog. If set to false, keystone will return an empty service catalog. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, return_all_endpoints_if_no_filter, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/endpoint_policy.py0000664000175000017500000000213600000000000022024 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the endpoint policy driver in the `keystone.endpoint_policy` namespace. 
Only a `sql` driver is provided by keystone, so there is no reason to set this unless you are providing a custom entry point. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/federation.py0000664000175000017500000001141400000000000020744 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import versionutils from keystone.conf import utils _DEPRECATED_MSG = utils.fmt( """ This option has been superseded by ephemeral users existing in the domain of their identity provider. """ ) driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the federation backend driver in the `keystone.federation` namespace. Keystone only provides a `sql` driver, so there is no reason to set this option unless you are providing a custom entry point. """ ), ) assertion_prefix = cfg.StrOpt( 'assertion_prefix', default='', help=utils.fmt( """ Prefix to use when filtering environment variable names for federated assertions. Matched variables are passed into the federated mapping engine. 
""" ), ) remote_id_attribute = cfg.StrOpt( 'remote_id_attribute', help=utils.fmt( """ Default value for all protocols to be used to obtain the entity ID of the Identity Provider from the environment. For `mod_shib`, this would be `Shib-Identity-Provider`. For `mod_auth_openidc`, this could be `HTTP_OIDC_ISS`. For `mod_auth_mellon`, this could be `MELLON_IDP`. This can be overridden on a per-protocol basis by providing a `remote_id_attribute` to the federation protocol using the API. """ ), ) federated_domain_name = cfg.StrOpt( 'federated_domain_name', default='Federated', deprecated_for_removal=True, deprecated_reason=_DEPRECATED_MSG, deprecated_since=versionutils.deprecated.TRAIN, help=utils.fmt( """ An arbitrary domain name that is reserved to allow federated ephemeral users to have a domain concept. Note that an admin will not be able to create a domain with this name or update an existing domain to this name. You are not advised to change this value unless you really have to. """ ), ) trusted_dashboard = cfg.MultiStrOpt( 'trusted_dashboard', default=[], help=utils.fmt( """ A list of trusted dashboard hosts. Before accepting a Single Sign-On request to return a token, the origin host must be a member of this list. This configuration option may be repeated for multiple values. You must set this in order to use web-based SSO flows. For example: trusted_dashboard=https://acme.example.com/auth/websso trusted_dashboard=https://beta.example.com/auth/websso """ ), ) sso_callback_template = cfg.StrOpt( 'sso_callback_template', default='/etc/keystone/sso_callback_template.html', help=utils.fmt( """ Absolute path to an HTML file used as a Single Sign-On callback handler. This page is expected to redirect the user from keystone back to a trusted dashboard host, by form encoding a token in a POST request. Keystone's default value should be sufficient for most deployments. 
""" ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for federation caching. This has no effect unless global caching is enabled. There is typically no reason to disable this. """ ), ) default_authorization_ttl = cfg.IntOpt( 'default_authorization_ttl', default=0, help=utils.fmt( """ Default time in minutes for the validity of group memberships carried over from a mapping. Default is 0, which means disabled. """ ), ) attribute_mapping_default_schema_version = cfg.StrOpt( 'attribute_mapping_default_schema_version', default='1.0', help=utils.fmt( """ The attribute mapping default schema version to be used, if the attribute mapping being registered does not have a schema version. One must bear in mind that changing this value will have no effect on attribute mappings that were previously registered when another default value was applied. Once registered, one needs to update the attribute mapping schema via the update API to be able to change an attribute mapping schema version. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, assertion_prefix, remote_id_attribute, federated_domain_name, trusted_dashboard, sso_callback_template, caching, default_authorization_ttl, attribute_mapping_default_schema_version, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/fernet_receipts.py0000664000175000017500000000566500000000000022020 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils key_repository = cfg.StrOpt( 'key_repository', default='/etc/keystone/fernet-keys/', help=utils.fmt( """ Directory containing Fernet receipt keys. This directory must exist before using `keystone-manage fernet_setup` for the first time, must be writable by the user running `keystone-manage fernet_setup` or `keystone-manage fernet_rotate`, and of course must be readable by keystone's server process. The repository may contain keys in one of three states: a single staged key (always index 0) used for receipt validation, a single primary key (always the highest index) used for receipt creation and validation, and any number of secondary keys (all other index values) used for receipt validation. With multiple keystone nodes, each node must share the same key repository contents, with the exception of the staged key (index 0). It is safe to run `keystone-manage fernet_rotate` once on any one node to promote a staged key (index 0) to be the new primary (incremented from the previous highest index), and produce a new staged key (a new key with index 0); the resulting repository can then be atomically replicated to other nodes without any risk of race conditions (for example, it is safe to run `keystone-manage fernet_rotate` on host A, wait any amount of time, create a tarball of the directory on host A, unpack it on host B to a temporary location, and atomically move (`mv`) the directory into place on host B). 
Running `keystone-manage fernet_rotate` *twice* on a key repository without syncing other nodes will result in receipts that can not be validated by all nodes. """ ), ) max_active_keys = cfg.IntOpt( 'max_active_keys', default=3, min=1, help=utils.fmt( """ This controls how many keys are held in rotation by `keystone-manage fernet_rotate` before they are discarded. The default value of 3 means that keystone will maintain one staged key (always index 0), one primary key (the highest numerical index), and one secondary key (every other index). Increasing this value means that additional secondary keys will be kept in the rotation. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ key_repository, max_active_keys, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/fernet_tokens.py0000664000175000017500000000560500000000000021477 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils key_repository = cfg.StrOpt( 'key_repository', default='/etc/keystone/fernet-keys/', help=utils.fmt( """ Directory containing Fernet token keys. 
This directory must exist before using `keystone-manage fernet_setup` for the first time, must be writable by the user running `keystone-manage fernet_setup` or `keystone-manage fernet_rotate`, and of course must be readable by keystone's server process. The repository may contain keys in one of three states: a single staged key (always index 0) used for token validation, a single primary key (always the highest index) used for token creation and validation, and any number of secondary keys (all other index values) used for token validation. With multiple keystone nodes, each node must share the same key repository contents, with the exception of the staged key (index 0). It is safe to run `keystone-manage fernet_rotate` once on any one node to promote a staged key (index 0) to be the new primary (incremented from the previous highest index), and produce a new staged key (a new key with index 0); the resulting repository can then be atomically replicated to other nodes without any risk of race conditions (for example, it is safe to run `keystone-manage fernet_rotate` on host A, wait any amount of time, create a tarball of the directory on host A, unpack it on host B to a temporary location, and atomically move (`mv`) the directory into place on host B). Running `keystone-manage fernet_rotate` *twice* on a key repository without syncing other nodes will result in tokens that can not be validated by all nodes. """ ), ) max_active_keys = cfg.IntOpt( 'max_active_keys', default=3, min=1, help=utils.fmt( """ This controls how many keys are held in rotation by `keystone-manage fernet_rotate` before they are discarded. The default value of 3 means that keystone will maintain one staged key (always index 0), one primary key (the highest numerical index), and one secondary key (every other index). Increasing this value means that additional secondary keys will be kept in the rotation. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ key_repository, max_active_keys, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/identity.py0000664000175000017500000001556300000000000020466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import passlib.utils from keystone.conf import utils default_domain_id = cfg.StrOpt( 'default_domain_id', default='default', help=utils.fmt( """ This references the domain to use for all Identity API v2 requests (which are not aware of domains). A domain with this ID can optionally be created for you by `keystone-manage bootstrap`. The domain referenced by this ID cannot be deleted on the v3 API, to prevent accidentally breaking the v2 API. There is nothing special about this domain, other than the fact that it must exist to order to maintain support for your v2 clients. There is typically no reason to change this value. 
""" ), ) domain_specific_drivers_enabled = cfg.BoolOpt( 'domain_specific_drivers_enabled', default=False, help=utils.fmt( """ A subset (or all) of domains can have their own identity driver, each with their own partial configuration options, stored in either the resource backend or in a file in a domain configuration directory (depending on the setting of `[identity] domain_configurations_from_database`). Only values specific to the domain need to be specified in this manner. This feature is disabled by default, but may be enabled by default in a future release; set to true to enable. """ ), ) domain_configurations_from_database = cfg.BoolOpt( 'domain_configurations_from_database', default=False, help=utils.fmt( """ By default, domain-specific configuration data is read from files in the directory identified by `[identity] domain_config_dir`. Enabling this configuration option allows you to instead manage domain-specific configurations through the API, which are then persisted in the backend (typically, a SQL database), rather than using configuration files on disk. """ ), ) domain_config_dir = cfg.StrOpt( 'domain_config_dir', default='/etc/keystone/domains', help=utils.fmt( """ Absolute path where keystone should locate domain-specific `[identity]` configuration files. This option has no effect unless `[identity] domain_specific_drivers_enabled` is set to true. There is typically no reason to change this value. """ ), ) driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the identity backend driver in the `keystone.identity` namespace. Keystone provides a `sql` and `ldap` driver. This option is also used as the default driver selection (along with the other configuration variables in this section) in the event that `[identity] domain_specific_drivers_enabled` is enabled, but no applicable domain-specific configuration is defined for the domain in question. 
Unless your deployment primarily relies on `ldap` AND is not using domain-specific configuration, you should typically leave this set to `sql`. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for identity caching. This has no effect unless global caching is enabled. There is typically no reason to disable this. """ ), ) cache_time = cfg.IntOpt( 'cache_time', default=600, help=utils.fmt( """ Time to cache identity data (in seconds). This has no effect unless global and identity caching are enabled. """ ), ) max_password_length = cfg.IntOpt( 'max_password_length', default=4096, max=passlib.utils.MAX_PASSWORD_SIZE, help=utils.fmt( """ Maximum allowed length for user passwords. Decrease this value to improve performance. Changing this value does not effect existing passwords. This value can also be overridden by certain hashing algorithms maximum allowed length which takes precedence over the configured value. The bcrypt max_password_length is 72 bytes. """ ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ Maximum number of entities that will be returned in an identity collection. """ ), ) password_hash_algorithm = cfg.StrOpt( 'password_hash_algorithm', choices=['bcrypt', 'bcrypt_sha256', 'scrypt', 'pbkdf2_sha512'], default='bcrypt', help=utils.fmt( """ The password hashing algorithm to use for passwords stored within keystone. """ ), ) password_hash_rounds = cfg.IntOpt( 'password_hash_rounds', help=utils.fmt( """ This option represents a trade off between security and performance. Higher values lead to slower performance, but higher security. Changing this option will only affect newly created passwords as existing password hashes already have a fixed number of rounds applied, so it is safe to tune this option in a running cluster. The default for bcrypt is 12, must be between 4 and 31, inclusive. The default for scrypt is 16, must be within `range(1,32)`. 
The default for pbkdf_sha512 is 60000, must be within `range(1,1<<32)` WARNING: If using scrypt, increasing this value increases BOTH time AND memory requirements to hash a password. """ ), ) salt_bytesize = cfg.IntOpt( 'salt_bytesize', min=0, max=96, help=utils.fmt( """ Number of bytes to use in scrypt and pbkfd2_sha512 hashing salt. Default for scrypt is 16 bytes. Default for pbkfd2_sha512 is 16 bytes. Limited to a maximum of 96 bytes due to the size of the column used to store password hashes. """ ), ) scrypt_block_size = cfg.IntOpt( 'scrypt_block_size', help=utils.fmt( """ Optional block size to pass to scrypt hash function (the `r` parameter). Useful for tuning scrypt to optimal performance for your CPU architecture. This option is only used when the `password_hash_algorithm` option is set to `scrypt`. Defaults to 8. """ ), ) scrypt_paralellism = cfg.IntOpt( 'scrypt_parallelism', help=utils.fmt( """ Optional parallelism to pass to scrypt hash function (the `p` parameter). This option is only used when the `password_hash_algorithm` option is set to `scrypt`. Defaults to 1. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ default_domain_id, domain_specific_drivers_enabled, domain_configurations_from_database, domain_config_dir, driver, caching, cache_time, max_password_length, list_limit, password_hash_algorithm, password_hash_rounds, scrypt_block_size, scrypt_paralellism, salt_bytesize, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/identity_mapping.py0000664000175000017500000000544200000000000022174 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the identity mapping backend driver in the `keystone.identity.id_mapping` namespace. Keystone only provides a `sql` driver, so there is no reason to change this unless you are providing a custom entry point. """ ), ) generator = cfg.StrOpt( 'generator', default='sha256', help=utils.fmt( """ Entry point for the public ID generator for user and group entities in the `keystone.identity.id_generator` namespace. The Keystone identity mapper only supports generators that produce 64 bytes or less. Keystone only provides a `sha256` entry point, so there is no reason to change this value unless you're providing a custom entry point. """ ), ) backward_compatible_ids = cfg.BoolOpt( 'backward_compatible_ids', default=True, help=utils.fmt( """ The format of user and group IDs changed in Juno for backends that do not generate UUIDs (for example, LDAP), with keystone providing a hash mapping to the underlying attribute in LDAP. By default this mapping is disabled, which ensures that existing IDs will not change. Even when the mapping is enabled by using domain-specific drivers (`[identity] domain_specific_drivers_enabled`), any users and groups from the default domain being handled by LDAP will still not be mapped to ensure their IDs remain backward compatible. Setting this value to false will enable the new mapping for all backends, including the default LDAP driver. 
It is only guaranteed to be safe to enable this option if you do not already have assignments for users and groups from the default LDAP domain, and you consider it to be acceptable for Keystone to provide the different IDs to clients than it did previously (existing IDs in the API will suddenly change). Typically this means that the only time you can set this value to false is when configuring a fresh installation, although that is the recommended value. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, generator, backward_compatible_ids, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/jwt_tokens.py0000664000175000017500000000450500000000000021016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils jws_public_key_repository = cfg.StrOpt( 'jws_public_key_repository', default='/etc/keystone/jws-keys/public', help=utils.fmt( """ Directory containing public keys for validating JWS token signatures. This directory must exist in order for keystone's server process to start. It must also be readable by keystone's server process. It must contain at least one public key that corresponds to a private key in `keystone.conf [jwt_tokens] jws_private_key_repository`. 
This option is only applicable in deployments issuing JWS tokens and setting `keystone.conf [token] provider = jws`. """ ), ) jws_private_key_repository = cfg.StrOpt( 'jws_private_key_repository', default='/etc/keystone/jws-keys/private', help=utils.fmt( """ Directory containing private keys for signing JWS tokens. This directory must exist in order for keystone's server process to start. It must also be readable by keystone's server process. It must contain at least one private key that corresponds to a public key in `keystone.conf [jwt_tokens] jws_public_key_repository`. In the event there are multiple private keys in this directory, keystone will use a key named `private.pem` to sign tokens. In the future, keystone may support the ability to sign tokens with multiple private keys. For now, only a key named `private.pem` within this directory is required to issue JWS tokens. This option is only applicable in deployments issuing JWS tokens and setting `keystone.conf [token] provider = jws`. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [jws_public_key_repository, jws_private_key_repository] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/ldap.py0000664000175000017500000004331000000000000017544 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils url = cfg.StrOpt( 'url', default='ldap://localhost', help=utils.fmt( """ URL(s) for connecting to the LDAP server. Multiple LDAP URLs may be specified as a comma separated string. The first URL to successfully bind is used for the connection. """ ), ) randomize_urls = cfg.BoolOpt( 'randomize_urls', default=False, help=utils.fmt( """ Randomize the order of URLs in each keystone process. This makes the failure behavior more gradual, since if the first server is down, a process/thread will wait for the specified timeout before attempting a connection to a server further down the list. This defaults to False, for backward compatibility. """ ), ) user = cfg.StrOpt( 'user', help=utils.fmt( """ The user name of the administrator bind DN to use when querying the LDAP server, if your LDAP server requires it. """ ), ) password = cfg.StrOpt( 'password', secret=True, help=utils.fmt( """ The password of the administrator bind DN to use when querying the LDAP server, if your LDAP server requires it. """ ), ) suffix = cfg.StrOpt( 'suffix', default='cn=example,cn=com', help=utils.fmt( """ The default LDAP server suffix to use, if a DN is not defined via either `[ldap] user_tree_dn` or `[ldap] group_tree_dn`. """ ), ) query_scope = cfg.StrOpt( 'query_scope', default='one', choices=['one', 'sub'], help=utils.fmt( """ The search scope which defines how deep to search within the search base. A value of `one` (representing `oneLevel` or `singleLevel`) indicates a search of objects immediately below to the base object, but does not include the base object itself. A value of `sub` (representing `subtree` or `wholeSubtree`) indicates a search of both the base object itself and the entire subtree below it. 
""" ), ) page_size = cfg.IntOpt( 'page_size', default=0, min=0, help=utils.fmt( """ Defines the maximum number of results per page that keystone should request from the LDAP server when listing objects. A value of zero (`0`) disables paging. """ ), ) alias_dereferencing = cfg.StrOpt( 'alias_dereferencing', default='default', choices=['never', 'searching', 'always', 'finding', 'default'], help=utils.fmt( """ The LDAP dereferencing option to use for queries involving aliases. A value of `default` falls back to using default dereferencing behavior configured by your `ldap.conf`. A value of `never` prevents aliases from being dereferenced at all. A value of `searching` dereferences aliases only after name resolution. A value of `finding` dereferences aliases only during name resolution. A value of `always` dereferences aliases in all cases. """ ), ) debug_level = cfg.IntOpt( 'debug_level', min=-1, help=utils.fmt( """ Sets the LDAP debugging level for LDAP calls. A value of 0 means that debugging is not enabled. This value is a bitmask, consult your LDAP documentation for possible values. """ ), ) chase_referrals = cfg.BoolOpt( 'chase_referrals', help=utils.fmt( """ Sets keystone's referral chasing behavior across directory partitions. If left unset, the system's default behavior will be used. """ ), ) user_tree_dn = cfg.StrOpt( 'user_tree_dn', help=utils.fmt( """ The search base to use for users. Defaults to `ou=Users` with the `[ldap] suffix` appended to it. """ ), ) user_filter = cfg.StrOpt( 'user_filter', help=utils.fmt( """ The LDAP search filter to use for users. """ ), ) user_objectclass = cfg.StrOpt( 'user_objectclass', default='inetOrgPerson', help=utils.fmt( """ The LDAP object class to use for users. """ ), ) user_id_attribute = cfg.StrOpt( 'user_id_attribute', default='cn', help=utils.fmt( """ The LDAP attribute mapped to user IDs in keystone. This must NOT be a multivalued attribute. 
User IDs are expected to be globally unique across keystone domains and URL-safe. """ ), ) user_name_attribute = cfg.StrOpt( 'user_name_attribute', default='sn', help=utils.fmt( """ The LDAP attribute mapped to user names in keystone. User names are expected to be unique only within a keystone domain and are not expected to be URL-safe. """ ), ) user_description_attribute = cfg.StrOpt( 'user_description_attribute', default='description', help=utils.fmt( """ The LDAP attribute mapped to user descriptions in keystone. """ ), ) user_mail_attribute = cfg.StrOpt( 'user_mail_attribute', default='mail', help=utils.fmt( """ The LDAP attribute mapped to user emails in keystone. """ ), ) user_pass_attribute = cfg.StrOpt( 'user_pass_attribute', default='userPassword', help=utils.fmt( """ The LDAP attribute mapped to user passwords in keystone. """ ), ) user_enabled_attribute = cfg.StrOpt( 'user_enabled_attribute', default='enabled', help=utils.fmt( """ The LDAP attribute mapped to the user enabled attribute in keystone. If setting this option to `userAccountControl`, then you may be interested in setting `[ldap] user_enabled_mask` and `[ldap] user_enabled_default` as well. """ ), ) user_enabled_invert = cfg.BoolOpt( 'user_enabled_invert', default=False, help=utils.fmt( """ Logically negate the boolean value of the enabled attribute obtained from the LDAP server. Some LDAP servers use a boolean lock attribute where "true" means an account is disabled. Setting `[ldap] user_enabled_invert = true` will allow these lock attributes to be used. This option will have no effect if either the `[ldap] user_enabled_mask` or `[ldap] user_enabled_emulation` options are in use. """ ), ) user_enabled_mask = cfg.IntOpt( 'user_enabled_mask', default=0, min=0, help=utils.fmt( """ Bitmask integer to select which bit indicates the enabled value if the LDAP server represents "enabled" as a bit on an integer rather than as a discrete boolean. A value of `0` indicates that the mask is not used. 
If this is not set to `0` the typical value is `2`. This is typically used when `[ldap] user_enabled_attribute = userAccountControl`. Setting this option causes keystone to ignore the value of `[ldap] user_enabled_invert`. """ ), ) user_enabled_default = cfg.StrOpt( 'user_enabled_default', default='True', help=utils.fmt( """ The default value to enable users. This should match an appropriate integer value if the LDAP server uses non-boolean (bitmask) values to indicate if a user is enabled or disabled. If this is not set to `True`, then the typical value is `512`. This is typically used when `[ldap] user_enabled_attribute = userAccountControl`. """ ), ) user_attribute_ignore = cfg.ListOpt( 'user_attribute_ignore', default=['default_project_id'], help=utils.fmt( """ List of user attributes to ignore on create and update, or whether a specific user attribute should be filtered for list or show user. """ ), ) user_default_project_id_attribute = cfg.StrOpt( 'user_default_project_id_attribute', help=utils.fmt( """ The LDAP attribute mapped to a user's default_project_id in keystone. This is most commonly used when keystone has write access to LDAP. """ ), ) user_enabled_emulation = cfg.BoolOpt( 'user_enabled_emulation', default=False, help=utils.fmt( """ If enabled, keystone uses an alternative method to determine if a user is enabled or not by checking if they are a member of the group defined by the `[ldap] user_enabled_emulation_dn` option. Enabling this option causes keystone to ignore the value of `[ldap] user_enabled_invert`. """ ), ) user_enabled_emulation_dn = cfg.StrOpt( 'user_enabled_emulation_dn', help=utils.fmt( """ DN of the group entry to hold enabled users when using enabled emulation. Setting this option has no effect unless `[ldap] user_enabled_emulation` is also enabled. 
""" ), ) user_enabled_emulation_use_group_config = cfg.BoolOpt( 'user_enabled_emulation_use_group_config', default=False, help=utils.fmt( """ Use the `[ldap] group_member_attribute` and `[ldap] group_objectclass` settings to determine membership in the emulated enabled group. Enabling this option has no effect unless `[ldap] user_enabled_emulation` is also enabled. """ ), ) user_additional_attribute_mapping = cfg.ListOpt( 'user_additional_attribute_mapping', default=[], help=utils.fmt( """ A list of LDAP attribute to keystone user attribute pairs used for mapping additional attributes to users in keystone. The expected format is `:`, where `ldap_attr` is the attribute in the LDAP object and `user_attr` is the attribute which should appear in the identity API. """ ), ) group_tree_dn = cfg.StrOpt( 'group_tree_dn', help=utils.fmt( """ The search base to use for groups. Defaults to `ou=UserGroups` with the `[ldap] suffix` appended to it. """ ), ) group_filter = cfg.StrOpt( 'group_filter', help=utils.fmt( """ The LDAP search filter to use for groups. """ ), ) group_objectclass = cfg.StrOpt( 'group_objectclass', default='groupOfNames', help=utils.fmt( """ The LDAP object class to use for groups. If setting this option to `posixGroup`, you may also be interested in enabling the `[ldap] group_members_are_ids` option. """ ), ) group_id_attribute = cfg.StrOpt( 'group_id_attribute', default='cn', help=utils.fmt( """ The LDAP attribute mapped to group IDs in keystone. This must NOT be a multivalued attribute. Group IDs are expected to be globally unique across keystone domains and URL-safe. """ ), ) group_name_attribute = cfg.StrOpt( 'group_name_attribute', default='ou', help=utils.fmt( """ The LDAP attribute mapped to group names in keystone. Group names are expected to be unique only within a keystone domain and are not expected to be URL-safe. 
""" ), ) group_member_attribute = cfg.StrOpt( 'group_member_attribute', default='member', help=utils.fmt( """ The LDAP attribute used to indicate that a user is a member of the group. """ ), ) group_members_are_ids = cfg.BoolOpt( 'group_members_are_ids', default=False, help=utils.fmt( """ Enable this option if the members of the group object class are keystone user IDs rather than LDAP DNs. This is the case when using `posixGroup` as the group object class in Open Directory. """ ), ) group_desc_attribute = cfg.StrOpt( 'group_desc_attribute', default='description', help=utils.fmt( """ The LDAP attribute mapped to group descriptions in keystone. """ ), ) group_attribute_ignore = cfg.ListOpt( 'group_attribute_ignore', default=[], help=utils.fmt( """ List of group attributes to ignore on create and update. or whether a specific group attribute should be filtered for list or show group. """ ), ) group_additional_attribute_mapping = cfg.ListOpt( 'group_additional_attribute_mapping', default=[], help=utils.fmt( """ A list of LDAP attribute to keystone group attribute pairs used for mapping additional attributes to groups in keystone. The expected format is `:`, where `ldap_attr` is the attribute in the LDAP object and `group_attr` is the attribute which should appear in the identity API. """ ), ) group_ad_nesting = cfg.BoolOpt( 'group_ad_nesting', default=False, help=utils.fmt( """ If enabled, group queries will use Active Directory specific filters for nested groups. """ ), ) tls_cacertfile = cfg.StrOpt( 'tls_cacertfile', help=utils.fmt( """ An absolute path to a CA certificate file to use when communicating with LDAP servers. This option will take precedence over `[ldap] tls_cacertdir`, so there is no reason to set both. """ ), ) tls_cacertdir = cfg.StrOpt( 'tls_cacertdir', help=utils.fmt( """ An absolute path to a CA certificate directory to use when communicating with LDAP servers. There is no reason to set this option if you've also set `[ldap] tls_cacertfile`. 
""" ), ) use_tls = cfg.BoolOpt( 'use_tls', default=False, help=utils.fmt( """ Enable TLS when communicating with LDAP servers. You should also set the `[ldap] tls_cacertfile` and `[ldap] tls_cacertdir` options when using this option. Do not set this option if you are using LDAP over SSL (LDAPS) instead of TLS. """ ), ) tls_req_cert = cfg.StrOpt( 'tls_req_cert', default='demand', choices=['demand', 'never', 'allow'], help=utils.fmt( """ Specifies which checks to perform against client certificates on incoming TLS sessions. If set to `demand`, then a certificate will always be requested and required from the LDAP server. If set to `allow`, then a certificate will always be requested but not required from the LDAP server. If set to `never`, then a certificate will never be requested. """ ), ) connection_timeout = cfg.IntOpt( 'connection_timeout', default=-1, min=-1, help=utils.fmt( """ The connection timeout to use with the LDAP server. A value of `-1` means that connections will never timeout. """ ), ) use_pool = cfg.BoolOpt( 'use_pool', default=True, help=utils.fmt( """ Enable LDAP connection pooling for queries to the LDAP server. There is typically no reason to disable this. """ ), ) pool_size = cfg.IntOpt( 'pool_size', default=10, min=1, help=utils.fmt( """ The size of the LDAP connection pool. This option has no effect unless `[ldap] use_pool` is also enabled. """ ), ) pool_retry_max = cfg.IntOpt( 'pool_retry_max', default=3, min=1, help=utils.fmt( """ The maximum number of times to attempt connecting to the LDAP server before aborting. A value of one makes only one connection attempt. This option has no effect unless `[ldap] use_pool` is also enabled. """ ), ) pool_retry_delay = cfg.FloatOpt( 'pool_retry_delay', default=0.1, help=utils.fmt( """ The number of seconds to wait before attempting to reconnect to the LDAP server. This option has no effect unless `[ldap] use_pool` is also enabled. 
""" ), ) pool_connection_timeout = cfg.IntOpt( 'pool_connection_timeout', default=-1, min=-1, help=utils.fmt( """ The connection timeout to use when pooling LDAP connections. A value of `-1` means that connections will never timeout. This option has no effect unless `[ldap] use_pool` is also enabled. """ ), ) pool_connection_lifetime = cfg.IntOpt( 'pool_connection_lifetime', default=600, min=1, help=utils.fmt( """ The maximum connection lifetime to the LDAP server in seconds. When this lifetime is exceeded, the connection will be unbound and removed from the connection pool. This option has no effect unless `[ldap] use_pool` is also enabled. """ ), ) use_auth_pool = cfg.BoolOpt( 'use_auth_pool', default=True, help=utils.fmt( """ Enable LDAP connection pooling for end user authentication. There is typically no reason to disable this. """ ), ) auth_pool_size = cfg.IntOpt( 'auth_pool_size', default=100, min=1, help=utils.fmt( """ The size of the connection pool to use for end user authentication. This option has no effect unless `[ldap] use_auth_pool` is also enabled. """ ), ) auth_pool_connection_lifetime = cfg.IntOpt( 'auth_pool_connection_lifetime', default=60, min=1, help=utils.fmt( """ The maximum end user authentication connection lifetime to the LDAP server in seconds. When this lifetime is exceeded, the connection will be unbound and removed from the connection pool. This option has no effect unless `[ldap] use_auth_pool` is also enabled. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ url, randomize_urls, user, password, suffix, query_scope, page_size, alias_dereferencing, debug_level, chase_referrals, user_tree_dn, user_filter, user_objectclass, user_id_attribute, user_name_attribute, user_description_attribute, user_mail_attribute, user_pass_attribute, user_enabled_attribute, user_enabled_invert, user_enabled_mask, user_enabled_default, user_attribute_ignore, user_default_project_id_attribute, user_enabled_emulation, user_enabled_emulation_dn, user_enabled_emulation_use_group_config, user_additional_attribute_mapping, group_tree_dn, group_filter, group_objectclass, group_id_attribute, group_name_attribute, group_member_attribute, group_members_are_ids, group_desc_attribute, group_attribute_ignore, group_additional_attribute_mapping, group_ad_nesting, tls_cacertfile, tls_cacertdir, use_tls, tls_req_cert, connection_timeout, use_pool, pool_size, pool_retry_max, pool_retry_delay, pool_connection_timeout, pool_connection_lifetime, use_auth_pool, auth_pool_size, auth_pool_connection_lifetime, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/oauth1.py0000664000175000017500000000355700000000000020036 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the OAuth backend driver in the `keystone.oauth1` namespace. Typically, there is no reason to set this option unless you are providing a custom entry point. """ ), ) request_token_duration = cfg.IntOpt( 'request_token_duration', min=0, default=28800, help=utils.fmt( """ Number of seconds for the OAuth Request Token to remain valid after being created. This is the amount of time the user has to authorize the token. Setting this option to zero means that request tokens will last forever. """ ), ) access_token_duration = cfg.IntOpt( 'access_token_duration', min=0, default=86400, help=utils.fmt( """ Number of seconds for the OAuth Access Token to remain valid after being created. This is the amount of time the consumer has to interact with the service provider (which is typically keystone). Setting this option to zero means that access tokens will last forever. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, request_token_duration, access_token_duration, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/oauth2.py0000664000175000017500000000327300000000000020032 0ustar00zuulzuul00000000000000# Copyright 2022 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils oauth2_authn_methods = cfg.ListOpt( 'oauth2_authn_methods', default=['tls_client_auth', 'client_secret_basic'], help=utils.fmt( """ The OAuth2.0 authentication method supported by the system when user obtains an access token through the OAuth2.0 token endpoint. This option can be set to certificate or secret. If the option is not set, the default value is certificate. When the option is set to secret, the OAuth2.0 token endpoint uses client_secret_basic method for authentication, otherwise tls_client_auth method is used for authentication. """ ), ) oauth2_cert_dn_mapping_id = cfg.StrOpt( 'oauth2_cert_dn_mapping_id', default='oauth2_mapping', help=utils.fmt( """ Used to define the mapping rule id. When not set, the mapping rule id is oauth2_mapping. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [oauth2_authn_methods, oauth2_cert_dn_mapping_id] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/opts.py0000664000175000017500000000534000000000000017612 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Single point of entry to generate the sample configuration file. 
This module collects all the necessary info from the other modules in this package. It is assumed that: * Every other module in this package has a 'list_opts' function which returns a dict where: * The keys are strings which are the group names. * The value of each key is a list of config options for that group. * The conf package doesn't have further packages with config options. * This module is only used in the context of sample file generation. """ import collections import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = 'list_opts' IGNORED_MODULES = ('opts', 'constants', 'utils') def list_opts(): opts = collections.defaultdict(list) module_names = _list_module_names() imported_modules = _import_modules(module_names) _append_config_options(imported_modules, opts) return _tupleize(opts) def _tupleize(d): """Convert a dict of options to the 2-tuple format.""" return [(key, value) for key, value in d.items()] def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for _, module_name, ispkg in pkgutil.iter_modules(path=[package_path]): if module_name in IGNORED_MODULES or ispkg: # Skip this module. continue else: module_names.append(module_name) return module_names def _import_modules(module_names): imported_modules = [] for module_name in module_names: full_module_path = '.'.join(__name__.split('.')[:-1] + [module_name]) module = importlib.import_module(full_module_path) if not hasattr(module, LIST_OPTS_FUNC_NAME): raise Exception( "The module '%s' should have a '%s' function which " "returns the config options." 
% (full_module_path, LIST_OPTS_FUNC_NAME) ) else: imported_modules.append(module) return imported_modules def _append_config_options(imported_modules, config_options): for module in imported_modules: configs = module.list_opts() for key, val in configs.items(): config_options[key].extend(val) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/policy.py0000664000175000017500000000252300000000000020124 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the policy backend driver in the `keystone.policy` namespace. Supplied drivers are `rules` (which does not support any CRUD operations for the v3 policy API) and `sql`. Typically, there is no reason to set this option unless you are providing a custom entry point. """ ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ Maximum number of entities that will be returned in a policy collection. 
""" ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, list_limit, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/receipt.py0000664000175000017500000000507000000000000020260 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils expiration = cfg.IntOpt( 'expiration', default=300, min=0, max=86400, help=utils.fmt( """ The amount of time that a receipt should remain valid (in seconds). This value should always be very short, as it represents how long a user has to reattempt auth with the missing auth methods. """ ), ) provider = cfg.StrOpt( 'provider', default='fernet', help=utils.fmt( """ Entry point for the receipt provider in the `keystone.receipt.provider` namespace. The receipt provider controls the receipt construction and validation operations. Keystone includes just the `fernet` receipt provider for now. `fernet` receipts do not need to be persisted at all, but require that you run `keystone-manage fernet_setup` (also see the `keystone-manage fernet_rotate` command). """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for caching receipt creation and validation data. 
This has no effect unless global caching is enabled, or if cache_on_issue is disabled as we only cache receipts on issue. """ ), ) cache_time = cfg.IntOpt( 'cache_time', default=300, min=0, help=utils.fmt( """ The number of seconds to cache receipt creation and validation data. This has no effect unless both global and `[receipt] caching` are enabled. """ ), ) cache_on_issue = cfg.BoolOpt( 'cache_on_issue', default=True, help=utils.fmt( """ Enable storing issued receipt data to receipt validation cache so that first receipt validation doesn't actually cause full validation cycle. This option has no effect unless global caching and receipt caching are enabled. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ expiration, provider, caching, cache_time, cache_on_issue, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/resource.py0000664000175000017500000000751300000000000020460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the resource driver in the `keystone.resource` namespace. Only a `sql` driver is supplied by keystone. 
Unless you are writing proprietary drivers for keystone, you do not need to set this option. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, deprecated_opts=[cfg.DeprecatedOpt('caching', group='assignment')], help=utils.fmt( """ Toggle for resource caching. This has no effect unless global caching is enabled. """ ), ) cache_time = cfg.IntOpt( 'cache_time', deprecated_opts=[cfg.DeprecatedOpt('cache_time', group='assignment')], help=utils.fmt( """ Time to cache resource data in seconds. This has no effect unless global caching is enabled. """ ), ) list_limit = cfg.IntOpt( 'list_limit', deprecated_opts=[cfg.DeprecatedOpt('list_limit', group='assignment')], help=utils.fmt( """ Maximum number of entities that will be returned in a resource collection. """ ), ) admin_project_domain_name = cfg.StrOpt( 'admin_project_domain_name', help=utils.fmt( """ Name of the domain that owns the `admin_project_name`. If left unset, then there is no admin project. `[resource] admin_project_name` must also be set to use this option. """ ), ) admin_project_name = cfg.StrOpt( 'admin_project_name', help=utils.fmt( """ This is a special project which represents cloud-level administrator privileges across services. Tokens scoped to this project will contain a true `is_admin_project` attribute to indicate to policy systems that the role assignments on that specific project should apply equally across every project. If left unset, then there is no admin project, and thus no explicit means of cross-project role assignments. `[resource] admin_project_domain_name` must also be set to use this option. """ ), ) project_name_url_safe = cfg.StrOpt( 'project_name_url_safe', choices=['off', 'new', 'strict'], default='off', help=utils.fmt( """ This controls whether the names of projects are restricted from containing URL-reserved characters. If set to `new`, attempts to create or update a project with a URL-unsafe name will fail. 
If set to `strict`, attempts to scope a token with a URL-unsafe project name will fail, thereby forcing all project names to be updated to be URL-safe. """ ), ) domain_name_url_safe = cfg.StrOpt( 'domain_name_url_safe', choices=['off', 'new', 'strict'], default='off', help=utils.fmt( """ This controls whether the names of domains are restricted from containing URL-reserved characters. If set to `new`, attempts to create or update a domain with a URL-unsafe name will fail. If set to `strict`, attempts to scope a token with a URL-unsafe domain name will fail, thereby forcing all domain names to be updated to be URL-safe. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, caching, cache_time, list_limit, admin_project_domain_name, admin_project_name, project_name_url_safe, domain_name_url_safe, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/revoke.py0000664000175000017500000000365500000000000020127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the token revocation backend driver in the `keystone.revoke` namespace. 
Keystone only provides a `sql` driver, so there is no reason to set this option unless you are providing a custom entry point. """ ), ) expiration_buffer = cfg.IntOpt( 'expiration_buffer', default=1800, min=0, help=utils.fmt( """ The number of seconds after a token has expired before a corresponding revocation event may be purged from the backend. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for revocation event caching. This has no effect unless global caching is enabled. """ ), ) cache_time = cfg.IntOpt( 'cache_time', default=3600, deprecated_opts=[ cfg.DeprecatedOpt('revocation_cache_time', group='token') ], help=utils.fmt( """ Time to cache the revocation list and the revocation events (in seconds). This has no effect unless global and `[revoke] caching` are both enabled. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, expiration_buffer, caching, cache_time, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/role.py0000664000175000017500000000366400000000000017575 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils # The role driver has no default for backward compatibility reasons. 
If role # driver is not specified, the assignment driver chooses the backend. driver = cfg.StrOpt( 'driver', help=utils.fmt( """ Entry point for the role backend driver in the `keystone.role` namespace. Keystone only provides a `sql` driver, so there's no reason to change this unless you are providing a custom entry point. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for role caching. This has no effect unless global caching is enabled. In a typical deployment, there is no reason to disable this. """ ), ) cache_time = cfg.IntOpt( 'cache_time', help=utils.fmt( """ Time to cache role data, in seconds. This has no effect unless both global caching and `[role] caching` are enabled. """ ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ Maximum number of entities that will be returned in a role collection. This may be useful to tune if you have a large number of discrete roles in your deployment. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, caching, cache_time, list_limit, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/saml.py0000664000175000017500000001357700000000000017574 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from keystone.conf import constants from keystone.conf import utils assertion_expiration_time = cfg.IntOpt( 'assertion_expiration_time', default=3600, help=utils.fmt( """ Determines the lifetime for any SAML assertions generated by keystone, using `NotOnOrAfter` attributes. """ ), ) xmlsec1_binary = cfg.StrOpt( 'xmlsec1_binary', default='xmlsec1', help=utils.fmt( """ Name of, or absolute path to, the binary to be used for XML signing. Although only the XML Security Library (`xmlsec1`) is supported, it may have a non-standard name or path on your system. If keystone cannot find the binary itself, you may need to install the appropriate package, use this option to specify an absolute path, or adjust keystone's PATH environment variable. """ ), ) certfile = cfg.StrOpt( 'certfile', default=constants._CERTFILE, help=utils.fmt( """ Absolute path to the public certificate file to use for SAML signing. The value cannot contain a comma (`,`). """ ), ) keyfile = cfg.StrOpt( 'keyfile', default=constants._KEYFILE, help=utils.fmt( """ Absolute path to the private key file to use for SAML signing. The value cannot contain a comma (`,`). """ ), ) idp_entity_id = cfg.URIOpt( 'idp_entity_id', max_length=1024, help=utils.fmt( """ This is the unique entity identifier of the identity provider (keystone) to use when generating SAML assertions. This value is required to generate identity provider metadata and must be a URI (a URL is recommended). For example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/idp`. """ ), ) idp_sso_endpoint = cfg.URIOpt( 'idp_sso_endpoint', help=utils.fmt( """ This is the single sign-on (SSO) service location of the identity provider which accepts HTTP POST requests. A value is required to generate identity provider metadata. For example: `https://keystone.example.com/v3/OS-FEDERATION/saml2/sso`. 
""" ), ) idp_lang = cfg.StrOpt( 'idp_lang', default='en', help=utils.fmt( """ This is the language used by the identity provider's organization. """ ), ) idp_organization_name = cfg.StrOpt( 'idp_organization_name', default='SAML Identity Provider', help=utils.fmt( """ This is the name of the identity provider's organization. """ ), ) idp_organization_display_name = cfg.StrOpt( 'idp_organization_display_name', default='OpenStack SAML Identity Provider', help=utils.fmt( """ This is the name of the identity provider's organization to be displayed. """ ), ) idp_organization_url = cfg.URIOpt( 'idp_organization_url', default='https://example.com/', help=utils.fmt( """ This is the URL of the identity provider's organization. The URL referenced here should be useful to humans. """ ), ) idp_contact_company = cfg.StrOpt( 'idp_contact_company', default='Example, Inc.', help=utils.fmt( """ This is the company name of the identity provider's contact person. """ ), ) idp_contact_name = cfg.StrOpt( 'idp_contact_name', default='SAML Identity Provider Support', help=utils.fmt( """ This is the given name of the identity provider's contact person. """ ), ) idp_contact_surname = cfg.StrOpt( 'idp_contact_surname', default='Support', help=utils.fmt( """ This is the surname of the identity provider's contact person. """ ), ) idp_contact_email = cfg.StrOpt( 'idp_contact_email', default='support@example.com', help=utils.fmt( """ This is the email address of the identity provider's contact person. """ ), ) idp_contact_telephone = cfg.StrOpt( 'idp_contact_telephone', default='+1 800 555 0100', help=utils.fmt( """ This is the telephone number of the identity provider's contact person. """ ), ) idp_contact_type = cfg.StrOpt( 'idp_contact_type', default='other', choices=['technical', 'support', 'administrative', 'billing', 'other'], help=utils.fmt( """ This is the type of contact that best describes the identity provider's contact person. 
""" ), ) idp_metadata_path = cfg.StrOpt( 'idp_metadata_path', default='/etc/keystone/saml2_idp_metadata.xml', help=utils.fmt( """ Absolute path to the identity provider metadata file. This file should be generated with the `keystone-manage saml_idp_metadata` command. There is typically no reason to change this value. """ ), ) relay_state_prefix = cfg.StrOpt( 'relay_state_prefix', default='ss:mem:', help=utils.fmt( """ The prefix of the RelayState SAML attribute to use when generating enhanced client and proxy (ECP) assertions. In a typical deployment, there is no reason to change this value. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ assertion_expiration_time, xmlsec1_binary, certfile, keyfile, idp_entity_id, idp_sso_endpoint, idp_lang, idp_organization_name, idp_organization_display_name, idp_organization_url, idp_contact_company, idp_contact_name, idp_contact_surname, idp_contact_email, idp_contact_telephone, idp_contact_type, idp_metadata_path, relay_state_prefix, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/security_compliance.py0000664000175000017500000001360300000000000022667 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from keystone.conf import utils disable_user_account_days_inactive = cfg.IntOpt( 'disable_user_account_days_inactive', min=1, help=utils.fmt( """ The maximum number of days a user can go without authenticating before being considered "inactive" and automatically disabled (locked). This feature is disabled by default; set any value to enable it. This feature depends on the `sql` backend for the `[identity] driver`. When a user exceeds this threshold and is considered "inactive", the user's `enabled` attribute in the HTTP API may not match the value of the user's `enabled` column in the user table. """ ), ) lockout_failure_attempts = cfg.IntOpt( 'lockout_failure_attempts', min=1, help=utils.fmt( """ The maximum number of times that a user can fail to authenticate before the user account is locked for the number of seconds specified by `[security_compliance] lockout_duration`. This feature is disabled by default. If this feature is enabled and `[security_compliance] lockout_duration` is not set, then users may be locked out indefinitely until the user is explicitly enabled via the API. This feature depends on the `sql` backend for the `[identity] driver`. """ ), ) lockout_duration = cfg.IntOpt( 'lockout_duration', default=1800, min=1, help=utils.fmt( """ The number of seconds a user account will be locked when the maximum number of failed authentication attempts (as specified by `[security_compliance] lockout_failure_attempts`) is exceeded. Setting this option will have no effect unless you also set `[security_compliance] lockout_failure_attempts` to a non-zero value. This feature depends on the `sql` backend for the `[identity] driver`. """ ), ) password_expires_days = cfg.IntOpt( 'password_expires_days', min=1, help=utils.fmt( """ The number of days for which a password will be considered valid before requiring it to be changed. This feature is disabled by default. 
If enabled, new password changes will have an expiration date, however existing passwords would not be impacted. This feature depends on the `sql` backend for the `[identity] driver`. """ ), ) unique_last_password_count = cfg.IntOpt( 'unique_last_password_count', default=0, min=0, help=utils.fmt( """ This controls the number of previous user password iterations to keep in history, in order to enforce that newly created passwords are unique. The total number which includes the new password should not be greater or equal to this value. Setting the value to zero (the default) disables this feature. Thus, to enable this feature, values must be greater than 0. This feature depends on the `sql` backend for the `[identity] driver`. """ ), ) minimum_password_age = cfg.IntOpt( 'minimum_password_age', default=0, min=0, help=utils.fmt( """ The number of days that a password must be used before the user can change it. This prevents users from changing their passwords immediately in order to wipe out their password history and reuse an old password. This feature does not prevent administrators from manually resetting passwords. It is disabled by default and allows for immediate password changes. This feature depends on the `sql` backend for the `[identity] driver`. Note: If `[security_compliance] password_expires_days` is set, then the value for this option should be less than the `password_expires_days`. """ ), ) password_regex = cfg.StrOpt( 'password_regex', help=utils.fmt( r""" The regular expression used to validate password strength requirements. By default, the regular expression will match any password. The following is an example of a pattern which requires at least 1 letter, 1 digit, and have a minimum length of 7 characters: ^(?=.*\\\d)(?=.*[a-zA-Z]).{7,}$ This feature depends on the `sql` backend for the `[identity] driver`. 
""" ), ) # noqa: W605 password_regex_description = cfg.StrOpt( 'password_regex_description', help=utils.fmt( """ Describe your password regular expression here in language for humans. If a password fails to match the regular expression, the contents of this configuration variable will be returned to users to explain why their requested password was insufficient. """ ), ) change_password_upon_first_use = cfg.BoolOpt( 'change_password_upon_first_use', default=False, help=utils.fmt( """ Enabling this option requires users to change their password when the user is created, or upon administrative reset. Before accessing any services, affected users will have to change their password. To ignore this requirement for specific users, such as service users, set the `options` attribute `ignore_change_password_upon_first_use` to `True` for the desired user via the update user API. This feature is disabled by default. This feature is only applicable with the `sql` backend for the `[identity] driver`. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ disable_user_account_days_inactive, lockout_failure_attempts, lockout_duration, password_expires_days, unique_last_password_count, minimum_password_age, password_regex, password_regex_description, change_password_upon_first_use, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/shadow_users.py0000664000175000017500000000234400000000000021334 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the shadow users backend driver in the `keystone.identity.shadow_users` namespace. This driver is used for persisting local user references to externally-managed identities (via federation, LDAP, etc). Keystone only provides a `sql` driver, so there is no reason to change this option unless you are providing a custom entry point. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/token.py0000664000175000017500000001232500000000000017746 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import sys from oslo_config import cfg from oslo_log import versionutils from keystone.conf import utils expiration = cfg.IntOpt( 'expiration', default=3600, min=0, max=sys.maxsize, help=utils.fmt( """ The amount of time that a token should remain valid (in seconds). Drastically reducing this value may break "long-running" operations that involve multiple services to coordinate together, and will force users to authenticate with keystone more frequently. Drastically increasing this value will increase the number of tokens that will be simultaneously valid. Keystone tokens are also bearer tokens, so a shorter duration will also reduce the potential security impact of a compromised token. """ ), ) provider = cfg.StrOpt( 'provider', default='fernet', help=utils.fmt( """ Entry point for the token provider in the `keystone.token.provider` namespace. The token provider controls the token construction, validation, and revocation operations. Supported upstream providers are `fernet` and `jws`. Neither `fernet` or `jws` tokens require persistence and both require additional setup. If using `fernet`, you're required to run `keystone-manage fernet_setup`, which creates symmetric keys used to encrypt tokens. If using `jws`, you're required to generate an ECDSA keypair using a SHA-256 hash algorithm for signing and validating token, which can be done with `keystone-manage create_jws_keypair`. Note that `fernet` tokens are encrypted and `jws` tokens are only signed. Please be sure to consider this if your deployment has security requirements regarding payload contents used to generate token IDs. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for caching token creation and validation data. This has no effect unless global caching is enabled. """ ), ) cache_time = cfg.IntOpt( 'cache_time', min=0, max=sys.maxsize, help=utils.fmt( """ The number of seconds to cache token creation and validation data. 
This has no effect unless both global and `[token] caching` are enabled. """ ), ) revoke_by_id = cfg.BoolOpt( 'revoke_by_id', default=True, help=utils.fmt( """ This toggles support for revoking individual tokens by the token identifier and thus various token enumeration operations (such as listing all tokens issued to a specific user). These operations are used to determine the list of tokens to consider revoked. Do not disable this option if you're using the `kvs` `[revoke] driver`. """ ), ) allow_rescope_scoped_token = cfg.BoolOpt( 'allow_rescope_scoped_token', default=True, help=utils.fmt( """ This toggles whether scoped tokens may be re-scoped to a new project or domain, thereby preventing users from exchanging a scoped token (including those with a default project scope) for any other token. This forces users to either authenticate for unscoped tokens (and later exchange that unscoped token for tokens with a more specific scope) or to provide their credentials in every request for a scoped token to avoid re-scoping altogether. """ ), ) cache_on_issue = cfg.BoolOpt( 'cache_on_issue', default=True, deprecated_since=versionutils.deprecated.STEIN, deprecated_reason=utils.fmt( """ Keystone already exposes a configuration option for caching tokens. Having a separate configuration option to cache tokens when they are issued is redundant, unnecessarily complicated, and is misleading if token caching is disabled because tokens will still be pre-cached by default when they are issued. The ability to pre-cache tokens when they are issued is going to rely exclusively on the ``keystone.conf [token] caching`` option in the future. """ ), deprecated_for_removal=True, help=utils.fmt( """ Enable storing issued token data to token validation cache so that first token validation doesn't actually cause full validation cycle. This option has no effect unless global caching is enabled and will still cache tokens even if `[token] caching = False`. 
""" ), ) allow_expired_window = cfg.IntOpt( 'allow_expired_window', default=48 * 60 * 60, help=utils.fmt( """ This controls the number of seconds that a token can be retrieved for beyond the built-in expiry time. This allows long running operations to succeed. Defaults to two days. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ expiration, provider, caching, cache_time, revoke_by_id, allow_rescope_scoped_token, cache_on_issue, allow_expired_window, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/tokenless_auth.py0000664000175000017500000000461000000000000021654 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils trusted_issuer = cfg.MultiStrOpt( 'trusted_issuer', default=[], help=utils.fmt( """ The list of distinguished names which identify trusted issuers of client certificates allowed to use X.509 tokenless authorization. If the option is absent then no certificates will be allowed. The format for the values of a distinguished name (DN) must be separated by a comma and contain no spaces. Furthermore, because an individual DN may contain commas, this configuration option may be repeated multiple times to represent multiple values. 
For example, keystone.conf would include two consecutive lines in order to trust two different DNs, such as `trusted_issuer = CN=john,OU=keystone,O=openstack` and `trusted_issuer = CN=mary,OU=eng,O=abc`. """ ), ) protocol = cfg.StrOpt( 'protocol', default='x509', help=utils.fmt( """ The federated protocol ID used to represent X.509 tokenless authorization. This is used in combination with the value of `[tokenless_auth] issuer_attribute` to find a corresponding federated mapping. In a typical deployment, there is no reason to change this value. """ ), ) issuer_attribute = cfg.StrOpt( 'issuer_attribute', default='SSL_CLIENT_I_DN', help=utils.fmt( """ The name of the WSGI environment variable used to pass the issuer of the client certificate to keystone. This attribute is used as an identity provider ID for the X.509 tokenless authorization along with the protocol to look up its corresponding mapping. In a typical deployment, there is no reason to change this value. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ trusted_issuer, protocol, issuer_attribute, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/totp.py0000664000175000017500000000203400000000000017610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from keystone.conf import utils included_previous_windows = cfg.IntOpt( 'included_previous_windows', default=1, min=0, max=10, help=utils.fmt( """ The number of previous windows to check when processing TOTP passcodes. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ included_previous_windows, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/trust.py0000664000175000017500000000334100000000000020005 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils allow_redelegation = cfg.BoolOpt( 'allow_redelegation', default=False, help=utils.fmt( """ Allows authorization to be redelegated from one user to another, effectively chaining trusts together. When disabled, the `remaining_uses` attribute of a trust is constrained to be zero. """ ), ) max_redelegation_count = cfg.IntOpt( 'max_redelegation_count', default=3, help=utils.fmt( """ Maximum number of times that authorization can be redelegated from one user to another in a chain of trusts. This number may be reduced further for a specific trust. """ ), ) driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the trust backend driver in the `keystone.trust` namespace. 
Keystone only provides a `sql` driver, so there is no reason to change this unless you are providing a custom entry point. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ allow_redelegation, max_redelegation_count, driver, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/unified_limit.py0000664000175000017500000000444200000000000021450 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils driver = cfg.StrOpt( 'driver', default='sql', help=utils.fmt( """ Entry point for the unified limit backend driver in the `keystone.unified_limit` namespace. Keystone only provides a `sql` driver, so there's no reason to change this unless you are providing a custom entry point. """ ), ) caching = cfg.BoolOpt( 'caching', default=True, help=utils.fmt( """ Toggle for unified limit caching. This has no effect unless global caching is enabled. In a typical deployment, there is no reason to disable this. """ ), ) cache_time = cfg.IntOpt( 'cache_time', help=utils.fmt( """ Time to cache unified limit data, in seconds. This has no effect unless both global caching and `[unified_limit] caching` are enabled. 
""" ), ) list_limit = cfg.IntOpt( 'list_limit', help=utils.fmt( """ Maximum number of entities that will be returned in a unified limit collection. This may be useful to tune if you have a large number of unified limits in your deployment. """ ), ) enforcement_model = cfg.StrOpt( 'enforcement_model', default='flat', choices=['flat', 'strict_two_level'], help=utils.fmt( """ The enforcement model to use when validating limits associated to projects. Enforcement models will behave differently depending on the existing limits, which may result in backwards incompatible changes if a model is switched in a running deployment. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ driver, caching, cache_time, list_limit, enforcement_model, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/utils.py0000664000175000017500000000216400000000000017766 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. def fmt(docstr): """Format a docstring for use as documentation in sample config.""" # Replace newlines with spaces, as docstrings contain literal newlines that # should not be rendered into the sample configuration file (instead, line # wrappings should be applied automatically). 
docstr = docstr.replace('\n', ' ') # Because it's common for docstrings to begin and end with a newline, there # is now whitespace at the beginning and end of the documentation as a side # effect of replacing newlines with spaces. docstr = docstr.strip() return docstr ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/conf/wsgi.py0000664000175000017500000000341400000000000017576 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from keystone.conf import utils debug_middleware = cfg.BoolOpt( 'debug_middleware', default=False, help=utils.fmt( """ If set to true, this enables the oslo debug middleware in Keystone. This Middleware prints a lot of information about the request and the response. It is useful for getting information about the data on the wire (decoded) and passed to the WSGI application pipeline. This middleware has no effect on the "debug" setting in the [DEFAULT] section of the config file or setting Keystone's log-level to "DEBUG"; it is specific to debugging the WSGI data as it enters and leaves Keystone (specific request-related data). This option is used for introspection on the request and response data between the web server (apache, nginx, etc) and Keystone. This middleware is inserted as the first element in the middleware chain and will show the data closest to the wire. WARNING: NOT INTENDED FOR USE IN PRODUCTION. 
THIS MIDDLEWARE CAN AND WILL EMIT SENSITIVE/PRIVILEGED DATA. """ ), ) GROUP_NAME = __name__.split('.')[-1] ALL_OPTS = [ debug_middleware, ] def register_opts(conf): conf.register_opts(ALL_OPTS, group=GROUP_NAME) def list_opts(): return {GROUP_NAME: ALL_OPTS} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5181139 keystone-26.0.0/keystone/credential/0000775000175000017500000000000000000000000017436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/__init__.py0000664000175000017500000000125200000000000021547 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.credential.core import * # noqa from keystone.credential import provider # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5181139 keystone-26.0.0/keystone/credential/backends/0000775000175000017500000000000000000000000021210 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/backends/__init__.py0000664000175000017500000000000000000000000023307 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/backends/base.py0000664000175000017500000000752700000000000022507 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from oslo_log import log from keystone import exception LOG = log.getLogger(__name__) class CredentialDriverBase(metaclass=abc.ABCMeta): # credential crud @abc.abstractmethod def create_credential(self, credential_id, credential): """Create a new credential. :raises keystone.exception.Conflict: If a duplicate credential exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_credentials(self, hints): """List all credentials. :param hints: contains the list of filters yet to be satisfied. 
Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: a list of credential_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_credentials_for_user(self, user_id, type=None): """List credentials for a user. :param user_id: ID of a user to filter credentials by. :param type: type of credentials to filter on. :returns: a list of credential_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_credential(self, credential_id): """Get a credential by ID. :returns: credential_ref :raises keystone.exception.CredentialNotFound: If credential doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_credential(self, credential_id, credential): """Update an existing credential. :raises keystone.exception.CredentialNotFound: If credential doesn't exist. :raises keystone.exception.Conflict: If a duplicate credential exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_credential(self, credential_id): """Delete an existing credential. :raises keystone.exception.CredentialNotFound: If credential doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_credentials_for_project(self, project_id): """Delete all credentials for a project.""" self._delete_credentials(lambda cr: cr['project_id'] == project_id) @abc.abstractmethod def delete_credentials_for_user(self, user_id): """Delete all credentials for a user.""" self._delete_credentials(lambda cr: cr['user_id'] == user_id) def _delete_credentials(self, match_fn): """Do the actual credential deletion work (default implementation). :param match_fn: function that takes a credential dict as the parameter and returns true or false if the identifier matches the credential dict. 
""" for cr in self.list_credentials(): if match_fn(cr): try: self.credential_api.delete_credential(cr['id']) except exception.CredentialNotFound: LOG.debug( 'Deletion of credential is not required: %s', cr['id'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/backends/sql.py0000664000175000017500000001134600000000000022366 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db import api as oslo_db_api from sqlalchemy.ext.hybrid import hybrid_property from keystone.common import driver_hints from keystone.common import sql from keystone.credential.backends import base from keystone import exception class CredentialModel(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'credential' attributes = [ 'id', 'user_id', 'project_id', 'encrypted_blob', 'type', 'key_hash', ] id = sql.Column(sql.String(64), primary_key=True) user_id = sql.Column(sql.String(64), nullable=False) project_id = sql.Column(sql.String(64)) _encrypted_blob = sql.Column('encrypted_blob', sql.Text(), nullable=False) type = sql.Column(sql.String(255), nullable=False) key_hash = sql.Column(sql.String(64), nullable=False) extra = sql.Column(sql.JsonBlob()) @hybrid_property def encrypted_blob(self): return self._encrypted_blob @encrypted_blob.setter # type: ignore[no-redef] def encrypted_blob(self, encrypted_blob): # Make sure to hand over the encrypted credential as a string value # to the backend driver to avoid the sql drivers (esp. psycopg2) # treating this as binary data and e.g. hex-escape it. 
if isinstance(encrypted_blob, bytes): encrypted_blob = encrypted_blob.decode('utf-8') self._encrypted_blob = encrypted_blob class Credential(base.CredentialDriverBase): # credential crud @sql.handle_conflicts(conflict_type='credential') def create_credential(self, credential_id, credential): with sql.session_for_write() as session: ref = CredentialModel.from_dict(credential) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_credentials(self, hints): with sql.session_for_read() as session: credentials = session.query(CredentialModel) credentials = sql.filter_limit_query( CredentialModel, credentials, hints ) return [s.to_dict() for s in credentials] def list_credentials_for_user(self, user_id, type=None): with sql.session_for_read() as session: query = session.query(CredentialModel) query = query.filter_by(user_id=user_id) if type: query = query.filter_by(type=type) refs = query.all() return [ref.to_dict() for ref in refs] def _get_credential(self, session, credential_id): ref = session.get(CredentialModel, credential_id) if ref is None: raise exception.CredentialNotFound(credential_id=credential_id) return ref def get_credential(self, credential_id): with sql.session_for_read() as session: return self._get_credential(session, credential_id).to_dict() @sql.handle_conflicts(conflict_type='credential') def update_credential(self, credential_id, credential): with sql.session_for_write() as session: ref = self._get_credential(session, credential_id) old_dict = ref.to_dict() for k in credential: old_dict[k] = credential[k] new_credential = CredentialModel.from_dict(old_dict) for attr in CredentialModel.attributes: if attr != 'id': setattr(ref, attr, getattr(new_credential, attr)) ref.extra = new_credential.extra return ref.to_dict() def delete_credential(self, credential_id): with sql.session_for_write() as session: ref = self._get_credential(session, credential_id) session.delete(ref) def delete_credentials_for_project(self, project_id): with 
sql.session_for_write() as session: query = session.query(CredentialModel) query = query.filter_by(project_id=project_id) query.delete() @oslo_db_api.wrap_db_retry(retry_on_deadlock=True) def delete_credentials_for_user(self, user_id): with sql.session_for_write() as session: query = session.query(CredentialModel) query = query.filter_by(user_id=user_id) query.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/core.py0000664000175000017500000002141500000000000020743 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Credential service.""" import json from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import notifications CONF = keystone.conf.CONF MEMOIZE = cache.get_memoization_decorator(group='credential') PROVIDERS = provider_api.ProviderAPIs class Manager(manager.Manager): """Default pivot point for the Credential backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.credential' _provides_api = 'credential_api' _CRED = 'credential' def __init__(self): super().__init__(CONF.credential.driver) def _decrypt_credential(self, credential): """Return a decrypted credential reference.""" if credential['type'] == 'ec2': decrypted_blob = json.loads( PROVIDERS.credential_provider_api.decrypt( credential['encrypted_blob'], ) ) else: decrypted_blob = PROVIDERS.credential_provider_api.decrypt( credential['encrypted_blob'] ) credential['blob'] = decrypted_blob credential.pop('key_hash', None) credential.pop('encrypted_blob', None) return credential def _encrypt_credential(self, credential): """Return an encrypted credential reference.""" credential_copy = credential.copy() if credential.get('type', None) == 'ec2': # NOTE(lbragstad): When dealing with ec2 credentials, it's possible # for the `blob` to be a dictionary. Let's make sure we are # encrypting a string otherwise encryption will fail. encrypted_blob, key_hash = ( PROVIDERS.credential_provider_api.encrypt( json.dumps(credential['blob']) ) ) else: encrypted_blob, key_hash = ( PROVIDERS.credential_provider_api.encrypt(credential['blob']) ) credential_copy['encrypted_blob'] = encrypted_blob credential_copy['key_hash'] = key_hash credential_copy.pop('blob', None) return credential_copy def _assert_limit_not_exceeded(self, user_id): user_limit = CONF.credential.user_limit if user_limit >= 0: cred_count = len(self.list_credentials_for_user(user_id)) if cred_count >= user_limit: raise exception.CredentialLimitExceeded(limit=user_limit) @manager.response_truncated def list_credentials(self, hints=None): credentials = self.driver.list_credentials( hints or driver_hints.Hints() ) for credential in credentials: credential = self._decrypt_credential(credential) return credentials def list_credentials_for_user(self, user_id, type=None): credentials = self._list_credentials_for_user(user_id, type) for credential in credentials: credential = 
self._decrypt_credential(credential) return credentials @MEMOIZE def _list_credentials_for_user(self, user_id, type): """List credentials for a specific user.""" return self.driver.list_credentials_for_user(user_id, type) def get_credential(self, credential_id): """Return a credential reference.""" credential = self._get_credential(credential_id) return self._decrypt_credential(credential) @MEMOIZE def _get_credential(self, credential_id): return self.driver.get_credential(credential_id) def create_credential(self, credential_id, credential, initiator=None): """Create a credential.""" credential_copy = self._encrypt_credential(credential) user_id = credential_copy['user_id'] self._assert_limit_not_exceeded(user_id) ref = self.driver.create_credential(credential_id, credential_copy) if MEMOIZE.should_cache(ref): self._get_credential.set(ref, credential_copy, credential_id) self._list_credentials_for_user.invalidate( self, ref['user_id'], ref['type'] ) self._list_credentials_for_user.invalidate( self, ref['user_id'], None ) ref.pop('key_hash', None) ref.pop('encrypted_blob', None) ref['blob'] = credential['blob'] notifications.Audit.created(self._CRED, credential_id, initiator) return ref def _validate_credential_update(self, credential_id, credential): # ec2 credentials require a "project_id" to be functional. 
Before we # update, check the case where a non-ec2 credential changes its type # to be "ec2", but has no associated "project_id", either in the # request or already set in the database if credential.get('type', '').lower() == 'ec2' and not credential.get( 'project_id' ): existing_cred = self.get_credential(credential_id) if not existing_cred['project_id']: raise exception.ValidationError( attribute='project_id', target='credential' ) def update_credential(self, credential_id, credential): """Update an existing credential.""" self._validate_credential_update(credential_id, credential) if 'blob' in credential: credential_copy = self._encrypt_credential(credential) else: credential_copy = credential.copy() existing_credential = self.get_credential(credential_id) existing_blob = existing_credential['blob'] ref = self.driver.update_credential(credential_id, credential_copy) if MEMOIZE.should_cache(ref): self._get_credential.set(ref, self, credential_id) self._list_credentials_for_user.invalidate( self, ref['user_id'], ref['type'] ) self._list_credentials_for_user.invalidate( self, ref['user_id'], None ) ref.pop('key_hash', None) ref.pop('encrypted_blob', None) # If the update request contains a `blob` attribute - we should return # that in the update response. If not, then we should return the # existing `blob` attribute since it wasn't updated. 
if credential.get('blob'): ref['blob'] = credential['blob'] else: ref['blob'] = existing_blob return ref def delete_credential(self, credential_id, initiator=None): """Delete a credential.""" cred = self.get_credential(credential_id) self.driver.delete_credential(credential_id) self._get_credential.invalidate(self, credential_id) self._list_credentials_for_user.invalidate( self, cred['user_id'], cred['type'] ) self._list_credentials_for_user.invalidate(self, cred['user_id'], None) notifications.Audit.deleted(self._CRED, credential_id, initiator) def delete_credentials_for_project(self, project_id): """Delete all credentials for a project.""" hints = driver_hints.Hints() hints.add_filter('project_id', project_id) creds = self.driver.list_credentials(hints) self.driver.delete_credentials_for_project(project_id) for cred in creds: self._get_credential.invalidate(self, cred['id']) self._list_credentials_for_user.invalidate( self, cred['user_id'], cred['type'] ) self._list_credentials_for_user.invalidate( self, cred['user_id'], None ) def delete_credentials_for_user(self, user_id): """Delete all credentials for a user.""" creds = self.driver.list_credentials_for_user(user_id) self.driver.delete_credentials_for_user(user_id) for cred in creds: self._get_credential.invalidate(self, cred['id']) self._list_credentials_for_user.invalidate( self, user_id, cred['type'] ) self._list_credentials_for_user.invalidate( self, cred['user_id'], None ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/provider.py0000664000175000017500000000151100000000000021640 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import manager import keystone.conf CONF = keystone.conf.CONF class Manager(manager.Manager): driver_namespace = 'keystone.credential.provider' _provides_api = 'credential_provider_api' def __init__(self): super().__init__(CONF.credential.provider) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5181139 keystone-26.0.0/keystone/credential/providers/0000775000175000017500000000000000000000000021453 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/providers/__init__.py0000664000175000017500000000000000000000000023552 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/providers/core.py0000664000175000017500000000225700000000000022763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc class Provider(metaclass=abc.ABCMeta): """Interface for credential providers that support encryption.""" @abc.abstractmethod def encrypt(self, credential): """Encrypt a credential. :param str credential: credential to encrypt :returns: encrypted credential str :raises: keystone.exception.CredentialEncryptionError """ @abc.abstractmethod def decrypt(self, credential): """Decrypt a credential. :param str credential: credential to decrypt :returns: credential str as plaintext :raises: keystone.exception.CredentialEncryptionError """ ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/credential/providers/fernet/0000775000175000017500000000000000000000000022736 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/providers/fernet/__init__.py0000664000175000017500000000114200000000000025045 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.credential.providers.fernet.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/providers/fernet/core.py0000664000175000017500000001117100000000000024241 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from cryptography import fernet from oslo_log import log from keystone.common import fernet_utils import keystone.conf from keystone.credential.providers import core from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) # NOTE(lbragstad): Credential key rotation operates slightly different than # token key rotation. Each credential holds a hash of the key that encrypted # it. This is important for credential key rotation because it helps us make # sure we don't over-rotate credential keys. During a rotation of credential # keys, if any credential has not been re-encrypted with the current primary # key, we can abandon the key rotation until all credentials have been migrated # to the new primary key. If we don't take this step, it is possible that we # could remove a key used to encrypt credentials, leaving them unrecoverable. # This also means that we don't need to expose a `[credential] max_active_keys` # option through configuration. 
Instead we will use a global variable and share # that across all places that need to use FernetUtils for credential # encryption. MAX_ACTIVE_KEYS = 3 def get_multi_fernet_keys(): key_utils = fernet_utils.FernetUtils( CONF.credential.key_repository, MAX_ACTIVE_KEYS, 'credential' ) keys = key_utils.load_keys(use_null_key=True) fernet_keys = [fernet.Fernet(key) for key in keys] crypto = fernet.MultiFernet(fernet_keys) return crypto, keys def primary_key_hash(keys): """Calculate a hash of the primary key used for encryption.""" if isinstance(keys[0], str): keys[0] = keys[0].encode('utf-8') # NOTE(lhinds) This is marked as #nosec since bandit will see SHA1 which # is marked as insecure. However, this hash function is used alongside # encrypted blobs to implement HMAC-SHA1, which is currently not insecure # but will still trigger when scanned by bandit. return hashlib.sha1(keys[0]).hexdigest() # nosec class Provider(core.Provider): def encrypt(self, credential): """Attempt to encrypt a plaintext credential. :param credential: a plaintext representation of a credential :returns: an encrypted credential """ crypto, keys = get_multi_fernet_keys() if keys[0] == fernet_utils.NULL_KEY: LOG.warning( 'Encrypting credentials with the null key. Please properly ' 'encrypt credentials using `keystone-manage credential_setup`,' ' `keystone-manage credential_migrate`, and `keystone-manage ' 'credential_rotate`' ) try: return ( crypto.encrypt(credential.encode('utf-8')), primary_key_hash(keys), ) except (TypeError, ValueError) as e: msg = 'Credential could not be encrypted: %s' % str(e) tr_msg = _('Credential could not be encrypted: %s') % str(e) LOG.error(msg) raise exception.CredentialEncryptionError(tr_msg) def decrypt(self, credential): """Attempt to decrypt a credential. 
:param credential: an encrypted credential string :returns: a decrypted credential """ key_utils = fernet_utils.FernetUtils( CONF.credential.key_repository, MAX_ACTIVE_KEYS, 'credential' ) keys = key_utils.load_keys(use_null_key=True) fernet_keys = [fernet.Fernet(key) for key in keys] crypto = fernet.MultiFernet(fernet_keys) try: if isinstance(credential, str): credential = credential.encode('utf-8') return crypto.decrypt(credential).decode('utf-8') except (fernet.InvalidToken, TypeError, ValueError): msg = ( 'Credential could not be decrypted. Please contact the ' 'administrator' ) tr_msg = _( 'Credential could not be decrypted. Please contact the ' 'administrator' ) LOG.error(msg) raise exception.CredentialEncryptionError(tr_msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/credential/schema.py0000664000175000017500000000260300000000000021251 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
_credential_properties = { 'blob': {'type': 'string'}, 'project_id': {'type': 'string'}, 'type': {'type': 'string'}, 'user_id': {'type': 'string'}, } credential_create = { 'type': 'object', 'properties': _credential_properties, 'additionalProperties': True, 'oneOf': [ { 'title': 'ec2 credential requires project_id', 'required': ['blob', 'type', 'user_id', 'project_id'], 'properties': {'type': {'enum': ['ec2']}}, }, { 'title': 'non-ec2 credential does not require project_id', 'required': ['blob', 'type', 'user_id'], 'properties': {'type': {'not': {'enum': ['ec2']}}}, }, ], } credential_update = { 'type': 'object', 'properties': _credential_properties, 'minProperties': 1, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/endpoint_policy/0000775000175000017500000000000000000000000020523 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/endpoint_policy/__init__.py0000664000175000017500000000112600000000000022634 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.endpoint_policy.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/endpoint_policy/backends/0000775000175000017500000000000000000000000022275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/endpoint_policy/backends/__init__.py0000664000175000017500000000000000000000000024374 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/endpoint_policy/backends/base.py0000664000175000017500000001316400000000000023566 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class EndpointPolicyDriverBase(metaclass=abc.ABCMeta): """Interface description for an Endpoint Policy driver.""" @abc.abstractmethod def create_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): """Create a policy association. 
:param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :returns: None There are three types of association permitted: - Endpoint (in which case service and region must be None) - Service and region (in which endpoint must be None) - Service (in which case endpoint and region must be None) """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): """Check existence of a policy association. :param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :raises keystone.exception.PolicyAssociationNotFound: If there is no match for the specified association. :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): """Delete a policy association. :param policy_id: identity of policy that is being associated :type policy_id: string :param endpoint_id: identity of endpoint to associate :type endpoint_id: string :param service_id: identity of the service to associate :type service_id: string :param region_id: identity of the region to associate :type region_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_policy_association( self, endpoint_id=None, service_id=None, region_id=None ): """Get the policy for an explicit association. 
This method is not exposed as a public API, but is used by get_policy_for_endpoint(). :param endpoint_id: identity of endpoint :type endpoint_id: string :param service_id: identity of the service :type service_id: string :param region_id: identity of the region :type region_id: string :raises keystone.exception.PolicyAssociationNotFound: If there is no match for the specified association. :returns: dict containing policy_id (value is a tuple containing only the policy_id) """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_associations_for_policy(self, policy_id): """List the associations for a policy. This method is not exposed as a public API, but is used by list_endpoints_for_policy(). :param policy_id: identity of policy :type policy_id: string :returns: List of association dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_endpoint(self, endpoint_id): """Remove all the policy associations with the specific endpoint. :param endpoint_id: identity of endpoint to check :type endpoint_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_service(self, service_id): """Remove all the policy associations with the specific service. :param service_id: identity of endpoint to check :type service_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_region(self, region_id): """Remove all the policy associations with the specific region. :param region_id: identity of endpoint to check :type region_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_association_by_policy(self, policy_id): """Remove all the policy associations with the specific policy. 
:param policy_id: identity of endpoint to check :type policy_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/endpoint_policy/backends/sql.py0000664000175000017500000001405000000000000023446 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import sqlalchemy from keystone.common import sql from keystone.endpoint_policy.backends import base from keystone import exception class PolicyAssociation(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'policy_association' attributes = ['policy_id', 'endpoint_id', 'region_id', 'service_id'] # The id column is never exposed outside this module. It only exists to # provide a primary key, given that the real columns we would like to use # (endpoint_id, service_id, region_id) can be null id = sql.Column(sql.String(64), primary_key=True) policy_id = sql.Column(sql.String(64), nullable=False) endpoint_id = sql.Column(sql.String(64), nullable=True) service_id = sql.Column(sql.String(64), nullable=True) region_id = sql.Column(sql.String(64), nullable=True) __table_args__ = ( sql.UniqueConstraint('endpoint_id', 'service_id', 'region_id'), ) def to_dict(self): """Return the model's attributes as a dictionary. 
We override the standard method in order to hide the id column, since this only exists to provide the table with a primary key. """ d = {} for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class EndpointPolicy(base.EndpointPolicyDriverBase): def create_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): with sql.session_for_write() as session: try: # See if there is already a row for this association, and if # so, update it with the new policy_id query = session.query(PolicyAssociation) query = query.filter_by(endpoint_id=endpoint_id) query = query.filter_by(service_id=service_id) query = query.filter_by(region_id=region_id) association = query.one() association.policy_id = policy_id except sql.NotFound: association = PolicyAssociation( id=uuid.uuid4().hex, policy_id=policy_id, endpoint_id=endpoint_id, service_id=service_id, region_id=region_id, ) session.add(association) def check_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): sql_constraints = sqlalchemy.and_( PolicyAssociation.policy_id == policy_id, PolicyAssociation.endpoint_id == endpoint_id, PolicyAssociation.service_id == service_id, PolicyAssociation.region_id == region_id, ) # NOTE(henry-nash): Getting a single value to save object # management overhead. 
with sql.session_for_read() as session: if ( session.query(PolicyAssociation.id) .filter(sql_constraints) .distinct() .count() == 0 ): raise exception.PolicyAssociationNotFound() def delete_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): with sql.session_for_write() as session: query = session.query(PolicyAssociation) query = query.filter_by(policy_id=policy_id) query = query.filter_by(endpoint_id=endpoint_id) query = query.filter_by(service_id=service_id) query = query.filter_by(region_id=region_id) query.delete() def get_policy_association( self, endpoint_id=None, service_id=None, region_id=None ): sql_constraints = sqlalchemy.and_( PolicyAssociation.endpoint_id == endpoint_id, PolicyAssociation.service_id == service_id, PolicyAssociation.region_id == region_id, ) try: with sql.session_for_read() as session: policy_id = ( session.query(PolicyAssociation.policy_id) .filter(sql_constraints) .distinct() .one() ) return {'policy_id': policy_id} except sql.NotFound: raise exception.PolicyAssociationNotFound() def list_associations_for_policy(self, policy_id): with sql.session_for_read() as session: query = session.query(PolicyAssociation) query = query.filter_by(policy_id=policy_id) return [ref.to_dict() for ref in query.all()] def delete_association_by_endpoint(self, endpoint_id): with sql.session_for_write() as session: query = session.query(PolicyAssociation) query = query.filter_by(endpoint_id=endpoint_id) query.delete() def delete_association_by_service(self, service_id): with sql.session_for_write() as session: query = session.query(PolicyAssociation) query = query.filter_by(service_id=service_id) query.delete() def delete_association_by_region(self, region_id): with sql.session_for_write() as session: query = session.query(PolicyAssociation) query = query.filter_by(region_id=region_id) query.delete() def delete_association_by_policy(self, policy_id): with sql.session_for_write() as session: query = 
session.query(PolicyAssociation) query = query.filter_by(policy_id=policy_id) query.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/endpoint_policy/core.py0000664000175000017500000002735200000000000022036 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class Manager(manager.Manager): """Default pivot point for the Endpoint Policy backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.endpoint_policy' _provides_api = 'endpoint_policy_api' def __init__(self): super().__init__(CONF.endpoint_policy.driver) def _assert_valid_association(self, endpoint_id, service_id, region_id): """Assert that the association is supported. 
There are three types of association supported: - Endpoint (in which case service and region must be None) - Service and region (in which endpoint must be None) - Service (in which case endpoint and region must be None) """ if ( endpoint_id is not None and service_id is None and region_id is None ): return if ( service_id is not None and region_id is not None and endpoint_id is None ): return if ( service_id is not None and endpoint_id is None and region_id is None ): return raise exception.InvalidPolicyAssociation( endpoint_id=endpoint_id, service_id=service_id, region_id=region_id ) def create_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): self._assert_valid_association(endpoint_id, service_id, region_id) self.driver.create_policy_association( policy_id, endpoint_id, service_id, region_id ) def check_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): self._assert_valid_association(endpoint_id, service_id, region_id) self.driver.check_policy_association( policy_id, endpoint_id, service_id, region_id ) def delete_policy_association( self, policy_id, endpoint_id=None, service_id=None, region_id=None ): self._assert_valid_association(endpoint_id, service_id, region_id) self.driver.delete_policy_association( policy_id, endpoint_id, service_id, region_id ) def list_endpoints_for_policy(self, policy_id): def _get_endpoint(endpoint_id, policy_id): try: return PROVIDERS.catalog_api.get_endpoint(endpoint_id) except exception.EndpointNotFound: msg = ( 'Endpoint %(endpoint_id)s referenced in ' 'association for policy %(policy_id)s not found.' ) LOG.warning( msg, {'policy_id': policy_id, 'endpoint_id': endpoint_id} ) raise def _get_endpoints_for_service(service_id, endpoints): # TODO(henry-nash): Consider optimizing this in the future by # adding an explicit list_endpoints_for_service to the catalog API. 
return [ep for ep in endpoints if ep['service_id'] == service_id] def _get_endpoints_for_service_and_region( service_id, region_id, endpoints, regions ): # TODO(henry-nash): Consider optimizing this in the future. # The lack of a two-way pointer in the region tree structure # makes this somewhat inefficient. def _recursively_get_endpoints_for_region( region_id, service_id, endpoint_list, region_list, endpoints_found, regions_examined, ): """Recursively search down a region tree for endpoints. :param region_id: the point in the tree to examine :param service_id: the service we are interested in :param endpoint_list: list of all endpoints :param region_list: list of all regions :param endpoints_found: list of matching endpoints found so far - which will be updated if more are found in this iteration :param regions_examined: list of regions we have already looked at - used to spot illegal circular references in the tree to avoid never completing search :returns: list of endpoints that match """ if region_id in regions_examined: msg = ( 'Circular reference or a repeated entry found ' 'in region tree - %(region_id)s.' 
) LOG.error(msg, {'region_id': ref.region_id}) return regions_examined.append(region_id) endpoints_found += [ ep for ep in endpoint_list if ep['service_id'] == service_id and ep['region_id'] == region_id ] for region in region_list: if region['parent_region_id'] == region_id: _recursively_get_endpoints_for_region( region['id'], service_id, endpoints, regions, endpoints_found, regions_examined, ) endpoints_found = [] regions_examined = [] # Now walk down the region tree _recursively_get_endpoints_for_region( region_id, service_id, endpoints, regions, endpoints_found, regions_examined, ) return endpoints_found matching_endpoints = [] endpoints = PROVIDERS.catalog_api.list_endpoints() regions = PROVIDERS.catalog_api.list_regions() for ref in self.list_associations_for_policy(policy_id): if ref.get('endpoint_id') is not None: matching_endpoints.append( _get_endpoint(ref['endpoint_id'], policy_id) ) continue if ( ref.get('service_id') is not None and ref.get('region_id') is None ): matching_endpoints += _get_endpoints_for_service( ref['service_id'], endpoints ) continue if ( ref.get('service_id') is not None and ref.get('region_id') is not None ): matching_endpoints += _get_endpoints_for_service_and_region( ref['service_id'], ref['region_id'], endpoints, regions ) continue msg = ( 'Unsupported policy association found - ' 'Policy %(policy_id)s, Endpoint %(endpoint_id)s, ' 'Service %(service_id)s, Region %(region_id)s, ' ) LOG.warning( msg, { 'policy_id': policy_id, 'endpoint_id': ref['endpoint_id'], 'service_id': ref['service_id'], 'region_id': ref['region_id'], }, ) return matching_endpoints def get_policy_for_endpoint(self, endpoint_id): def _get_policy(policy_id, endpoint_id): try: return PROVIDERS.policy_api.get_policy(policy_id) except exception.PolicyNotFound: msg = ( 'Policy %(policy_id)s referenced in association ' 'for endpoint %(endpoint_id)s not found.' 
) LOG.warning( msg, {'policy_id': policy_id, 'endpoint_id': endpoint_id} ) raise def _look_for_policy_for_region_and_service(endpoint): """Look in the region and its parents for a policy. Examine the region of the endpoint for a policy appropriate for the service of the endpoint. If there isn't a match, then chase up the region tree to find one. """ region_id = endpoint['region_id'] regions_examined = [] while region_id is not None: try: ref = self.get_policy_association( service_id=endpoint['service_id'], region_id=region_id ) return ref['policy_id'] except exception.PolicyAssociationNotFound: # nosec # There wasn't one for that region & service, handle below. pass # There wasn't one for that region & service, let's # chase up the region tree regions_examined.append(region_id) region = PROVIDERS.catalog_api.get_region(region_id) region_id = None if region.get('parent_region_id') is not None: region_id = region['parent_region_id'] if region_id in regions_examined: msg = ( 'Circular reference or a repeated entry ' 'found in region tree - %(region_id)s.' ) LOG.error(msg, {'region_id': region_id}) break # First let's see if there is a policy explicitly defined for # this endpoint. try: ref = self.get_policy_association(endpoint_id=endpoint_id) return _get_policy(ref['policy_id'], endpoint_id) except exception.PolicyAssociationNotFound: # nosec # There wasn't a policy explicitly defined for this endpoint, # handled below. pass # There wasn't a policy explicitly defined for this endpoint, so # now let's see if there is one for the Region & Service. endpoint = PROVIDERS.catalog_api.get_endpoint(endpoint_id) policy_id = _look_for_policy_for_region_and_service(endpoint) if policy_id is not None: return _get_policy(policy_id, endpoint_id) # Finally, just check if there is one for the service. 
try: ref = self.get_policy_association( service_id=endpoint['service_id'] ) return _get_policy(ref['policy_id'], endpoint_id) except exception.PolicyAssociationNotFound: # nosec # No policy is associated with endpoint, handled below. pass msg = _('No policy is associated with endpoint %(endpoint_id)s.') % { 'endpoint_id': endpoint_id } raise exception.NotFound(msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/exception.py0000664000175000017500000006371600000000000017711 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import typing as ty from oslo_log import log from oslo_utils import encodeutils import keystone.conf from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) KEYSTONE_API_EXCEPTIONS = set() # Tests use this to make exception message format errors fatal _FATAL_EXCEPTION_FORMAT_ERRORS = False def _format_with_unicode_kwargs(msg_format, kwargs): try: return msg_format % kwargs except UnicodeDecodeError: try: kwargs = {k: encodeutils.safe_decode(v) for k, v in kwargs.items()} except UnicodeDecodeError: # NOTE(jamielennox): This is the complete failure case # at least by showing the template we have some idea # of where the error is coming from return msg_format return msg_format % kwargs class _KeystoneExceptionMeta(type): """Automatically Register the Exceptions in 'KEYSTONE_API_EXCEPTIONS' list. The `KEYSTONE_API_EXCEPTIONS` list is utilized by flask to register a handler to emit sane details when the exception occurs. """ def __new__(mcs, name, bases, class_dict): """Create a new instance and register with KEYSTONE_API_EXCEPTIONS.""" cls = type.__new__(mcs, name, bases, class_dict) KEYSTONE_API_EXCEPTIONS.add(cls) return cls class Error(Exception, metaclass=_KeystoneExceptionMeta): """Base error class. Child classes should define an HTTP status code, title, and a message_format. """ code: ty.Optional[int] = None title: ty.Optional[str] = None message_format: ty.Optional[str] = None def __init__(self, message=None, **kwargs): try: message = self._build_message(message, **kwargs) except KeyError: # if you see this warning in your logs, please raise a bug report if _FATAL_EXCEPTION_FORMAT_ERRORS: raise else: LOG.warning('missing exception kwargs (programmer error)') message = self.message_format super().__init__(message) def _build_message(self, message, **kwargs): """Build and returns an exception message. 
:raises KeyError: given insufficient kwargs """ if message: return message return _format_with_unicode_kwargs(self.message_format, kwargs) class ValidationError(Error): message_format = _( "Expecting to find %(attribute)s in %(target)s." " The server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error." ) code = int(http.client.BAD_REQUEST) title = http.client.responses[http.client.BAD_REQUEST] class URLValidationError(ValidationError): message_format = _( "Cannot create an endpoint with an invalid URL: %(url)s." ) class PasswordValidationError(ValidationError): message_format = _("Password validation error: %(detail)s.") class PasswordRequirementsValidationError(PasswordValidationError): message_format = _( "The password does not match the requirements: %(detail)s." ) class PasswordHistoryValidationError(PasswordValidationError): message_format = _( "The new password cannot be identical to a " "previous password. The total number which " "includes the new password must be unique is " "%(unique_count)s." ) class PasswordAgeValidationError(PasswordValidationError): message_format = _( "You cannot change your password at this time due " "to the minimum password age. Once you change your " "password, it must be used for %(min_age_days)d day(s) " "before it can be changed. Please try again in " "%(days_left)d day(s) or contact your administrator to " "reset your password." ) class PasswordSelfServiceDisabled(PasswordValidationError): message_format = _( "You cannot change your password at this time due " "to password policy disallowing password changes. " "Please contact your administrator to reset your " "password." ) class SchemaValidationError(ValidationError): # NOTE(lbragstad): For whole OpenStack message consistency, this error # message has been written in a format consistent with WSME. 
message_format = _("%(detail)s") class ValidationTimeStampError(Error): message_format = _( "Timestamp not in expected format." " The server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error." ) code = int(http.client.BAD_REQUEST) title = http.client.responses[http.client.BAD_REQUEST] class InvalidOperatorError(ValidationError): message_format = _( "The given operator %(_op)s is not valid." " It must be one of the following:" " 'eq', 'neq', 'lt', 'lte', 'gt', or 'gte'." ) class ValidationExpirationError(Error): message_format = _( "The 'expires_at' must not be before now." " The server could not comply with the request" " since it is either malformed or otherwise" " incorrect. The client is assumed to be in error." ) code = int(http.client.BAD_REQUEST) title = http.client.responses[http.client.BAD_REQUEST] class StringLengthExceeded(ValidationError): message_format = _( "String length exceeded. The length of" " string '%(string)s' exceeds the limit" " of column %(type)s(CHAR(%(length)d))." ) class AmbiguityError(ValidationError): message_format = _( "There are multiple %(resource)s entities named" " '%(name)s'. Please use ID instead of names to" " resolve the ambiguity." ) class ApplicationCredentialValidationError(ValidationError): message_format = _("Invalid application credential: %(detail)s") class CircularRegionHierarchyError(Error): message_format = _( "The specified parent region %(parent_region_id)s " "would create a circular region hierarchy." ) code = int(http.client.BAD_REQUEST) title = http.client.responses[http.client.BAD_REQUEST] class ForbiddenNotSecurity(Error): """When you want to return a 403 Forbidden response but not security. Use this for errors where the message is always safe to present to the user and won't give away extra information. 
""" code = int(http.client.FORBIDDEN) title = http.client.responses[http.client.FORBIDDEN] class PasswordVerificationError(ForbiddenNotSecurity): message_format = _( "The password length must be less than or equal " "to %(size)i. The server could not comply with the " "request because the password is invalid." ) class RegionDeletionError(ForbiddenNotSecurity): message_format = _( "Unable to delete region %(region_id)s because it or " "its child regions have associated endpoints." ) class ApplicationCredentialLimitExceeded(ForbiddenNotSecurity): message_format = _( "Unable to create additional application credentials, " "maximum of %(limit)d already exceeded for user." ) class CredentialLimitExceeded(ForbiddenNotSecurity): message_format = _( "Unable to create additional credentials, maximum " "of %(limit)d already exceeded for user." ) class SecurityError(Error): """Security error exception. Avoids exposing details of security errors, unless in insecure_debug mode. """ amendment = _('(Disable insecure_debug mode to suppress these details.)') def __deepcopy__(self): """Override the default deepcopy. Keystone :class:`keystone.exception.Error` accepts an optional message that will be used when rendering the exception object as a string. If not provided the object's message_format attribute is used instead. :class:`keystone.exception.SecurityError` is a little different in that it only uses the message provided to the initializer when keystone is in `insecure_debug` mode. Instead it will use its `message_format`. This is to ensure that sensitive details are not leaked back to the caller in a production deployment. This dual mode for string rendering causes some odd behaviour when combined with oslo_i18n translation. Any object used as a value for formatting a translated string is deep copied. The copy causes an issue. The deep copy process actually creates a new exception instance with the rendered string. 
Then when that new instance is rendered as a string to use for substitution a warning is logged. This is because the code tries to use the `message_format` in secure mode, but the required kwargs are not in the deep copy. The end result is not an error because when the KeyError is caught the instance's ``message`` is used instead and this has the properly translated message. The only indication that something is wonky is a message in the warning log. """ return self def _build_message(self, message, **kwargs): """Only returns detailed messages in insecure_debug mode.""" if message and CONF.insecure_debug: if isinstance(message, str): # Only do replacement if message is string. The message is # sometimes a different exception or bytes, which would raise # TypeError. message = _format_with_unicode_kwargs(message, kwargs) return _('%(message)s %(amendment)s') % { 'message': message, 'amendment': self.amendment, } return _format_with_unicode_kwargs(self.message_format, kwargs) class Unauthorized(SecurityError): message_format = _("The request you have made requires authentication.") code = int(http.client.UNAUTHORIZED) title = http.client.responses[http.client.UNAUTHORIZED] class InsufficientAuthMethods(Error): # NOTE(adriant): This is an internal only error that is built into # an auth receipt response. message_format = _( "Insufficient auth methods received for %(user_id)s. " "Auth Methods Provided: %(methods)s." ) code = 401 title = 'Unauthorized' def __init__(self, message=None, user_id=None, methods=None): methods_str = '[%s]' % ','.join(methods) super().__init__(message, user_id=user_id, methods=methods_str) self.user_id = user_id self.methods = methods class ReceiptNotFound(Unauthorized): message_format = _("Could not find auth receipt: %(receipt_id)s.") class PasswordExpired(Unauthorized): message_format = _( "The password is expired and needs to be changed for " "user: %(user_id)s." 
) class AuthPluginException(Unauthorized): message_format = _("Authentication plugin error.") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.authentication = {} class UserDisabled(Unauthorized): message_format = _("The account is disabled for user: %(user_id)s.") class AccountLocked(Unauthorized): message_format = _("The account is locked for user: %(user_id)s.") class AuthMethodNotSupported(AuthPluginException): message_format = _("Attempted to authenticate with an unsupported method.") def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.authentication = {'methods': CONF.auth.methods} class ApplicationCredentialAuthError(AuthPluginException): message_format = _( "Error authenticating with application credential: %(detail)s" ) class AdditionalAuthRequired(AuthPluginException): message_format = _("Additional authentications steps required.") def __init__(self, auth_response=None, **kwargs): super().__init__(message=None, **kwargs) self.authentication = auth_response class Forbidden(SecurityError): message_format = _( "You are not authorized to perform the requested action." ) code = int(http.client.FORBIDDEN) title = http.client.responses[http.client.FORBIDDEN] class ForbiddenAction(Forbidden): message_format = _( "You are not authorized to perform the" " requested action: %(action)s." ) class CrossBackendNotAllowed(Forbidden): message_format = _( "Group membership across backend boundaries is not " "allowed. Group in question is %(group_id)s, " "user is %(user_id)s." ) class InvalidPolicyAssociation(Forbidden): message_format = _( "Invalid mix of entities for policy association: " "only Endpoint, Service, or Region+Service allowed. " "Request was - Endpoint: %(endpoint_id)s, " "Service: %(service_id)s, Region: %(region_id)s." 
) class InvalidDomainConfig(Forbidden): message_format = _("Invalid domain specific configuration: %(reason)s.") class InvalidLimit(Forbidden): message_format = _("Invalid resource limit: %(reason)s.") class LimitTreeExceedError(Exception): def __init__(self, project_id, max_limit_depth): super().__init__( _( "Keystone cannot start due to project hierarchical depth in the " "current deployment (project_ids: %(project_id)s) exceeds the " "enforcement model's maximum limit of %(max_limit_depth)s. Please " "use a different enforcement model to correct the issue." ) % {'project_id': project_id, 'max_limit_depth': max_limit_depth} ) class NotFound(Error): message_format = _("Could not find: %(target)s.") code = int(http.client.NOT_FOUND) title = http.client.responses[http.client.NOT_FOUND] class EndpointNotFound(NotFound): message_format = _("Could not find endpoint: %(endpoint_id)s.") class PolicyNotFound(NotFound): message_format = _("Could not find policy: %(policy_id)s.") class PolicyAssociationNotFound(NotFound): message_format = _("Could not find policy association.") class RoleNotFound(NotFound): message_format = _("Could not find role: %(role_id)s.") class ImpliedRoleNotFound(NotFound): message_format = _("%(prior_role_id)s does not imply %(implied_role_id)s.") class InvalidImpliedRole(Forbidden): message_format = _("%(role_id)s cannot be an implied roles.") class DomainSpecificRoleMismatch(Forbidden): message_format = _( "Project %(project_id)s must be in the same domain " "as the role %(role_id)s being assigned." ) class DomainSpecificRoleNotWithinIdPDomain(Forbidden): message_format = _( "role: %(role_name)s must be within the same domain as " "the identity provider: %(identity_provider)s." 
) class DomainIdInvalid(ValidationError): message_format = _("Domain ID does not conform to required UUID format.") class RoleAssignmentNotFound(NotFound): message_format = _( "Could not find role assignment with role: " "%(role_id)s, user or group: %(actor_id)s, " "project, domain, or system: %(target_id)s." ) class RegionNotFound(NotFound): message_format = _("Could not find region: %(region_id)s.") class ServiceNotFound(NotFound): message_format = _("Could not find service: %(service_id)s.") class DomainNotFound(NotFound): message_format = _("Could not find domain: %(domain_id)s.") class ProjectNotFound(NotFound): message_format = _("Could not find project: %(project_id)s.") class ProjectTagNotFound(NotFound): message_format = _("Could not find project tag: %(project_tag)s.") class TokenNotFound(NotFound): message_format = _("Could not find token: %(token_id)s.") class UserNotFound(NotFound): message_format = _("Could not find user: %(user_id)s.") class GroupNotFound(NotFound): message_format = _("Could not find group: %(group_id)s.") class MappingNotFound(NotFound): message_format = _("Could not find mapping: %(mapping_id)s.") class TrustNotFound(NotFound): message_format = _("Could not find trust: %(trust_id)s.") class TrustUseLimitReached(Forbidden): message_format = _("No remaining uses for trust: %(trust_id)s.") class CredentialNotFound(NotFound): message_format = _("Could not find credential: %(credential_id)s.") class VersionNotFound(NotFound): message_format = _("Could not find version: %(version)s.") class EndpointGroupNotFound(NotFound): message_format = _("Could not find Endpoint Group: %(endpoint_group_id)s.") class IdentityProviderNotFound(NotFound): message_format = _("Could not find Identity Provider: %(idp_id)s.") class ServiceProviderNotFound(NotFound): message_format = _("Could not find Service Provider: %(sp_id)s.") class FederatedProtocolNotFound(NotFound): message_format = _( "Could not find federated protocol %(protocol_id)s for" " Identity 
Provider: %(idp_id)s." ) class PublicIDNotFound(NotFound): # This is used internally and mapped to either User/GroupNotFound or, # Assertion before the exception leaves Keystone. message_format = "%(id)s" class RegisteredLimitNotFound(NotFound): message_format = _("Could not find registered limit for %(id)s.") class LimitNotFound(NotFound): message_format = _("Could not find limit for %(id)s.") class NoLimitReference(Forbidden): message_format = _( "Unable to create a limit that has no corresponding " "registered limit." ) class RegisteredLimitError(ForbiddenNotSecurity): message_format = _( "Unable to update or delete registered limit %(id)s " "because there are project limits associated with it." ) class DomainConfigNotFound(NotFound): message_format = _( 'Could not find %(group_or_option)s in domain ' 'configuration for domain %(domain_id)s.' ) class ConfigRegistrationNotFound(Exception): # This is used internally between the domain config backend and the # manager, so should not escape to the client. If it did, it is a coding # error on our part, and would end up, appropriately, as a 500 error. pass class ApplicationCredentialNotFound(NotFound): message_format = _( "Could not find Application Credential: " "%(application_credential_id)s." ) class AccessRuleNotFound(NotFound): message_format = _("Could not find Access Rule: %(access_rule_id)s.") class Conflict(Error): message_format = _( "Conflict occurred attempting to store %(type)s - %(details)s." ) code = int(http.client.CONFLICT) title = http.client.responses[http.client.CONFLICT] class UnexpectedError(SecurityError): """Avoids exposing details of failures, unless in insecure_debug mode.""" message_format = _( "An unexpected error prevented the server " "from fulfilling your request." ) debug_message_format = _( "An unexpected error prevented the server " "from fulfilling your request: %(exception)s." 
) def _build_message(self, message, **kwargs): # Ensure that exception has a value to be extra defensive for # substitutions and make sure the exception doesn't raise an # exception. kwargs.setdefault('exception', '') return super()._build_message( message or self.debug_message_format, **kwargs ) code = int(http.client.INTERNAL_SERVER_ERROR) title = http.client.responses[http.client.INTERNAL_SERVER_ERROR] class TrustConsumeMaximumAttempt(UnexpectedError): debug_message_format = _( "Unable to consume trust %(trust_id)s. Unable to acquire lock." ) class MalformedEndpoint(UnexpectedError): debug_message_format = _( "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." ) class MappedGroupNotFound(UnexpectedError): debug_message_format = _( "Group %(group_id)s returned by mapping " "%(mapping_id)s was not found in the backend." ) class MetadataFileError(UnexpectedError): debug_message_format = _("Error while reading metadata file: %(reason)s.") class DirectMappingError(UnexpectedError): debug_message_format = _( "Local section in mapping %(mapping_id)s refers " "to a remote match that doesn't exist " "(e.g. {0} in a local section)." ) class AssignmentTypeCalculationError(UnexpectedError): debug_message_format = _( 'Unexpected combination of grant attributes - ' 'User: %(user_id)s, Group: %(group_id)s, Project: %(project_id)s, ' 'Domain: %(domain_id)s.' ) class NotImplemented(Error): message_format = _( "The action you have requested has not been implemented." ) code = int(http.client.NOT_IMPLEMENTED) title = http.client.responses[http.client.NOT_IMPLEMENTED] class Gone(Error): message_format = _( "The service you have requested is no" " longer available on this server." ) code = int(http.client.GONE) title = http.client.responses[http.client.GONE] class ConfigFileNotFound(UnexpectedError): debug_message_format = _( "The Keystone configuration file %(config_file)s " "could not be found." 
) class KeysNotFound(UnexpectedError): debug_message_format = _( 'No encryption keys found; run keystone-manage ' 'fernet_setup to bootstrap one.' ) class MultipleSQLDriversInConfig(UnexpectedError): debug_message_format = _( 'The Keystone domain-specific configuration has ' 'specified more than one SQL driver (only one is ' 'permitted): %(source)s.' ) class MigrationNotProvided(Exception): def __init__(self, mod_name, path): super().__init__( _( "%(mod_name)s doesn't provide database migrations. The migration" " repository path at %(path)s doesn't exist or isn't a directory." ) % {'mod_name': mod_name, 'path': path} ) class UnsupportedTokenVersionException(UnexpectedError): debug_message_format = _('Token version is unrecognizable or unsupported.') class SAMLSigningError(UnexpectedError): debug_message_format = _( 'Unable to sign SAML assertion. It is likely ' 'that this server does not have xmlsec1 ' 'installed or this is the result of ' 'misconfiguration. Reason %(reason)s.' ) class OAuthHeadersMissingError(UnexpectedError): debug_message_format = _( 'No Authorization headers found, cannot proceed ' 'with OAuth related calls. If running under ' 'HTTPd or Apache, ensure WSGIPassAuthorization ' 'is set to On.' ) class TokenlessAuthConfigError(ValidationError): message_format = _( 'Could not determine Identity Provider ID. The ' 'configuration option %(issuer_attribute)s ' 'was not found in the request environment.' ) class CredentialEncryptionError(Exception): message_format = _( "An unexpected error prevented the server " "from accessing encrypted credentials." ) class LDAPServerConnectionError(UnexpectedError): debug_message_format = _( 'Unable to establish a connection to LDAP Server (%(url)s).' 
) class LDAPInvalidCredentialsError(UnexpectedError): message_format = _( 'Unable to authenticate against Identity backend - ' 'Invalid username or password' ) class LDAPSizeLimitExceeded(UnexpectedError): message_format = _( 'Number of User/Group entities returned by LDAP ' 'exceeded size limit. Contact your LDAP ' 'administrator.' ) class CacheDeserializationError(Exception): def __init__(self, obj, data): super().__init__( _('Failed to deserialize %(obj)s. Data is %(data)s') % {'obj': obj, 'data': data} ) class ResourceUpdateForbidden(ForbiddenNotSecurity): message_format = _( 'Unable to update immutable %(type)s resource: ' '`%(resource_id)s. Set resource option "immutable" ' 'to false first.' ) class ResourceDeleteForbidden(ForbiddenNotSecurity): message_format = _( 'Unable to delete immutable %(type)s resource: ' '`%(resource_id)s. Set resource option "immutable" ' 'to false first.' ) class OAuth2Error(Error): def __init__(self, code, title, error_title, message): self.code = code self.title = title self.error_title = error_title self.message_format = message class OAuth2InvalidClient(OAuth2Error): def __init__(self, code, title, message): error_title = 'invalid_client' super().__init__(code, title, error_title, message) class OAuth2InvalidRequest(OAuth2Error): def __init__(self, code, title, message): error_title = 'invalid_request' super().__init__(code, title, error_title, message) class OAuth2UnsupportedGrantType(OAuth2Error): def __init__(self, code, title, message): error_title = 'unsupported_grant_type' super().__init__(code, title, error_title, message) class OAuth2OtherError(OAuth2Error): def __init__(self, code, title, message): error_title = 'other_error' super().__init__(code, title, error_title, message) class RedirectRequired(Exception): """Error class for redirection. Child classes should define an HTTP redirect url message_format. 
""" redirect_url = None code = http.client.FOUND def __init__(self, redirect_url, **kwargs): self.redirect_url = redirect_url super().__init__(**kwargs) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/federation/0000775000175000017500000000000000000000000017444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/__init__.py0000664000175000017500000000117100000000000021555 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.federation.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/federation/backends/0000775000175000017500000000000000000000000021216 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/backends/__init__.py0000664000175000017500000000000000000000000023315 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/backends/base.py0000664000175000017500000002560200000000000022507 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class FederationDriverBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_idp(self, idp_id, idp): """Create an identity provider. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_idp(self, idp_id): """Delete an identity provider. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp(self, idp_id): """Get an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_idp_from_remote_id(self, remote_id): """Get an identity provider by remote ID. :param remote_id: ID of remote IdP :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_idp(self, idp_id, idp): """Update an identity provider by ID. :param idp_id: ID of IdP object :type idp_id: string :param idp: idp object :type idp: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: idp ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_protocol(self, idp_id, protocol_id, protocol): """Add an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_protocol(self, idp_id, protocol_id, protocol): """Change an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :param protocol: protocol object :type protocol: dict :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. 
:returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_protocol(self, idp_id, protocol_id): """Get an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: protocol ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_protocols(self, idp_id): """List an IdP's supported protocols. :param idp_id: ID of IdP object :type idp_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :returns: list of protocol ref :rtype: list of dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_protocol(self, idp_id, protocol_id): """Delete an IdP-Protocol configuration. :param idp_id: ID of IdP object :type idp_id: string :param protocol_id: ID of protocol object :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_mapping(self, mapping_id, mapping): """Create a mapping. :param mapping_id: ID of mapping object :type mapping_id: string :param mapping: mapping ref with mapping name :type mapping: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_mapping(self, mapping_id): """Delete a mapping. :param mapping_id: id of mapping to delete :type mapping_ref: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_mapping(self, mapping_id, mapping_ref): """Update a mapping. 
:param mapping_id: id of mapping to update :type mapping_id: string :param mapping_ref: new mapping ref :type mapping_ref: dict :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_mappings(self): """List all mappings. :returns: list of mapping refs :rtype: list of dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping(self, mapping_id): """Get a mapping, returns the mapping based on mapping_id. :param mapping_id: id of mapping to get :type mapping_ref: string :raises keystone.exception.MappingNotFound: If the mapping cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): """Get mapping based on idp_id and protocol_id. :param idp_id: id of the identity provider :type idp_id: string :param protocol_id: id of the protocol :type protocol_id: string :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. :raises keystone.exception.FederatedProtocolNotFound: If the federated protocol cannot be found. :returns: mapping ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_sp(self, sp_id, sp): """Create a service provider. :param sp_id: id of the service provider :type sp_id: string :param sp: service provider object :type sp: dict :returns: service provider ref :rtype: dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_sp(self, sp_id): """Delete a service provider. :param sp_id: id of the service provider :type sp_id: string :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_sp(self, sp_id): """Get a service provider. 
:param sp_id: id of the service provider :type sp_id: string :returns: service provider ref :rtype: dict :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_sp(self, sp_id, sp): """Update a service provider. :param sp_id: id of the service provider :type sp_id: string :param sp: service prvider object :type sp: dict :returns: service provider ref :rtype: dict :raises keystone.exception.ServiceProviderNotFound: If the service provider doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_enabled_service_providers(self): """List enabled service providers for Service Catalog. Service Provider in a catalog contains three attributes: ``id``, ``auth_url``, ``sp_url``, where: - id is a unique, user defined identifier for service provider object - auth_url is an authentication URL of remote Keystone - sp_url a URL accessible at the remote service provider where SAML assertion is transmitted. :returns: list of dictionaries with enabled service providers :rtype: list of dicts """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_idps(self, hints): """List all identity providers. :param hints: filter hints which the driver should implement if at all possible. :returns: list of idp refs :rtype: list of dicts :raises keystone.exception.IdentityProviderNotFound: If the IdP doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_sps(self, hints): """List all service providers. :param hints: filter hints which the driver should implement if at all possible. :returns: List of service provider ref objects :rtype: list of dicts :raises keystone.exception.ServiceProviderNotFound: If the SP doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/backends/sql.py0000664000175000017500000003671400000000000022402 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_serialization import jsonutils from sqlalchemy import orm from keystone.common import sql import keystone.conf from keystone import exception from keystone.federation.backends import base from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) class FederationProtocolModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'federation_protocol' attributes = ['id', 'idp_id', 'mapping_id', 'remote_id_attribute'] mutable_attributes = frozenset(['mapping_id', 'remote_id_attribute']) id = sql.Column(sql.String(64), primary_key=True) idp_id = sql.Column( sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True, ) mapping_id = sql.Column(sql.String(64), nullable=False) remote_id_attribute = sql.Column(sql.String(64)) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class 
IdentityProviderModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'identity_provider' attributes = [ 'id', 'domain_id', 'enabled', 'description', 'remote_ids', 'authorization_ttl', ] mutable_attributes = frozenset( ['description', 'enabled', 'remote_ids', 'authorization_ttl'] ) id = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) authorization_ttl = sql.Column(sql.Integer, nullable=True) remote_ids = orm.relationship( 'IdPRemoteIdsModel', order_by='IdPRemoteIdsModel.remote_id', cascade='all, delete-orphan', ) expiring_user_group_memberships = orm.relationship( 'ExpiringUserGroupMembership', cascade='all, delete-orphan', backref="idp", ) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() remote_ids_list = new_dictionary.pop('remote_ids', None) if not remote_ids_list: remote_ids_list = [] identity_provider = cls(**new_dictionary) remote_ids = [] # NOTE(fmarco76): the remote_ids_list contains only remote ids # associated with the IdP because of the "relationship" established in # sqlalchemy and corresponding to the FK in the idp_remote_ids table for remote in remote_ids_list: remote_ids.append(IdPRemoteIdsModel(remote_id=remote)) identity_provider.remote_ids = remote_ids return identity_provider def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['remote_ids'] = [] for remote in self.remote_ids: d['remote_ids'].append(remote.remote_id) return d class IdPRemoteIdsModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'idp_remote_ids' attributes = ['idp_id', 'remote_id'] mutable_attributes = frozenset(['idp_id', 'remote_id']) idp_id = sql.Column( sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), ) remote_id = sql.Column(sql.String(255), primary_key=True) 
@classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class MappingModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'mapping' attributes = ['id', 'rules', 'schema_version'] id = sql.Column(sql.String(64), primary_key=True) rules = sql.Column(sql.JsonBlob(), nullable=False) schema_version = sql.Column( sql.String(5), nullable=False, server_default='1.0' ) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() new_dictionary['rules'] = jsonutils.dumps(new_dictionary['rules']) return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) d['rules'] = jsonutils.loads(d['rules']) return d class ServiceProviderModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'service_provider' attributes = [ 'auth_url', 'id', 'enabled', 'description', 'relay_state_prefix', 'sp_url', ] mutable_attributes = frozenset( ['auth_url', 'description', 'enabled', 'relay_state_prefix', 'sp_url'] ) id = sql.Column(sql.String(64), primary_key=True) enabled = sql.Column(sql.Boolean, nullable=False) description = sql.Column(sql.Text(), nullable=True) auth_url = sql.Column(sql.String(256), nullable=False) sp_url = sql.Column(sql.String(256), nullable=False) relay_state_prefix = sql.Column(sql.String(256), nullable=False) @classmethod def from_dict(cls, dictionary): new_dictionary = dictionary.copy() return cls(**new_dictionary) def to_dict(self): """Return a dictionary with model's attributes.""" d = dict() for attr in self.__class__.attributes: d[attr] = getattr(self, attr) return d class Federation(base.FederationDriverBase): _CONFLICT_LOG_MSG = 'Conflict %(conflict_type)s: %(details)s' def _handle_idp_conflict(self, e): 
conflict_type = 'identity_provider' details = str(e) LOG.debug( self._CONFLICT_LOG_MSG, {'conflict_type': conflict_type, 'details': details}, ) if 'remote_id' in details: msg = _('Duplicate remote ID: %s') else: msg = _('Duplicate entry: %s') msg = msg % e.value raise exception.Conflict(type=conflict_type, details=msg) # Identity Provider CRUD def create_idp(self, idp_id, idp): idp['id'] = idp_id try: with sql.session_for_write() as session: idp_ref = IdentityProviderModel.from_dict(idp) session.add(idp_ref) return idp_ref.to_dict() except sql.DBDuplicateEntry as e: self._handle_idp_conflict(e) def delete_idp(self, idp_id): with sql.session_for_write() as session: self._delete_assigned_protocols(session, idp_id) idp_ref = self._get_idp(session, idp_id) session.delete(idp_ref) def _get_idp(self, session, idp_id): idp_ref = session.get(IdentityProviderModel, idp_id) if not idp_ref: raise exception.IdentityProviderNotFound(idp_id=idp_id) return idp_ref def _get_idp_from_remote_id(self, session, remote_id): q = session.query(IdPRemoteIdsModel) q = q.filter_by(remote_id=remote_id) try: return q.one() except sql.NotFound: raise exception.IdentityProviderNotFound(idp_id=remote_id) def list_idps(self, hints=None): with sql.session_for_read() as session: query = session.query(IdentityProviderModel) idps = sql.filter_limit_query(IdentityProviderModel, query, hints) idps_list = [idp.to_dict() for idp in idps] return idps_list def get_idp(self, idp_id): with sql.session_for_read() as session: idp_ref = self._get_idp(session, idp_id) return idp_ref.to_dict() def get_idp_from_remote_id(self, remote_id): with sql.session_for_read() as session: ref = self._get_idp_from_remote_id(session, remote_id) return ref.to_dict() def update_idp(self, idp_id, idp): try: with sql.session_for_write() as session: idp_ref = self._get_idp(session, idp_id) old_idp = idp_ref.to_dict() old_idp.update(idp) new_idp = IdentityProviderModel.from_dict(old_idp) for attr in 
IdentityProviderModel.mutable_attributes: setattr(idp_ref, attr, getattr(new_idp, attr)) return idp_ref.to_dict() except sql.DBDuplicateEntry as e: self._handle_idp_conflict(e) # Protocol CRUD def _get_protocol(self, session, idp_id, protocol_id): q = session.query(FederationProtocolModel) q = q.filter_by(id=protocol_id, idp_id=idp_id) try: return q.one() except sql.NotFound: kwargs = {'protocol_id': protocol_id, 'idp_id': idp_id} raise exception.FederatedProtocolNotFound(**kwargs) @sql.handle_conflicts(conflict_type='federation_protocol') def create_protocol(self, idp_id, protocol_id, protocol): protocol['id'] = protocol_id protocol['idp_id'] = idp_id with sql.session_for_write() as session: self._get_idp(session, idp_id) protocol_ref = FederationProtocolModel.from_dict(protocol) session.add(protocol_ref) return protocol_ref.to_dict() def update_protocol(self, idp_id, protocol_id, protocol): with sql.session_for_write() as session: proto_ref = self._get_protocol(session, idp_id, protocol_id) old_proto = proto_ref.to_dict() old_proto.update(protocol) new_proto = FederationProtocolModel.from_dict(old_proto) for attr in FederationProtocolModel.mutable_attributes: setattr(proto_ref, attr, getattr(new_proto, attr)) return proto_ref.to_dict() def get_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) return protocol_ref.to_dict() def list_protocols(self, idp_id): with sql.session_for_read() as session: q = session.query(FederationProtocolModel) q = q.filter_by(idp_id=idp_id) protocols = [protocol.to_dict() for protocol in q] return protocols def delete_protocol(self, idp_id, protocol_id): with sql.session_for_write() as session: key_ref = self._get_protocol(session, idp_id, protocol_id) session.delete(key_ref) def _delete_assigned_protocols(self, session, idp_id): query = session.query(FederationProtocolModel) query = query.filter_by(idp_id=idp_id) query.delete() # Mapping CRUD def 
_get_mapping(self, session, mapping_id): mapping_ref = session.get(MappingModel, mapping_id) if not mapping_ref: raise exception.MappingNotFound(mapping_id=mapping_id) return mapping_ref @sql.handle_conflicts(conflict_type='mapping') def create_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') ref['schema_version'] = mapping.get('schema_version') with sql.session_for_write() as session: mapping_ref = MappingModel.from_dict(ref) session.add(mapping_ref) return mapping_ref.to_dict() def delete_mapping(self, mapping_id): with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) session.delete(mapping_ref) def list_mappings(self): with sql.session_for_read() as session: mappings = session.query(MappingModel) return [x.to_dict() for x in mappings] def get_mapping(self, mapping_id): with sql.session_for_read() as session: mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() @sql.handle_conflicts(conflict_type='mapping') def update_mapping(self, mapping_id, mapping): ref = {} ref['id'] = mapping_id ref['rules'] = mapping.get('rules') if mapping.get('schema_version'): ref['schema_version'] = mapping.get('schema_version') with sql.session_for_write() as session: mapping_ref = self._get_mapping(session, mapping_id) old_mapping = mapping_ref.to_dict() old_mapping.update(ref) new_mapping = MappingModel.from_dict(old_mapping) for attr in MappingModel.attributes: setattr(mapping_ref, attr, getattr(new_mapping, attr)) return mapping_ref.to_dict() def get_mapping_from_idp_and_protocol(self, idp_id, protocol_id): with sql.session_for_read() as session: protocol_ref = self._get_protocol(session, idp_id, protocol_id) mapping_id = protocol_ref.mapping_id mapping_ref = self._get_mapping(session, mapping_id) return mapping_ref.to_dict() # Service Provider CRUD @sql.handle_conflicts(conflict_type='service_provider') def create_sp(self, sp_id, sp): sp['id'] = sp_id with 
sql.session_for_write() as session: sp_ref = ServiceProviderModel.from_dict(sp) session.add(sp_ref) return sp_ref.to_dict() def delete_sp(self, sp_id): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) session.delete(sp_ref) def _get_sp(self, session, sp_id): sp_ref = session.get(ServiceProviderModel, sp_id) if not sp_ref: raise exception.ServiceProviderNotFound(sp_id=sp_id) return sp_ref def list_sps(self, hints=None): with sql.session_for_read() as session: query = session.query(ServiceProviderModel) sps = sql.filter_limit_query(ServiceProviderModel, query, hints) sps_list = [sp.to_dict() for sp in sps] return sps_list def get_sp(self, sp_id): with sql.session_for_read() as session: sp_ref = self._get_sp(session, sp_id) return sp_ref.to_dict() def update_sp(self, sp_id, sp): with sql.session_for_write() as session: sp_ref = self._get_sp(session, sp_id) old_sp = sp_ref.to_dict() old_sp.update(sp) new_sp = ServiceProviderModel.from_dict(old_sp) for attr in ServiceProviderModel.mutable_attributes: setattr(sp_ref, attr, getattr(new_sp, attr)) return sp_ref.to_dict() def get_enabled_service_providers(self): with sql.session_for_read() as session: service_providers = session.query(ServiceProviderModel) service_providers = service_providers.filter_by(enabled=True) return service_providers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/constants.py0000664000175000017500000000123100000000000022027 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. FEDERATION = 'OS-FEDERATION' IDENTITY_PROVIDER = 'OS-FEDERATION:identity_provider' PROTOCOL = 'OS-FEDERATION:protocol' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/core.py0000664000175000017500000001625100000000000020753 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Federation service.""" import uuid from oslo_log import log from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.federation import utils from keystone.i18n import _ from keystone import notifications LOG = log.getLogger(__name__) # This is a general cache region for service providers. MEMOIZE = cache.get_memoization_decorator(group='federation') CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class Manager(manager.Manager): """Default pivot point for the Federation backend. 
See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.federation' _provides_api = 'federation_api' def __init__(self): super().__init__(CONF.federation.driver) notifications.register_event_callback( notifications.ACTIONS.internal, notifications.DOMAIN_DELETED, self._cleanup_identity_provider, ) def _cleanup_identity_provider( self, service, resource_type, operation, payload ): domain_id = payload['resource_info'] hints = driver_hints.Hints() hints.add_filter('domain_id', domain_id) idps = self.driver.list_idps(hints=hints) for idp in idps: try: self.delete_idp(idp['id']) except exception.IdentityProviderNotFound: LOG.debug( ( 'Identity Provider %(idpid)s not found when ' 'deleting domain contents for %(domainid)s, ' 'continuing with cleanup.' ), {'idpid': idp['id'], 'domainid': domain_id}, ) def create_idp(self, idp_id, idp): auto_created_domain = False if not idp.get('domain_id'): idp['domain_id'] = self._create_idp_domain(idp_id) auto_created_domain = True else: self._assert_valid_domain_id(idp['domain_id']) try: return self.driver.create_idp(idp_id, idp) except exception.Conflict: # If there is a conflict storing the Identity Provider in the # backend, then we need to make sure we clean up the domain we just # created for it and raise the Conflict exception afterwards. if auto_created_domain: self._cleanup_idp_domain(idp['domain_id']) raise def delete_idp(self, idp_id): self.driver.delete_idp(idp_id) # NOTE(lbragstad): If an identity provider is removed from the system, # then we need to invalidate the token cache. Otherwise it will be # possible for federated tokens to be considered valid after a service # provider removes a federated identity provider resource. reason = ( 'The token cache is being invalidated because identity provider ' '%(idp_id)s has been deleted. 
Authorization for federated users ' 'will be recalculated and enforced accordingly the next time ' 'they authenticate or validate a token.' % {'idp_id': idp_id} ) notifications.invalidate_token_cache_notification(reason) def _cleanup_idp_domain(self, domain_id): domain = {'enabled': False} PROVIDERS.resource_api.update_domain(domain_id, domain) PROVIDERS.resource_api.delete_domain(domain_id) def _create_idp_domain(self, idp_id): domain_id = uuid.uuid4().hex desc = 'Auto generated federated domain for Identity Provider: ' desc += idp_id domain = { 'id': domain_id, 'name': domain_id, 'description': desc, 'enabled': True, } PROVIDERS.resource_api.create_domain(domain['id'], domain) return domain_id def _assert_valid_domain_id(self, domain_id): PROVIDERS.resource_api.get_domain(domain_id) @MEMOIZE def get_enabled_service_providers(self): """List enabled service providers for Service Catalog. Service Provider in a catalog contains three attributes: ``id``, ``auth_url``, ``sp_url``, where: - id is a unique, user defined identifier for service provider object - auth_url is an authentication URL of remote Keystone - sp_url a URL accessible at the remote service provider where SAML assertion is transmitted. 
:returns: list of dictionaries with enabled service providers :rtype: list of dicts """ def normalize(sp): ref = {'auth_url': sp.auth_url, 'id': sp.id, 'sp_url': sp.sp_url} return ref service_providers = self.driver.get_enabled_service_providers() return [normalize(sp) for sp in service_providers] def create_sp(self, sp_id, service_provider): sp_ref = self.driver.create_sp(sp_id, service_provider) self.get_enabled_service_providers.invalidate(self) return sp_ref def delete_sp(self, sp_id): self.driver.delete_sp(sp_id) self.get_enabled_service_providers.invalidate(self) def update_sp(self, sp_id, service_provider): sp_ref = self.driver.update_sp(sp_id, service_provider) self.get_enabled_service_providers.invalidate(self) return sp_ref def evaluate(self, idp_id, protocol_id, assertion_data): mapping = self.get_mapping_from_idp_and_protocol(idp_id, protocol_id) rule_processor = utils.create_attribute_mapping_rules_processor( mapping ) mapped_properties = rule_processor.process(assertion_data) return mapped_properties, mapping['id'] def create_protocol(self, idp_id, protocol_id, protocol): self._validate_mapping_exists(protocol['mapping_id']) return self.driver.create_protocol(idp_id, protocol_id, protocol) def delete_protocol(self, idp_id, protocol_id): hints = driver_hints.Hints() hints.add_filter('protocol_id', protocol_id) self.driver.delete_protocol(idp_id, protocol_id) def update_protocol(self, idp_id, protocol_id, protocol): self._validate_mapping_exists(protocol['mapping_id']) return self.driver.update_protocol(idp_id, protocol_id, protocol) def _validate_mapping_exists(self, mapping_id): try: self.driver.get_mapping(mapping_id) except exception.MappingNotFound: msg = _('Invalid mapping id: %s') raise exception.ValidationError(message=(msg % mapping_id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/idp.py0000664000175000017500000006537400000000000020611 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import os import subprocess # nosec : see comments in the code below import uuid from oslo_log import log from oslo_utils import fileutils from oslo_utils import timeutils import saml2 from saml2 import client_base from saml2 import md from saml2.profile import ecp from saml2 import saml from saml2 import samlp from saml2.schema import soapenv from saml2 import sigver from saml2 import xmldsig from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ LOG = log.getLogger(__name__) CONF = keystone.conf.CONF class SAMLGenerator: """A class to generate SAML assertions.""" def __init__(self): self.assertion_id = uuid.uuid4().hex def samlize_token( self, issuer, recipient, user, user_domain_name, roles, project, project_domain_name, groups, expires_in=None, ): """Convert Keystone attributes to a SAML assertion. :param issuer: URL of the issuing party :type issuer: string :param recipient: URL of the recipient :type recipient: string :param user: User name :type user: string :param user_domain_name: User Domain name :type user_domain_name: string :param roles: List of role names :type roles: list :param project: Project name :type project: string :param project_domain_name: Project Domain name :type project_domain_name: string :param groups: List of strings of user groups and domain name, where strings are serialized dictionaries. 
:type groups: list :param expires_in: Sets how long the assertion is valid for, in seconds :type expires_in: int :returns: XML object """ expiration_time = self._determine_expiration_time(expires_in) status = self._create_status() saml_issuer = self._create_issuer(issuer) subject = self._create_subject(user, expiration_time, recipient) attribute_statement = self._create_attribute_statement( user, user_domain_name, roles, project, project_domain_name, groups ) authn_statement = self._create_authn_statement(issuer, expiration_time) signature = self._create_signature() assertion = self._create_assertion( saml_issuer, signature, subject, authn_statement, attribute_statement, ) assertion = _sign_assertion(assertion) response = self._create_response( saml_issuer, status, assertion, recipient ) return response def _determine_expiration_time(self, expires_in): if expires_in is None: expires_in = CONF.saml.assertion_expiration_time now = timeutils.utcnow() future = now + datetime.timedelta(seconds=expires_in) return utils.isotime(future, subsecond=True) def _create_status(self): """Create an object that represents a SAML Status. :returns: XML object """ status = samlp.Status() status_code = samlp.StatusCode() status_code.value = samlp.STATUS_SUCCESS status_code.set_text('') status.status_code = status_code return status def _create_issuer(self, issuer_url): """Create an object that represents a SAML Issuer. https://acme.com/FIM/sps/openstack/saml20 :returns: XML object """ issuer = saml.Issuer() issuer.format = saml.NAMEID_FORMAT_ENTITY issuer.set_text(issuer_url) return issuer def _create_subject(self, user, expiration_time, recipient): """Create an object that represents a SAML Subject. 
john@smith.com :returns: XML object """ name_id = saml.NameID() name_id.set_text(user) subject_conf_data = saml.SubjectConfirmationData() subject_conf_data.recipient = recipient subject_conf_data.not_on_or_after = expiration_time subject_conf = saml.SubjectConfirmation() subject_conf.method = saml.SCM_BEARER subject_conf.subject_confirmation_data = subject_conf_data subject = saml.Subject() subject.subject_confirmation = subject_conf subject.name_id = name_id return subject def _create_attribute_statement( self, user, user_domain_name, roles, project, project_domain_name, groups, ): """Create an object that represents a SAML AttributeStatement. test_user Default admin member development Default JSON:{"name":"group1","domain":{"name":"Default"}} JSON:{"name":"group2","domain":{"name":"Default"}} :returns: XML object """ def _build_attribute(attribute_name, attribute_values): attribute = saml.Attribute() attribute.name = attribute_name for value in attribute_values: attribute_value = saml.AttributeValue() attribute_value.set_text(value) attribute.attribute_value.append(attribute_value) return attribute user_attribute = _build_attribute('openstack_user', [user]) roles_attribute = _build_attribute('openstack_roles', roles) project_attribute = _build_attribute('openstack_project', [project]) project_domain_attribute = _build_attribute( 'openstack_project_domain', [project_domain_name] ) user_domain_attribute = _build_attribute( 'openstack_user_domain', [user_domain_name] ) attribute_statement = saml.AttributeStatement() attribute_statement.attribute.append(user_attribute) attribute_statement.attribute.append(roles_attribute) attribute_statement.attribute.append(project_attribute) attribute_statement.attribute.append(project_domain_attribute) attribute_statement.attribute.append(user_domain_attribute) if groups: groups_attribute = _build_attribute('openstack_groups', groups) attribute_statement.attribute.append(groups_attribute) return attribute_statement def 
_create_authn_statement(self, issuer, expiration_time): """Create an object that represents a SAML AuthnStatement. urn:oasis:names:tc:SAML:2.0:ac:classes:Password https://acme.com/FIM/sps/openstack/saml20 :returns: XML object """ authn_statement = saml.AuthnStatement() authn_statement.authn_instant = utils.isotime() authn_statement.session_index = uuid.uuid4().hex authn_statement.session_not_on_or_after = expiration_time authn_context = saml.AuthnContext() authn_context_class = saml.AuthnContextClassRef() authn_context_class.set_text(saml.AUTHN_PASSWORD) authn_authority = saml.AuthenticatingAuthority() authn_authority.set_text(issuer) authn_context.authn_context_class_ref = authn_context_class authn_context.authenticating_authority = authn_authority authn_statement.authn_context = authn_context return authn_statement def _create_assertion( self, issuer, signature, subject, authn_statement, attribute_statement ): """Create an object that represents a SAML Assertion. ... ... ... ... ... :returns: XML object """ assertion = saml.Assertion() assertion.id = self.assertion_id assertion.issue_instant = utils.isotime() assertion.version = '2.0' assertion.issuer = issuer assertion.signature = signature assertion.subject = subject assertion.authn_statement = authn_statement assertion.attribute_statement = attribute_statement return assertion def _create_response(self, issuer, status, assertion, recipient): """Create an object that represents a SAML Response. ... ... ... :returns: XML object """ response = samlp.Response() response.id = uuid.uuid4().hex response.destination = recipient response.issue_instant = utils.isotime() response.version = '2.0' response.issuer = issuer response.status = status response.assertion = assertion return response def _create_signature(self): """Create an object that represents a SAML . This must be filled with algorithms that the signing binary will apply in order to sign the whole message. Currently we enforce X509 signing. 
Example of the template:: :returns: XML object """ canonicalization_method = xmldsig.CanonicalizationMethod() # TODO(stephenfin): Drop when we remove support for pysaml < 7.1.0 if hasattr(xmldsig, 'TRANSFORM_C14N'): # >= 7.1.0 canonicalization_method.algorithm = xmldsig.TRANSFORM_C14N else: # < 7.1.0 canonicalization_method.algorithm = xmldsig.ALG_EXC_C14N signature_method = xmldsig.SignatureMethod( algorithm=xmldsig.SIG_RSA_SHA1 ) transforms = xmldsig.Transforms() envelope_transform = xmldsig.Transform( algorithm=xmldsig.TRANSFORM_ENVELOPED ) # TODO(stephenfin): Drop when we remove support for pysaml < 7.1.0 if hasattr(xmldsig, 'TRANSFORM_C14N'): # >= 7.1.0 c14_transform = xmldsig.Transform(algorithm=xmldsig.TRANSFORM_C14N) else: # < 7.1.0 c14_transform = xmldsig.Transform(algorithm=xmldsig.ALG_EXC_C14N) transforms.transform = [envelope_transform, c14_transform] digest_method = xmldsig.DigestMethod(algorithm=xmldsig.DIGEST_SHA1) digest_value = xmldsig.DigestValue() reference = xmldsig.Reference() reference.uri = '#' + self.assertion_id reference.digest_method = digest_method reference.digest_value = digest_value reference.transforms = transforms signed_info = xmldsig.SignedInfo() signed_info.canonicalization_method = canonicalization_method signed_info.signature_method = signature_method signed_info.reference = reference key_info = xmldsig.KeyInfo() key_info.x509_data = xmldsig.X509Data() signature = xmldsig.Signature() signature.signed_info = signed_info signature.signature_value = xmldsig.SignatureValue() signature.key_info = key_info return signature def _verify_assertion_binary_is_installed(): """Make sure the specified xmlsec binary is installed. If the binary specified in configuration isn't installed, make sure we leave some sort of useful error message for operators since the absense of it is going to throw an HTTP 500. """ try: # `check_output` just returns the output of whatever is passed in # (hence the name). 
We don't really care about where the location of # the binary exists, though. We just want to make sure it's actually # installed and if an `CalledProcessError` isn't thrown, it is. subprocess.check_output( # nosec : The contents of this command are # coming from either the default # configuration value for # CONF.saml.xmlsec1_binary or an operator # supplied location for that binary. In # either case, it is safe to assume this # input is coming from a trusted source and # not a possible attacker (over the API). ['/usr/bin/which', CONF.saml.xmlsec1_binary] ) except subprocess.CalledProcessError: msg = ( 'Unable to locate %(binary)s binary on the system. Check to make ' 'sure it is installed.' ) % {'binary': CONF.saml.xmlsec1_binary} tr_msg = _( 'Unable to locate %(binary)s binary on the system. Check to ' 'make sure it is installed.' ) % {'binary': CONF.saml.xmlsec1_binary} LOG.error(msg) raise exception.SAMLSigningError(reason=tr_msg) def _sign_assertion(assertion): """Sign a SAML assertion. This method utilizes ``xmlsec1`` binary and signs SAML assertions in a separate process. ``xmlsec1`` cannot read input data from stdin so the prepared assertion needs to be serialized and stored in a temporary file. This file will be deleted immediately after ``xmlsec1`` returns. The signed assertion is redirected to a standard output and read using ``subprocess.PIPE`` redirection. A ``saml.Assertion`` class is created from the signed string again and returned. Parameters that are required in the CONF:: * xmlsec_binary * private key file path * public key file path :returns: XML object """ # Ensure that the configured certificate paths do not contain any commas, # before we string format a comma in between them and cause xmlsec1 to # explode like a thousand fiery supernovas made entirely of unsigned SAML. 
for option in ('keyfile', 'certfile'): if ',' in getattr(CONF.saml, option, ''): raise exception.UnexpectedError( 'The configuration value in `keystone.conf [saml] %s` cannot ' 'contain a comma (`,`). Please fix your configuration.' % option ) # xmlsec1 --sign --privkey-pem privkey,cert --id-attr:ID certificates = '{idp_private_key},{idp_public_key}'.format( idp_public_key=CONF.saml.certfile, idp_private_key=CONF.saml.keyfile, ) # Verify that the binary used to create the assertion actually exists on # the system. If it doesn't, log a warning for operators to go and install # it. Requests for assertions will fail with HTTP 500s until the package is # installed, so providing something useful in the logs is about the best we # can do. _verify_assertion_binary_is_installed() command_list = [ CONF.saml.xmlsec1_binary, '--sign', '--privkey-pem', certificates, '--id-attr:ID', 'Assertion', ] file_path = None try: # NOTE(gyee): need to make the namespace prefixes explicit so # they won't get reassigned when we wrap the assertion into # SAML2 response file_path = fileutils.write_to_tempfile( assertion.to_string( nspair={'saml': saml2.NAMESPACE, 'xmldsig': xmldsig.NAMESPACE} ) ) command_list.append(file_path) stdout = subprocess.check_output( command_list, # nosec : The contents # of the command list are coming from # a trusted source because the # executable and arguments all either # come from the config file or are # hardcoded. The command list is # initialized earlier in this function # to a list and it's still a list at # this point in the function. There is # no opportunity for an attacker to # attempt command injection via string # parsing. 
stderr=subprocess.STDOUT, ) except Exception as e: msg = 'Error when signing assertion, reason: %(reason)s%(output)s' LOG.error( msg, { 'reason': e, 'output': ' ' + e.output if hasattr(e, 'output') else '', }, ) raise exception.SAMLSigningError(reason=e) finally: try: if file_path: os.remove(file_path) except OSError: # nosec # The file is already gone, good. pass return saml2.create_class_from_xml_string(saml.Assertion, stdout) class MetadataGenerator: """A class for generating SAML IdP Metadata.""" def generate_metadata(self): """Generate Identity Provider Metadata. Generate and format metadata into XML that can be exposed and consumed by a federated Service Provider. :returns: XML object. :raises keystone.exception.ValidationError: If the required config options aren't set. """ self._ensure_required_values_present() entity_descriptor = self._create_entity_descriptor() entity_descriptor.idpsso_descriptor = self._create_idp_sso_descriptor() return entity_descriptor def _create_entity_descriptor(self): ed = md.EntityDescriptor() ed.entity_id = CONF.saml.idp_entity_id return ed def _create_idp_sso_descriptor(self): def get_cert(): try: return sigver.read_cert_from_file(CONF.saml.certfile, 'pem') except (OSError, sigver.CertificateError) as e: msg = ( 'Cannot open certificate %(cert_file)s.' 'Reason: %(reason)s' ) % {'cert_file': CONF.saml.certfile, 'reason': e} tr_msg = _( 'Cannot open certificate %(cert_file)s.' 
'Reason: %(reason)s' ) % {'cert_file': CONF.saml.certfile, 'reason': e} LOG.error(msg) raise OSError(tr_msg) def key_descriptor(): cert = get_cert() return md.KeyDescriptor( key_info=xmldsig.KeyInfo( x509_data=xmldsig.X509Data( x509_certificate=xmldsig.X509Certificate(text=cert) ) ), use='signing', ) def single_sign_on_service(): idp_sso_endpoint = CONF.saml.idp_sso_endpoint return md.SingleSignOnService( binding=saml2.BINDING_URI, location=idp_sso_endpoint ) def organization(): name = md.OrganizationName( lang=CONF.saml.idp_lang, text=CONF.saml.idp_organization_name ) display_name = md.OrganizationDisplayName( lang=CONF.saml.idp_lang, text=CONF.saml.idp_organization_display_name, ) url = md.OrganizationURL( lang=CONF.saml.idp_lang, text=CONF.saml.idp_organization_url ) return md.Organization( organization_display_name=display_name, organization_url=url, organization_name=name, ) def contact_person(): company = md.Company(text=CONF.saml.idp_contact_company) given_name = md.GivenName(text=CONF.saml.idp_contact_name) surname = md.SurName(text=CONF.saml.idp_contact_surname) email = md.EmailAddress(text=CONF.saml.idp_contact_email) telephone = md.TelephoneNumber( text=CONF.saml.idp_contact_telephone ) contact_type = CONF.saml.idp_contact_type return md.ContactPerson( company=company, given_name=given_name, sur_name=surname, email_address=email, telephone_number=telephone, contact_type=contact_type, ) def name_id_format(): return md.NameIDFormat(text=saml.NAMEID_FORMAT_TRANSIENT) idpsso = md.IDPSSODescriptor() idpsso.protocol_support_enumeration = samlp.NAMESPACE idpsso.key_descriptor = key_descriptor() idpsso.single_sign_on_service = single_sign_on_service() idpsso.name_id_format = name_id_format() if self._check_organization_values(): idpsso.organization = organization() if self._check_contact_person_values(): idpsso.contact_person = contact_person() return idpsso def _ensure_required_values_present(self): """Ensure idp_sso_endpoint and idp_entity_id have values.""" 
if CONF.saml.idp_entity_id is None: msg = _('Ensure configuration option idp_entity_id is set.') raise exception.ValidationError(msg) if CONF.saml.idp_sso_endpoint is None: msg = _('Ensure configuration option idp_sso_endpoint is set.') raise exception.ValidationError(msg) def _check_contact_person_values(self): """Determine if contact information is included in metadata.""" # Check if we should include contact information params = [ CONF.saml.idp_contact_company, CONF.saml.idp_contact_name, CONF.saml.idp_contact_surname, CONF.saml.idp_contact_email, CONF.saml.idp_contact_telephone, CONF.saml.idp_contact_type, ] for value in params: if value is None: return False return True def _check_organization_values(self): """Determine if organization information is included in metadata.""" params = [ CONF.saml.idp_organization_name, CONF.saml.idp_organization_display_name, CONF.saml.idp_organization_url, ] for value in params: if value is None: return False return True class ECPGenerator: """A class for generating an ECP assertion.""" @staticmethod def generate_ecp(saml_assertion, relay_state_prefix): ecp_generator = ECPGenerator() header = ecp_generator._create_header(relay_state_prefix) body = ecp_generator._create_body(saml_assertion) envelope = soapenv.Envelope(header=header, body=body) return envelope def _create_header(self, relay_state_prefix): relay_state_text = relay_state_prefix + uuid.uuid4().hex relay_state = ecp.RelayState( actor=client_base.ACTOR, must_understand='1', text=relay_state_text ) header = soapenv.Header() header.extension_elements = [ saml2.element_to_extension_element(relay_state) ] return header def _create_body(self, saml_assertion): body = soapenv.Body() body.extension_elements = [ saml2.element_to_extension_element(saml_assertion) ] return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/schema.py0000664000175000017500000000760300000000000021264 
0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types basic_property_id = { 'type': 'object', 'properties': {'id': {'type': 'string'}}, 'required': ['id'], 'additionalProperties': False, } saml_create = { 'type': 'object', 'properties': { 'identity': { 'type': 'object', 'properties': { 'token': basic_property_id, 'methods': {'type': 'array'}, }, 'required': ['token'], 'additionalProperties': False, }, 'scope': { 'type': 'object', 'properties': {'service_provider': basic_property_id}, 'required': ['service_provider'], 'additionalProperties': False, }, }, 'required': ['identity', 'scope'], 'additionalProperties': False, } _service_provider_properties = { # NOTE(rodrigods): The database accepts URLs with 256 as max length, # but parameter_types.url uses 225 as max length. 
'auth_url': parameter_types.url, 'sp_url': parameter_types.url, 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'relay_state_prefix': validation.nullable(parameter_types.description), } service_provider_create = { 'type': 'object', 'properties': _service_provider_properties, # NOTE(rodrigods): 'id' is not required since it is passed in the URL 'required': ['auth_url', 'sp_url'], 'additionalProperties': False, } service_provider_update = { 'type': 'object', 'properties': _service_provider_properties, # Make sure at least one property is being updated 'minProperties': 1, 'additionalProperties': False, } _identity_provider_properties_create = { 'enabled': parameter_types.boolean, 'description': validation.nullable(parameter_types.description), 'domain_id': validation.nullable(parameter_types.id_string), 'authorization_ttl': validation.nullable(parameter_types.integer_min0), 'remote_ids': { 'type': ['array', 'null'], 'items': {'type': 'string'}, 'uniqueItems': True, }, } _identity_provider_properties_update = { 'enabled': parameter_types.boolean, 'description': validation.nullable(parameter_types.description), 'authorization_ttl': validation.nullable(parameter_types.integer_min0), 'remote_ids': { 'type': ['array', 'null'], 'items': {'type': 'string'}, 'uniqueItems': True, }, } identity_provider_create = { 'type': 'object', 'properties': _identity_provider_properties_create, 'additionalProperties': False, } identity_provider_update = { 'type': 'object', 'properties': _identity_provider_properties_update, # Make sure at least one property is being updated 'minProperties': 1, 'additionalProperties': False, } _remote_id_attribute_properties = { 'type': 'string', 'maxLength': 64, } _protocol_properties = { 'mapping_id': parameter_types.mapping_id_string, 'remote_id_attribute': _remote_id_attribute_properties, } protocol_create = { 'type': 'object', 'properties': _protocol_properties, 'required': ['mapping_id'], 
'additionalProperties': False, } protocol_update = { 'type': 'object', 'properties': _protocol_properties, 'minProperties': 1, 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/federation/utils.py0000664000175000017500000011042300000000000021157 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utilities for Federation Extension.""" import ast import copy import re import flask import jsonschema from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import timeutils from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class UserType: """User mapping type.""" EPHEMERAL = 'ephemeral' LOCAL = 'local' ROLE_PROPERTIES = { "type": "array", "items": { "type": "object", "required": ["name"], "properties": { "name": {"type": "string"}, }, "additionalProperties": False, }, } PROJECTS_SCHEMA = { "type": "array", "items": { "type": "object", "required": ["name", "roles"], "additionalProperties": False, "properties": {"name": {"type": "string"}, "roles": ROLE_PROPERTIES}, }, } IDP_ATTRIBUTE_MAPPING_SCHEMA_1_0 = { "type": "object", "required": ['rules'], "properties": { "rules": { "minItems": 1, "type": "array", "items": { "type": 
"object", "required": ['local', 'remote'], "additionalProperties": False, "properties": { "local": { "type": "array", "items": { "type": "object", "additionalProperties": False, "properties": { "user": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"}, "email": {"type": "string"}, "domain": { "$ref": "#/definitions/domain" }, "type": { "type": "string", "enum": [ UserType.EPHEMERAL, UserType.LOCAL, ], }, }, "additionalProperties": False, }, "projects": PROJECTS_SCHEMA, "group": { "type": "object", "oneOf": [ {"$ref": "#/definitions/group_by_id"}, { "$ref": "#/definitions/group_by_name" }, ], }, "groups": {"type": "string"}, "group_ids": {"type": "string"}, "domain": {"$ref": "#/definitions/domain"}, }, }, }, "remote": { "minItems": 1, "type": "array", "items": { "type": "object", "oneOf": [ {"$ref": "#/definitions/empty"}, {"$ref": "#/definitions/any_one_of"}, {"$ref": "#/definitions/not_any_of"}, {"$ref": "#/definitions/blacklist"}, {"$ref": "#/definitions/whitelist"}, ], }, }, }, }, }, "schema_version": {"name": {"type": "string"}}, }, "definitions": { "empty": { "type": "object", "required": ['type'], "properties": { "type": {"type": "string"}, }, "additionalProperties": False, }, "any_one_of": { "type": "object", "additionalProperties": False, "required": ['type', 'any_one_of'], "properties": { "type": {"type": "string"}, "any_one_of": {"type": "array"}, "regex": {"type": "boolean"}, }, }, "not_any_of": { "type": "object", "additionalProperties": False, "required": ['type', 'not_any_of'], "properties": { "type": {"type": "string"}, "not_any_of": {"type": "array"}, "regex": {"type": "boolean"}, }, }, "blacklist": { "type": "object", "additionalProperties": False, "required": ['type', 'blacklist'], "properties": { "type": {"type": "string"}, "blacklist": {"type": "array"}, "regex": {"type": "boolean"}, }, }, "whitelist": { "type": "object", "additionalProperties": False, "required": ['type', 'whitelist'], "properties": { 
"type": {"type": "string"}, "whitelist": {"type": "array"}, "regex": {"type": "boolean"}, }, }, "domain": { "type": "object", "properties": { "id": {"type": "string"}, "name": {"type": "string"}, }, "additionalProperties": False, }, "group_by_id": { "type": "object", "properties": {"id": {"type": "string"}}, "additionalProperties": False, "required": ["id"], }, "group_by_name": { "type": "object", "properties": { "name": {"type": "string"}, "domain": {"$ref": "#/definitions/domain"}, }, "additionalProperties": False, "required": ["name", "domain"], }, }, } # `IDP_ATTRIBUTE_MAPPING_SCHEMA_2_0` adds the domain option for projects, # the goal is to work in a similar fashion as `user` and `groups` properties IDP_ATTRIBUTE_MAPPING_SCHEMA_2_0 = copy.deepcopy( IDP_ATTRIBUTE_MAPPING_SCHEMA_1_0 ) PROJECTS_SCHEMA_2_0 = copy.deepcopy(PROJECTS_SCHEMA) PROJECTS_SCHEMA_2_0["items"]["properties"]["domain"] = { # type: ignore[index] "$ref": "#/definitions/domain" } IDP_ATTRIBUTE_MAPPING_SCHEMA_2_0['properties']['rules']['items']['properties'][ # type: ignore[index] 'local' ][ 'items' ][ 'properties' ][ 'projects' ] = PROJECTS_SCHEMA_2_0 def get_default_attribute_mapping_schema_version(): return CONF.federation.attribute_mapping_default_schema_version class DirectMaps: """An abstraction around the remote matches. Each match is treated internally as a list. """ def __init__(self): self._matches = [] def __str__(self): """Return the direct map array as a string.""" return '%s' % self._matches def add(self, values): """Add a matched value to the list of matches. 
:param list value: the match to save """ self._matches.append(values) def __getitem__(self, idx): """Used by Python when executing ``''.format(*DirectMaps())``.""" value = self._matches[idx] if isinstance(value, list) and len(value) == 1: return value[0] else: return value def validate_mapping_structure(ref): version = ref.get( 'schema_version', get_default_attribute_mapping_schema_version() ) LOG.debug( "Validating mapping [%s] using validator from version [%s].", ref, version, ) v = jsonschema.Draft4Validator( IDP_ATTRIBUTE_MAPPING_SCHEMAS[version]['schema'] ) messages = '' for error in sorted(v.iter_errors(ref), key=str): messages = messages + error.message + "\n" if messages: raise exception.ValidationError(messages) def validate_expiration(token): token_expiration_datetime = timeutils.normalize_time( timeutils.parse_isotime(token.expires_at) ) if timeutils.utcnow() > token_expiration_datetime: raise exception.Unauthorized(_('Federation token is expired')) def get_remote_id_parameter(idp, protocol): # NOTE(marco-fargetta): Since we support any protocol ID, we attempt to # retrieve the remote_id_attribute of the protocol ID. It will look up # first if the remote_id_attribute exists. protocol_ref = PROVIDERS.federation_api.get_protocol(idp['id'], protocol) remote_id_parameter = protocol_ref.get('remote_id_attribute') if remote_id_parameter: return remote_id_parameter else: # If it's not registered in the config, then register the option and # try again. This allows the user to register protocols other than # oidc and saml2. try: remote_id_parameter = CONF[protocol]['remote_id_attribute'] except AttributeError: # TODO(dolph): Move configuration registration to keystone.conf CONF.register_opt( cfg.StrOpt('remote_id_attribute'), group=protocol ) try: remote_id_parameter = CONF[protocol]['remote_id_attribute'] except AttributeError: # nosec # No remote ID attr, will be logged and use the default # instead. 
pass if not remote_id_parameter: LOG.debug( 'Cannot find "remote_id_attribute" in configuration ' 'group %s. Trying default location in ' 'group federation.', protocol, ) remote_id_parameter = CONF.federation.remote_id_attribute return remote_id_parameter def validate_idp(idp, protocol, assertion): """The IdP providing the assertion should be registered for the mapping.""" remote_id_parameter = get_remote_id_parameter(idp, protocol) if not remote_id_parameter or not idp['remote_ids']: LOG.debug('Impossible to identify the IdP %s ', idp['id']) # If nothing is defined, the administrator may want to # allow the mapping of every IdP return try: idp_remote_identifier = assertion[remote_id_parameter] except KeyError: msg = _('Could not find Identity Provider identifier in environment') raise exception.ValidationError(msg) if idp_remote_identifier not in idp['remote_ids']: msg = _( 'Incoming identity provider identifier not included ' 'among the accepted identifiers.' ) raise exception.Forbidden(msg) def validate_mapped_group_ids(group_ids, mapping_id, identity_api): """Iterate over group ids and make sure they are present in the backend. This call is not transactional. :param group_ids: IDs of the groups to be checked :type group_ids: list of str :param mapping_id: id of the mapping used for this operation :type mapping_id: str :param identity_api: Identity Manager object used for communication with backend :type identity_api: identity.Manager :raises keystone.exception.MappedGroupNotFound: If the group returned by mapping was not found in the backend. """ for group_id in group_ids: try: identity_api.get_group(group_id) except exception.GroupNotFound: raise exception.MappedGroupNotFound( group_id=group_id, mapping_id=mapping_id ) # TODO(marek-denis): Optimize this function, so the number of calls to the # backend are minimized. def transform_to_group_ids( group_names, mapping_id, identity_api, resource_api ): """Transform groups identified by name/domain to their ids. 
Function accepts list of groups identified by a name and domain giving a list of group ids in return. A message is logged if the group doesn't exist in the backend. Example of group_names parameter:: [ { "name": "group_name", "domain": { "id": "domain_id" }, }, { "name": "group_name_2", "domain": { "name": "domain_name" } } ] :param group_names: list of group identified by name and its domain. :type group_names: list :param mapping_id: id of the mapping used for mapping assertion into local credentials :type mapping_id: str :param identity_api: identity_api object :param resource_api: resource manager object :returns: generator object with group ids """ def resolve_domain(domain): """Return domain id. Input is a dictionary with a domain identified either by a ``id`` or a ``name``. In the latter case system will attempt to fetch domain object from the backend. :returns: domain's id :rtype: str """ domain_id = domain.get('id') or resource_api.get_domain_by_name( domain.get('name') ).get('id') return domain_id for group in group_names: try: group_dict = identity_api.get_group_by_name( group['name'], resolve_domain(group['domain']) ) yield group_dict['id'] except exception.GroupNotFound: LOG.debug('Group %s has no entry in the backend', group['name']) def get_assertion_params_from_env(): LOG.debug('Environment variables: %s', flask.request.environ) prefix = CONF.federation.assertion_prefix for k, v in list(flask.request.environ.items()): if not k.startswith(prefix): continue # These bytes may be decodable as ISO-8859-1 according to Section # 3.2.4 of RFC 7230. Let's assume that our web server plugins are # correctly encoding the data. 
if not isinstance(v, str) and getattr(v, 'decode', False): v = v.decode('ISO-8859-1') yield (k, v) class RuleProcessor: """A class to process assertions and mapping rules.""" class _EvalType: """Mapping rule evaluation types.""" ANY_ONE_OF = 'any_one_of' NOT_ANY_OF = 'not_any_of' BLACKLIST = 'blacklist' WHITELIST = 'whitelist' def __init__(self, mapping_id, rules): """Initialize RuleProcessor. Example rules can be found at: :class:`keystone.tests.mapping_fixtures` :param mapping_id: id for the mapping :type mapping_id: string :param rules: rules from a mapping :type rules: dict """ self.mapping_id = mapping_id self.rules = rules def process(self, assertion_data): """Transform assertion to a dictionary. The dictionary contains mapping of user name and group ids based on mapping rules. This function will iterate through the mapping rules to find assertions that are valid. :param assertion_data: an assertion containing values from an IdP :type assertion_data: dict Example assertion_data:: { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'Tester' } :returns: dictionary with user and group_ids The expected return structure is:: { 'name': 'foobar', 'group_ids': ['abc123', 'def456'], 'group_names': [ { 'name': 'group_name_1', 'domain': { 'name': 'domain1' } }, { 'name': 'group_name_1_1', 'domain': { 'name': 'domain1' } }, { 'name': 'group_name_2', 'domain': { 'id': 'xyz132' } } ] } """ # Assertions will come in as string key-value pairs, and will use a # semi-colon to indicate multiple values, i.e. groups. # This will create a new dictionary where the values are arrays, and # any multiple values are stored in the arrays. 
LOG.debug('assertion data: %s', assertion_data) assertion = { n: v.split(';') for n, v in assertion_data.items() if isinstance(v, str) } LOG.debug('assertion: %s', assertion) identity_values = [] LOG.debug('rules: %s', self.rules) for rule in self.rules: direct_maps = self._verify_all_requirements( rule['remote'], assertion ) # If the compare comes back as None, then the rule did not apply # to the assertion data, go on to the next rule if direct_maps is None: continue # If there are no direct mappings, then add the local mapping # directly to the array of saved values. However, if there is # a direct mapping, then perform variable replacement. if not direct_maps: identity_values += rule['local'] else: for local in rule['local']: new_local = self._update_local_mapping(local, direct_maps) identity_values.append(new_local) LOG.debug('identity_values: %s', identity_values) mapped_properties = self._transform(identity_values) LOG.debug('mapped_properties: %s', mapped_properties) return mapped_properties @staticmethod def _ast_literal_eval(value): try: values = ast.literal_eval(value) # We expect a list here, but literal_eval will successfully # parse and return a single value. We raise ValueError if so. if not isinstance(values, list): raise ValueError except (ValueError, SyntaxError): values = [value] return values def _normalize_groups(self, identity_value): # In this case, identity_value['groups'] is a string # representation of a list, and we want a real list. This is # due to the way we do direct mapping substitutions today (see # function _update_local_mapping() ) if 'name' in identity_value['groups']: group_names_list = self._ast_literal_eval(identity_value['groups']) def convert_json(group): if group.startswith('JSON:'): return jsonutils.loads(group.lstrip('JSON:')) return group group_dicts = [convert_json(g) for g in group_names_list] for g in group_dicts: if 'domain' not in g: msg = _( "Invalid rule: %(identity_value)s. 
Both " "'groups' and 'domain' keywords must be " "specified." ) msg = msg % {'identity_value': identity_value} raise exception.ValidationError(msg) else: if 'domain' not in identity_value: msg = _( "Invalid rule: %(identity_value)s. Both " "'groups' and 'domain' keywords must be " "specified." ) msg = msg % {'identity_value': identity_value} raise exception.ValidationError(msg) group_names_list = self._ast_literal_eval(identity_value['groups']) domain = identity_value['domain'] group_dicts = [ {'name': name, 'domain': domain} for name in group_names_list ] return group_dicts def normalize_user(self, user, default_mapping_domain): """Parse and validate user mapping.""" if user.get('type') is None: user['type'] = UserType.EPHEMERAL if user.get('type') not in (UserType.EPHEMERAL, UserType.LOCAL): msg = _("User type %s not supported") % user.get('type') raise exception.ValidationError(msg) def extract_groups(self, groups_by_domain): for groups in list(groups_by_domain.values()): yield from list({g['name']: g for g in groups}.values()) def _transform(self, identity_values): """Transform local mappings, to an easier to understand format. Transform the incoming array to generate the return value for the process function. Generating content for Keystone tokens will be easier if some pre-processing is done at this level. :param identity_values: local mapping from valid evaluations :type identity_values: array of dict Example identity_values:: [ { 'group': {'id': '0cd5e9'}, 'user': { 'email': 'bob@example.com' }, }, { 'groups': ['member', 'admin', tester'], 'domain': { 'name': 'default_domain' } }, { 'group_ids': ['abc123', 'def456', '0cd5e9'] } ] :returns: dictionary with user name, group_ids and group_names. 
:rtype: dict """ # initialize the group_ids as a set to eliminate duplicates user = {} group_ids = set() group_names = list() groups_by_domain = dict() projects = [] # if mapping yield no valid identity values, we should bail right away # instead of continuing on with a normalized bogus user if not identity_values: msg = ( "Could not map any federated user properties to identity " "values. Check debug logs or the mapping used for " "additional details." ) tr_msg = _( "Could not map any federated user properties to " "identity values. Check debug logs or the mapping " "used for additional details." ) LOG.warning(msg) raise exception.ValidationError(tr_msg) for identity_value in identity_values: if 'user' in identity_value: # if a mapping outputs more than one user name, log it if user: LOG.warning( 'Ignoring user [%s]', identity_value.get('user') ) else: user = identity_value.get('user') if 'group' in identity_value: group = identity_value['group'] if 'id' in group: group_ids.add(group['id']) elif 'name' in group: groups = self.process_group_by_name( group, groups_by_domain ) group_names.extend(groups) if 'groups' in identity_value: group_dicts = self._normalize_groups(identity_value) group_names.extend(group_dicts) if 'group_ids' in identity_value: # If identity_values['group_ids'] is a string representation # of a list, parse it to a real list. Also, if the provided # group_ids parameter contains only one element, it will be # parsed as a simple string, and not a list or the # representation of a list. 
group_ids.update( self._ast_literal_eval(identity_value['group_ids']) ) if 'projects' in identity_value: projects = self.extract_projects(identity_value) self.normalize_user(user, identity_value.get('domain')) return { 'user': user, 'group_ids': list(group_ids), 'group_names': group_names, 'projects': projects, } def process_group_by_name(self, group, groups_by_domain): domain = group['domain'].get('name') or group['domain'].get('id') groups_by_domain.setdefault(domain, list()).append(group) return self.extract_groups(groups_by_domain) def extract_projects(self, identity_value): return identity_value.get('projects', []) def _update_local_mapping(self, local, direct_maps): """Replace any {0}, {1} ... values with data from the assertion. :param local: local mapping reference that needs to be updated :type local: dict :param direct_maps: identity values used to update local :type direct_maps: keystone.federation.utils.DirectMaps Example local:: {'user': {'name': '{0} {1}', 'email': '{2}'}} Example direct_maps:: [['Bob'], ['Thompson'], ['bob@example.com']] :returns: new local mapping reference with replaced values. The expected return structure is:: {'user': {'name': 'Bob Thompson', 'email': 'bob@example.org'}} :raises keystone.exception.DirectMappingError: when referring to a remote match from a local section of a rule """ LOG.debug('direct_maps: %s', direct_maps) LOG.debug('local: %s', local) new = {} for k, v in local.items(): if isinstance(v, dict): new_value = self._update_local_mapping(v, direct_maps) elif isinstance(v, list): new_value = [ self._update_local_mapping(item, direct_maps) for item in v ] else: try: new_value = v.format(*direct_maps) except IndexError: raise exception.DirectMappingError( mapping_id=self.mapping_id ) new[k] = new_value return new def _verify_all_requirements(self, requirements, assertion): """Compare remote requirements of a rule against the assertion. If a value of ``None`` is returned, the rule with this assertion doesn't apply. 
If an array of zero length is returned, then there are no direct mappings to be performed, but the rule is valid. Otherwise, then it will first attempt to filter the values according to blacklist or whitelist rules and finally return the values in order, to be directly mapped. :param requirements: list of remote requirements from rules :type requirements: list Example requirements:: [ { "type": "UserName" }, { "type": "orgPersonType", "any_one_of": [ "Customer" ] }, { "type": "ADFS_GROUPS", "whitelist": [ "g1", "g2", "g3", "g4" ] } ] :param assertion: dict of attributes from an IdP :type assertion: dict Example assertion:: { 'UserName': ['testacct'], 'LastName': ['Account'], 'orgPersonType': ['Tester'], 'Email': ['testacct@example.com'], 'FirstName': ['Test'], 'ADFS_GROUPS': ['g1', 'g2'] } :returns: identity values used to update local :rtype: keystone.federation.utils.DirectMaps or None """ direct_maps = DirectMaps() for requirement in requirements: requirement_type = requirement['type'] direct_map_values = assertion.get(requirement_type) regex = requirement.get('regex', False) if not direct_map_values: return None any_one_values = requirement.get(self._EvalType.ANY_ONE_OF) if any_one_values is not None: if self._evaluate_requirement( any_one_values, direct_map_values, self._EvalType.ANY_ONE_OF, regex, ): continue else: return None not_any_values = requirement.get(self._EvalType.NOT_ANY_OF) if not_any_values is not None: if self._evaluate_requirement( not_any_values, direct_map_values, self._EvalType.NOT_ANY_OF, regex, ): continue else: return None # If 'any_one_of' or 'not_any_of' are not found, then values are # within 'type'. Attempt to find that 'type' within the assertion, # and filter these values if 'whitelist' or 'blacklist' is set. 
blacklisted_values = requirement.get(self._EvalType.BLACKLIST) whitelisted_values = requirement.get(self._EvalType.WHITELIST) # If a blacklist or whitelist is used, we want to map to the # whole list instead of just its values separately. if blacklisted_values is not None: direct_map_values = self._evaluate_requirement( blacklisted_values, direct_map_values, self._EvalType.BLACKLIST, regex, ) elif whitelisted_values is not None: direct_map_values = self._evaluate_requirement( whitelisted_values, direct_map_values, self._EvalType.WHITELIST, regex, ) direct_maps.add(direct_map_values) LOG.debug('updating a direct mapping: %s', direct_map_values) return direct_maps def _evaluate_values_by_regex(self, values, assertion_values): return [ assertion for assertion in assertion_values if any([re.search(regex, assertion) for regex in values]) ] def _evaluate_requirement( self, values, assertion_values, eval_type, regex ): """Evaluate the incoming requirement and assertion. Filter the incoming assertions against the requirement values. If regex is specified, the assertion list is filtered by checking if any of the requirement regexes matches. Otherwise, the list is filtered by string equality with any of the allowed values. 
Once the assertion values are filtered, the output is determined by the evaluation type: any_one_of: return True if there are any matches, False otherwise not_any_of: return True if there are no matches, False otherwise blacklist: return the incoming values minus any matches whitelist: return only the matched values :param values: list of allowed values, defined in the requirement :type values: list :param assertion_values: The values from the assertion to evaluate :type assertion_values: list/string :param eval_type: determine how to evaluate requirements :type eval_type: string :param regex: perform evaluation with regex :type regex: boolean :returns: list of filtered assertion values (if evaluation type is 'blacklist' or 'whitelist'), or boolean indicating if the assertion values fulfill the requirement (if evaluation type is 'any_one_of' or 'not_any_of') """ if regex: matches = self._evaluate_values_by_regex(values, assertion_values) else: matches = set(values).intersection(set(assertion_values)) if eval_type == self._EvalType.ANY_ONE_OF: return bool(matches) elif eval_type == self._EvalType.NOT_ANY_OF: return not bool(matches) elif eval_type == self._EvalType.BLACKLIST: return list(set(assertion_values).difference(set(matches))) elif eval_type == self._EvalType.WHITELIST: return list(matches) else: raise exception.UnexpectedError( _('Unexpected evaluation type "%(eval_type)s"') % {'eval_type': eval_type} ) def assert_enabled_identity_provider(federation_api, idp_id): identity_provider = federation_api.get_idp(idp_id) if identity_provider.get('enabled') is not True: msg = f'Identity Provider {idp_id} is disabled' tr_msg = _('Identity Provider %(idp)s is disabled') % {'idp': idp_id} LOG.debug(msg) raise exception.Forbidden(tr_msg) def assert_enabled_service_provider_object(service_provider): if service_provider.get('enabled') is not True: sp_id = service_provider['id'] msg = f'Service Provider {sp_id} is disabled' tr_msg = _('Service Provider %(sp)s is 
disabled') % {'sp': sp_id} LOG.debug(msg) raise exception.Forbidden(tr_msg) class RuleProcessorToHonorDomainOption(RuleProcessor): """Handles the default domain configured in the attribute mapping. This rule processor is designed to handle the `domain` attribute configured at the root of the attribute mapping. When this attribute is configured, we should take it as the default one for the attribute mapping, instead of the domain of the IdP. Moreover, we should respect the override to it that can take place at the `groups`, `user`, and `projects` attributes definition. """ def __init__(self, mapping_id, rules): super().__init__(mapping_id, rules) def extract_projects(self, identity_value): projects = identity_value.get("projects", []) default_mapping_domain = identity_value.get("domain") for project in projects: if not project.get("domain"): LOG.debug( "Configuring the domain [%s] for project [%s].", default_mapping_domain, project, ) project["domain"] = default_mapping_domain return projects def normalize_user(self, user, default_mapping_domain): super().normalize_user(user, default_mapping_domain) if not user.get("domain"): LOG.debug( "Configuring the domain [%s] for user [%s].", default_mapping_domain, user, ) user["domain"] = default_mapping_domain else: LOG.debug( "The user [%s] was configured with a domain. 
" "Therefore, we do not need to define.", user, ) IDP_ATTRIBUTE_MAPPING_SCHEMAS = { "1.0": { "schema": IDP_ATTRIBUTE_MAPPING_SCHEMA_1_0, "processor": RuleProcessor, }, "2.0": { "schema": IDP_ATTRIBUTE_MAPPING_SCHEMA_2_0, "processor": RuleProcessorToHonorDomainOption, }, } def create_attribute_mapping_rules_processor(mapping): version = mapping.get( 'schema_version', get_default_attribute_mapping_schema_version() ) return IDP_ATTRIBUTE_MAPPING_SCHEMAS[version]['processor']( mapping['id'], mapping['rules'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/i18n.py0000664000175000017500000000155500000000000016463 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. See https://docs.openstack.org/oslo.i18n/latest/user/usage.html . 
""" import oslo_i18n oslo_i18n.enable_lazy() _translators = oslo_i18n.TranslatorFactory(domain='keystone') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/identity/0000775000175000017500000000000000000000000017155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/__init__.py0000664000175000017500000000124700000000000021272 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.identity.core import * # noqa from keystone.identity import generator # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/identity/backends/0000775000175000017500000000000000000000000020727 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/__init__.py0000664000175000017500000000000000000000000023026 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/base.py0000664000175000017500000003457300000000000022227 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import keystone.conf from keystone import exception CONF = keystone.conf.CONF def filter_user(user_ref): """Filter out private items in a user dict. 'password', 'tenants' and 'groups' are never returned. :returns: user_ref """ if user_ref: user_ref = user_ref.copy() user_ref.pop('password', None) user_ref.pop('tenants', None) user_ref.pop('groups', None) user_ref.pop('domains', None) try: user_ref['extra'].pop('password', None) user_ref['extra'].pop('tenants', None) except KeyError: # nosec # ok to not have extra in the user_ref. 
pass if 'password_expires_at' not in user_ref: user_ref['password_expires_at'] = None return user_ref class IdentityDriverBase(metaclass=abc.ABCMeta): """Interface description for an Identity driver. The schema for users and groups is different depending on whether the driver is domain aware or not (as returned by self.is_domain_aware()). If the driver is not domain aware: * domain_id will be not be included in the user / group passed in to create_user / create_group * the domain_id should not be returned in user / group refs. They'll be overwritten. The password_expires_at in the user schema is a read-only attribute, meaning that it is expected in the response, but not in the request. User schema (if driver is domain aware):: type: object properties: id: type: string name: type: string domain_id: type: string password: type: string password_expires_at: type: datetime enabled: type: boolean default_project_id: type: string required: [id, name, domain_id, enabled] additionalProperties: True User schema (if driver is not domain aware):: type: object properties: id: type: string name: type: string password: type: string password_expires_at: type: datetime enabled: type: boolean default_project_id: type: string required: [id, name, enabled] additionalProperties: True # Note that domain_id is not allowed as a property Group schema (if driver is domain aware):: type: object properties: id: type: string name: type: string domain_id: type: string description: type: string required: [id, name, domain_id] additionalProperties: True Group schema (if driver is not domain aware):: type: object properties: id: type: string name: type: string description: type: string required: [id, name] additionalProperties: True # Note that domain_id is not allowed as a property """ # @classmethod # def register_opts(cls, conf): # """Register driver specific configuration options. 
# For domain configuration being stored in the database it is necessary # for the driver to register configuration options. This method is # optional and if it is not present no options are registered. # """ # pass def _get_conf(self): try: return self.conf or CONF except AttributeError: return CONF def _get_list_limit(self): conf = self._get_conf() # use list_limit from domain-specific config. If list_limit in # domain-specific config is not set, look it up in the default config return ( conf.identity.list_limit or conf.list_limit or CONF.identity.list_limit or CONF.list_limit ) def is_domain_aware(self): """Indicate if the driver supports domains.""" return True @property def is_sql(self): """Indicate if this Driver uses SQL.""" return False @property def multiple_domains_supported(self): return ( self.is_domain_aware() or CONF.identity.domain_specific_drivers_enabled ) def generates_uuids(self): """Indicate if Driver generates UUIDs as the local entity ID.""" return True @abc.abstractmethod def authenticate(self, user_id, password): """Authenticate a given user and password. :param str user_id: User ID :param str password: Password :returns: user. See user schema in :class:`~.IdentityDriverBase`. :rtype: dict :raises AssertionError: If user or password is invalid. """ raise exception.NotImplemented() # pragma: no cover # user crud @abc.abstractmethod def create_user(self, user_id, user): """Create a new user. :param str user_id: user ID. The driver can ignore this value. :param dict user: user info. See user schema in :class:`~.IdentityDriverBase`. :returns: user, matching the user schema. The driver should not return the password. :rtype: dict :raises keystone.exception.Conflict: If a duplicate user exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users(self, hints): """List users in the system. :param hints: filter hints which the driver should implement if at all possible. 
:type hints: keystone.common.driver_hints.Hints :returns: a list of users or an empty list. See user schema in :class:`~.IdentityDriverBase`. :rtype: list of dict """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def unset_default_project_id(self, project_id): """Unset a user's default project given a specific project ID. :param str project_id: project ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_users_in_group(self, group_id, hints): """List users in a group. :param str group_id: the group in question :param hints: filter hints which the driver should implement if at all possible. :type hints: keystone.common.driver_hints.Hints :returns: a list of users or an empty list. See user schema in :class:`~.IdentityDriverBase`. :rtype: list of dict :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user(self, user_id): """Get a user by ID. :param str user_id: User ID. :returns: user. See user schema in :class:`~.IdentityDriverBase`. :rtype: dict :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_user(self, user_id, user): """Update an existing user. :param str user_id: User ID. :param dict user: User modification. See user schema in :class:`~.IdentityDriverBase`. Properties set to None will be removed. Required properties cannot be removed. :returns: user. See user schema in :class:`~.IdentityDriverBase`. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.Conflict: If a duplicate user exists in the same domain. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def change_password(self, user_id, new_password): """Self-service password change. :param str user_id: User ID. :param str new_password: New password. 
:raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.PasswordValidation: If password fails validation """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_user_to_group(self, user_id, group_id): """Add a user to a group. :param str user_id: User ID. :param str group_id: Group ID. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def check_user_in_group(self, user_id, group_id): """Check if a user is a member of a group. :param str user_id: User ID. :param str group_id: Group ID. :raises keystone.exception.NotFound: If the user is not a member of the group. :raises keystone.exception.UserNotFound: If the user doesn't exist. :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def remove_user_from_group(self, user_id, group_id): """Remove a user from a group. :param str user_id: User ID. :param str group_id: Group ID. :raises keystone.exception.NotFound: If the user is not in the group. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_user(self, user_id): """Delete an existing user. :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_user_by_name(self, user_name, domain_id): """Get a user by name. :returns: user_ref :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def reset_last_active(self): """Resets null last_active_at values. This method looks for all users in the database that have a null value for last_updated_at and resets that value to the current time. 
""" raise exception.NotImplemented() # pragma: no cover # group crud @abc.abstractmethod def create_group(self, group_id, group): """Create a new group. :param str group_id: group ID. The driver can ignore this value. :param dict group: group info. See group schema in :class:`~.IdentityDriverBase`. :returns: group, matching the group schema. :rtype: dict :raises keystone.exception.Conflict: If a duplicate group exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups(self, hints): """List groups in the system. :param hints: filter hints which the driver should implement if at all possible. :type hints: keystone.common.driver_hints.Hints :returns: a list of group_refs or an empty list. See group schema in :class:`~.IdentityDriverBase`. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_groups_for_user(self, user_id, hints): """List groups a user is in. :param str user_id: the user in question :param hints: filter hints which the driver should implement if at all possible. :type hints: keystone.common.driver_hints.Hints :returns: a list of group_refs or an empty list. See group schema in :class:`~.IdentityDriverBase`. :raises keystone.exception.UserNotFound: If the user doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_group(self, group_id): """Get a group by ID. :param str group_id: group ID. :returns: group info. See group schema in :class:`~.IdentityDriverBase` :rtype: dict :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_group_by_name(self, group_name, domain_id): """Get a group by name. :param str group_name: group name. :param str domain_id: domain ID. :returns: group info. See group schema in :class:`~.IdentityDriverBase`. :rtype: dict :raises keystone.exception.GroupNotFound: If the group doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_group(self, group_id, group): """Update an existing group. :param str group_id: Group ID. :param dict group: Group modification. See group schema in :class:`~.IdentityDriverBase`. Required properties cannot be removed. :returns: group, matching the group schema. :rtype: dict :raises keystone.exception.GroupNotFound: If the group doesn't exist. :raises keystone.exception.Conflict: If a duplicate group exists. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_group(self, group_id): """Delete an existing group. :param str group_id: Group ID. :raises keystone.exception.GroupNotFound: If the group doesn't exist. """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/identity/backends/ldap/0000775000175000017500000000000000000000000021647 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/ldap/__init__.py0000664000175000017500000000113500000000000023760 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.identity.backends.ldap.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/ldap/common.py0000664000175000017500000023647700000000000023534 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import codecs import os.path import random import re import sys import typing as ty import uuid import weakref import ldap.controls import ldap.filter import ldappool from oslo_log import log from oslo_utils import reflection from keystone.common import driver_hints from keystone import exception from keystone.i18n import _ from keystone.identity.backends.ldap import models LOG = log.getLogger(__name__) LDAP_VALUES = {'TRUE': True, 'FALSE': False} LDAP_SCOPES = {'one': ldap.SCOPE_ONELEVEL, 'sub': ldap.SCOPE_SUBTREE} LDAP_DEREF = { 'always': ldap.DEREF_ALWAYS, 'default': None, 'finding': ldap.DEREF_FINDING, 'never': ldap.DEREF_NEVER, 'searching': ldap.DEREF_SEARCHING, } LDAP_TLS_CERTS = { 'never': ldap.OPT_X_TLS_NEVER, 'demand': ldap.OPT_X_TLS_DEMAND, 'allow': ldap.OPT_X_TLS_ALLOW, } # RFC 4511 (The LDAP Protocol) defines a list containing only the OID '1.1' to # indicate that no attributes should be returned besides the DN. 
DN_ONLY = ['1.1']

_utf8_encoder = codecs.getencoder('utf-8')

# FIXME(knikolla): This enables writing to the LDAP backend
# Only enabled during tests and unsupported
WRITABLE = False


def utf8_encode(value):
    """Encode a basestring to UTF-8.

    If the string is unicode encode it to UTF-8, if the string is
    str then assume it's already encoded. Otherwise raise a TypeError.

    :param value: A basestring
    :returns: UTF-8 encoded version of value
    :raises TypeError: If value is not basestring
    """
    if isinstance(value, str):
        return _utf8_encoder(value)[0]
    elif isinstance(value, bytes):
        # Already encoded; pass through untouched.
        return value
    else:
        value_cls_name = reflection.get_class_name(
            value, fully_qualified=False
        )
        raise TypeError("value must be basestring, not %s" % value_cls_name)


_utf8_decoder = codecs.getdecoder('utf-8')


def utf8_decode(value):
    """Decode a value from UTF-8 into unicode.

    If the value is a binary string assume it's UTF-8 encoded and decode
    it into a unicode string. Otherwise convert the value from its
    type into a unicode string.

    :param value: value to be returned as unicode
    :returns: value as unicode
    :raises UnicodeDecodeError: for invalid UTF-8 encoding
    """
    if isinstance(value, bytes):
        try:
            return _utf8_decoder(value)[0]
        except UnicodeDecodeError:
            # NOTE(lbragstad): We could be dealing with a UUID in byte form,
            # which some LDAP implementations use.
            uuid_byte_string_length = 16
            if len(value) == uuid_byte_string_length:
                return str(uuid.UUID(bytes_le=value))
            else:
                raise
    # Non-bytes values are simply stringified.
    return str(value)


def py2ldap(val):
    """Type convert a Python value to a type accepted by LDAP (unicode).

    The LDAP API only accepts strings for values therefore convert
    the value's type to a unicode string. A subsequent type conversion
    will encode the unicode as UTF-8 as required by the python-ldap API,
    but for now we just want a string representation of the value.

    :param val: The value to convert to a LDAP string representation
    :returns: unicode string representation of value.
    """
    if isinstance(val, bool):
        # Booleans use the LDAP string representation (RFC 4517).
        return 'TRUE' if val else 'FALSE'
    else:
        return str(val)


def enabled2py(val):
    """Similar to ldap2py, only useful for the enabled attribute."""
    try:
        return LDAP_VALUES[val]
    except KeyError:  # nosec
        # It wasn't a boolean value, will try as an int instead.
        pass
    try:
        return int(val)
    except ValueError:  # nosec
        # It wasn't an int either, will try as utf8 instead.
        pass
    return utf8_decode(val)


def ldap2py(val):
    """Convert an LDAP formatted value to Python type used by OpenStack.

    Virtually all LDAP values are stored as UTF-8 encoded strings.
    OpenStack prefers values which are unicode friendly.

    :param val: LDAP formatted value
    :returns: val converted to preferred Python type
    """
    return utf8_decode(val)


def convert_ldap_result(ldap_result):
    """Convert LDAP search result to Python types used by OpenStack.

    Each result tuple is of the form (dn, attrs), where dn is a string
    containing the DN (distinguished name) of the entry, and attrs is
    a dictionary containing the attributes associated with the
    entry. The keys of attrs are strings, and the associated values
    are lists of strings.

    OpenStack wants to use Python types of its choosing. Strings will
    be unicode, truth values boolean, whole numbers int's, etc. DN's are
    represented as text in python-ldap by default for Python 3 and when
    bytes_mode=False for Python 2, and therefore do not require decoding.

    :param ldap_result: LDAP search result
    :returns: list of 2-tuples containing (dn, attrs) where dn is unicode
              and attrs is a dict whose values are type converted to
              OpenStack preferred types.
""" py_result = [] at_least_one_referral = False for dn, attrs in ldap_result: ldap_attrs = {} if dn is None: # this is a Referral object, rather than an Entry object at_least_one_referral = True continue for kind, values in attrs.items(): try: val2py = enabled2py if kind == 'enabled' else ldap2py ldap_attrs[kind] = [val2py(x) for x in values] except UnicodeDecodeError: LOG.debug('Unable to decode value for attribute %s', kind) py_result.append((dn, ldap_attrs)) if at_least_one_referral: LOG.debug( 'Referrals were returned and ignored. Enable referral ' 'chasing in keystone.conf via [ldap] chase_referrals' ) return py_result def safe_iter(attrs): if attrs is None: return elif isinstance(attrs, list): yield from attrs else: yield attrs def parse_deref(opt): try: return LDAP_DEREF[opt] except KeyError: raise ValueError( _( 'Invalid LDAP deref option: %(option)s. ' 'Choose one of: %(options)s' ) % { 'option': opt, 'options': ', '.join(LDAP_DEREF.keys()), } ) def parse_tls_cert(opt): try: return LDAP_TLS_CERTS[opt] except KeyError: raise ValueError( _( 'Invalid LDAP TLS certs option: %(option)s. ' 'Choose one of: %(options)s' ) % {'option': opt, 'options': ', '.join(LDAP_TLS_CERTS.keys())} ) def ldap_scope(scope): try: return LDAP_SCOPES[scope] except KeyError: raise ValueError( _('Invalid LDAP scope: %(scope)s. Choose one of: %(options)s') % {'scope': scope, 'options': ', '.join(LDAP_SCOPES.keys())} ) def prep_case_insensitive(value): """Prepare a string for case-insensitive comparison. This is defined in RFC4518. For simplicity, all this function does is lowercase all the characters, strip leading and trailing whitespace, and compress sequences of spaces to a single space. """ value = re.sub(r'\s+', ' ', value.strip().lower()) return value def is_ava_value_equal(attribute_type, val1, val2): """Return True if and only if the AVAs are equal. When comparing AVAs, the equality matching rule for the attribute type should be taken into consideration. 
    For simplicity, this implementation does a case-insensitive
    comparison.

    Note that this function uses prep_case_insensitive so the
    limitations of that function apply here.

    """
    return prep_case_insensitive(val1) == prep_case_insensitive(val2)


def is_rdn_equal(rdn1, rdn2):
    """Return True if and only if the RDNs are equal.

    * RDNs must have the same number of AVAs.

    * Each AVA of the RDNs must be equal for the same attribute type.
      The order isn't significant. Note that an attribute type will
      only be in one AVA in an RDN, otherwise the DN wouldn't be valid.

    * Attribute types aren't case sensitive.

    Note that attribute type comparison is more complicated than
    implemented. This function only compares case-insensitive. The code
    should handle multiple names for an attribute type (e.g., cn,
    commonName, and 2.5.4.3 are the same).

    Note that this function uses is_ava_value_equal to compare AVAs so
    the limitations of that function apply here.

    """
    if len(rdn1) != len(rdn2):
        return False

    for attr_type_1, val1, dummy in rdn1:
        found = False
        for attr_type_2, val2, dummy in rdn2:
            # Attribute types are compared case-insensitively.
            if attr_type_1.lower() != attr_type_2.lower():
                continue
            found = True
            if not is_ava_value_equal(attr_type_1, val1, val2):
                return False
            break
        if not found:
            return False

    return True


def is_dn_equal(dn1, dn2):
    """Return True if and only if the DNs are equal.

    Two DNs are equal if they've got the same number of RDNs and if the
    RDNs are the same at each position. See RFC4517.

    Note that this function uses is_rdn_equal to compare RDNs so the
    limitations of that function apply here.

    :param dn1: Either a string DN or a DN parsed by ldap.dn.str2dn.
    :param dn2: Either a string DN or a DN parsed by ldap.dn.str2dn.

    """
    if not isinstance(dn1, list):
        dn1 = ldap.dn.str2dn(dn1)
    if not isinstance(dn2, list):
        dn2 = ldap.dn.str2dn(dn2)

    if len(dn1) != len(dn2):
        return False

    for rdn1, rdn2 in zip(dn1, dn2):
        if not is_rdn_equal(rdn1, rdn2):
            return False
    return True


def dn_startswith(descendant_dn, dn):
    """Return True if and only if the descendant_dn is under the dn.

    :param descendant_dn: Either a string DN or a DN parsed by
                          ldap.dn.str2dn.
    :param dn: Either a string DN or a DN parsed by ldap.dn.str2dn.

    """
    if not isinstance(descendant_dn, list):
        descendant_dn = ldap.dn.str2dn(descendant_dn)
    if not isinstance(dn, list):
        dn = ldap.dn.str2dn(dn)

    # A DN cannot be a descendant of an equal-length or longer DN.
    if len(descendant_dn) <= len(dn):
        return False

    # Use the last len(dn) RDNs.
    return is_dn_equal(descendant_dn[-len(dn) :], dn)


class LDAPHandler(metaclass=abc.ABCMeta):
    """Abstract class which defines methods for a LDAP API provider.

    Native Keystone values cannot be passed directly into and from the
    python-ldap API. Type conversion must occur at the LDAP API
    boundary, examples of type conversions are:

        * booleans map to the strings 'TRUE' and 'FALSE'

        * integer values map to their string representation.

        * unicode strings are encoded in UTF-8

    Note, in python-ldap some fields (DNs, RDNs, attribute names,
    queries) are represented as text (str on Python 3, unicode on
    Python 2 when bytes_mode=False). For more details see:
    http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode

    In addition to handling type conversions at the API boundary we have
    the requirement to support more than one LDAP API provider. Currently
    we have:

        * python-ldap, this is the standard LDAP API for Python, it
          requires access to a live LDAP server.

        * Fake LDAP which emulates python-ldap. This is used for
          testing without requiring a live LDAP server.

    To support these requirements we need a layer that performs type
    conversions and then calls another LDAP API which is configurable
    (e.g. either python-ldap or the fake emulation).
We have an additional constraint at the time of this writing due to limitations in the logging module. The logging module is not capable of accepting UTF-8 encoded strings, it will throw an encoding exception. Therefore all logging MUST be performed prior to UTF-8 conversion. This means no logging can be performed in the ldap APIs that implement the python-ldap API because those APIs are defined to accept only UTF-8 strings. Thus the layer which performs type conversions must also do the logging. We do the type conversions in two steps, once to convert all Python types to unicode strings, then log, then convert the unicode strings to UTF-8. There are a variety of ways one could accomplish this, we elect to use a chaining technique whereby instances of this class simply call the next member in the chain via the "conn" attribute. The chain is constructed by passing in an existing instance of this class as the conn attribute when the class is instantiated. Here is a brief explanation of why other possible approaches were not used: subclassing To perform the wrapping operations in the correct order the type conversion class would have to subclass each of the API providers. This is awkward, doubles the number of classes, and does not scale well. It requires the type conversion class to be aware of all possible API providers. decorators Decorators provide an elegant solution to wrap methods and would be an ideal way to perform type conversions before calling the wrapped function and then converting the values returned from the wrapped function. However decorators need to be aware of the method signature, it has to know what input parameters need conversion and how to convert the result. For an API like python-ldap which has a large number of different method signatures it would require a large number of specialized decorators. Experience has shown it's very easy to apply the wrong decorator due to the inherent complexity and tendency to cut-n-paste code. 
Another option is to parameterize the decorator to make it "smart". Experience has shown such decorators become insanely complicated and difficult to understand and debug. Also decorators tend to hide what's really going on when a method is called, the operations being performed are not visible when looking at the implemation of a decorated method, this too experience has shown leads to mistakes. Chaining simplifies both wrapping to perform type conversion as well as the substitution of alternative API providers. One simply creates a new instance of the API interface and insert it at the front of the chain. Type conversions are explicit and obvious. If a new method needs to be added to the API interface one adds it to the abstract class definition. Should one miss adding the new method to any derivations of the abstract class the code will fail to load and run making it impossible to forget updating all the derived classes. """ def __init__(self, conn=None): self.conn = conn @abc.abstractmethod def connect( self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None, debug_level=None, conn_timeout=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None, ): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def set_option(self, option, invalue): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_option(self, option): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def simple_bind_s( self, who='', cred='', serverctrls=None, clientctrls=None ): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def unbind_s(self): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def add_s(self, dn, modlist): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def search_s( 
self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, ): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def search_ext( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, ): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def result3( self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None ): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def modify_s(self, dn, modlist): raise exception.NotImplemented() # pragma: no cover class PythonLDAPHandler(LDAPHandler): """LDAPHandler implementation which calls the python-ldap API. Note, the python-ldap API requires all string attribute values to be UTF-8 encoded. Note, in python-ldap some fields (DNs, RDNs, attribute names, queries) are represented as text (str on Python 3, unicode on Python 2 when bytes_mode=False). For more details see: http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode The KeystoneLDAPHandler enforces this prior to invoking the methods in this class. 
""" def connect( self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None, debug_level=None, conn_timeout=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None, ): _common_ldap_initialization( url=url, use_tls=use_tls, tls_cacertfile=tls_cacertfile, tls_cacertdir=tls_cacertdir, tls_req_cert=tls_req_cert, debug_level=debug_level, timeout=conn_timeout, ) self.conn = ldap.initialize(url) self.conn.protocol_version = ldap.VERSION3 if alias_dereferencing is not None: self.conn.set_option(ldap.OPT_DEREF, alias_dereferencing) self.page_size = page_size if use_tls: self.conn.start_tls_s() if chase_referrals is not None: self.conn.set_option(ldap.OPT_REFERRALS, int(chase_referrals)) def set_option(self, option, invalue): return self.conn.set_option(option, invalue) def get_option(self, option): return self.conn.get_option(option) def simple_bind_s( self, who='', cred='', serverctrls=None, clientctrls=None ): return self.conn.simple_bind_s(who, cred, serverctrls, clientctrls) def unbind_s(self): return self.conn.unbind_s() def add_s(self, dn, modlist): return self.conn.add_s(dn, modlist) def search_s( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, ): return self.conn.search_s(base, scope, filterstr, attrlist, attrsonly) def search_ext( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, ): return self.conn.search_ext( base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit, ) def result3( self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None ): # The resp_ctrl_classes parameter is a recent addition to the # API. It defaults to None. We do not anticipate using it. # To run with older versions of python-ldap we do not pass it. 
return self.conn.result3(msgid, all, timeout) def modify_s(self, dn, modlist): return self.conn.modify_s(dn, modlist) def _common_ldap_initialization( url, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert=None, debug_level=None, timeout=None, ): """LDAP initialization for PythonLDAPHandler and PooledLDAPHandler.""" LOG.debug('LDAP init: url=%s', url) LOG.debug( 'LDAP init: use_tls=%s tls_cacertfile=%s tls_cacertdir=%s ' 'tls_req_cert=%s tls_avail=%s', use_tls, tls_cacertfile, tls_cacertdir, tls_req_cert, ldap.TLS_AVAIL, ) if debug_level is not None: ldap.set_option(ldap.OPT_DEBUG_LEVEL, debug_level) using_ldaps = url.lower().startswith("ldaps") if timeout is not None and timeout > 0: # set network connection timeout ldap.set_option(ldap.OPT_NETWORK_TIMEOUT, timeout) if use_tls and using_ldaps: raise AssertionError(_('Invalid TLS / LDAPS combination')) # The certificate trust options apply for both LDAPS and TLS. if use_tls or using_ldaps: if not ldap.TLS_AVAIL: raise ValueError( _('Invalid LDAP TLS_AVAIL option: %s. TLS not available') % ldap.TLS_AVAIL ) if not tls_cacertfile and not tls_cacertdir: raise ValueError( _( 'You need to set tls_cacertfile or ' 'tls_cacertdir if use_tls is true or ' 'url uses ldaps: scheme.' ) ) if tls_cacertfile: # NOTE(topol) # python ldap TLS does not verify CACERTFILE or CACERTDIR # so we add some extra simple sanity check verification # Also, setting these values globally (i.e. on the ldap object) # works but these values are ignored when setting them on the # connection if not os.path.isfile(tls_cacertfile): raise OSError( _("tls_cacertfile %s not found or is not a file") % tls_cacertfile ) ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile) elif tls_cacertdir: # NOTE(topol) # python ldap TLS does not verify CACERTFILE or CACERTDIR # so we add some extra simple sanity check verification # Also, setting these values globally (i.e. 
on the ldap object) # works but these values are ignored when setting them on the # connection if not os.path.isdir(tls_cacertdir): raise OSError( _("tls_cacertdir %s not found or is not a directory") % tls_cacertdir ) ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir) if tls_req_cert in list(LDAP_TLS_CERTS.values()): ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert) else: LOG.debug( 'LDAP TLS: invalid TLS_REQUIRE_CERT Option=%s', tls_req_cert ) class AsynchronousMessage: """A container for handling asynchronous LDAP responses. Some LDAP APIs, like `search_ext`, are asynchronous and return a message ID when the server successfully initiates the operation. Clients can use this message ID and the original connection to make the request to fetch the results using `result3`. This object holds the message ID, the original connection, and a callable weak reference Finalizer that cleans up context managers specific to the connection associated to the message ID. :param message_id: The message identifier (str). :param connection: The connection associated with the message identifier (ldappool.StateConnector). The `clean` attribute is a callable that cleans up the context manager used to create or return the connection object (weakref.finalize). """ def __init__(self, message_id, connection, context_manager): self.id = message_id self.connection = connection self.clean = weakref.finalize( self, self._cleanup_connection_context_manager, context_manager ) def _cleanup_connection_context_manager(self, context_manager): context_manager.__exit__(None, None, None) def use_conn_pool(func): """Use this only for connection pool specific ldap API. This adds connection object to decorated API as next argument after self. 
""" def wrapper(self, *args, **kwargs): # assert isinstance(self, PooledLDAPHandler) with self._get_pool_connection() as conn: self._apply_options(conn) return func(self, conn, *args, **kwargs) return wrapper class PooledLDAPHandler(LDAPHandler): """LDAPHandler implementation which uses pooled connection manager. Pool specific configuration is defined in [ldap] section. All other LDAP configuration is still used from [ldap] section Keystone LDAP authentication logic authenticates an end user using its DN and password via LDAP bind to establish supplied password is correct. This can fill up the pool quickly (as pool re-uses existing connection based on its bind data) and would not leave space in pool for connection re-use for other LDAP operations. Now a separate pool can be established for those requests when related flag 'use_auth_pool' is enabled. That pool can have its own size and connection lifetime. Other pool attributes are shared between those pools. If 'use_pool' is disabled, then 'use_auth_pool' does not matter. If 'use_auth_pool' is not enabled, then connection pooling is not used for those LDAP operations. Note, the python-ldap API requires all string attribute values to be UTF-8 encoded. The KeystoneLDAPHandler enforces this prior to invoking the methods in this class. Note, in python-ldap some fields (DNs, RDNs, attribute names, queries) are represented as text (str on Python 3, unicode on Python 2 when bytes_mode=False). 
For more details see: http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode """ # Added here to allow override for testing Connector = ldappool.StateConnector auth_pool_prefix = 'auth_pool_' connection_pools: dict = {} # static connector pool dict def __init__(self, conn=None, use_auth_pool=False): super().__init__(conn=conn) self.who = '' self.cred = '' self.conn_options = {} # connection specific options self.page_size = None self.use_auth_pool = use_auth_pool self.conn_pool = None def connect( self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None, debug_level=None, conn_timeout=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None, ): _common_ldap_initialization( url=url, use_tls=use_tls, tls_cacertfile=tls_cacertfile, tls_cacertdir=tls_cacertdir, tls_req_cert=tls_req_cert, debug_level=debug_level, timeout=pool_conn_timeout, ) self.page_size = page_size # Following two options are not added in common initialization as they # need to follow a sequence in PythonLDAPHandler code. 
if alias_dereferencing is not None: self.set_option(ldap.OPT_DEREF, alias_dereferencing) if chase_referrals is not None: self.set_option(ldap.OPT_REFERRALS, int(chase_referrals)) if self.use_auth_pool: # separate pool when use_auth_pool enabled pool_url = self.auth_pool_prefix + url else: pool_url = url try: self.conn_pool = self.connection_pools[pool_url] except KeyError: self.conn_pool = ldappool.ConnectionManager( url, size=pool_size, retry_max=pool_retry_max, retry_delay=pool_retry_delay, timeout=pool_conn_timeout, connector_cls=self.Connector, use_tls=use_tls, max_lifetime=pool_conn_lifetime, ) self.connection_pools[pool_url] = self.conn_pool def set_option(self, option, invalue): self.conn_options[option] = invalue def get_option(self, option): value = self.conn_options.get(option) # if option was not specified explicitly, then use connection default # value for that option if there. if value is None: with self._get_pool_connection() as conn: value = conn.get_option(option) return value def _apply_options(self, conn): # if connection has a lifetime, then it already has options specified if conn.get_lifetime() > 30: return for option, invalue in self.conn_options.items(): conn.set_option(option, invalue) def _get_pool_connection(self): return self.conn_pool.connection(self.who, self.cred) def simple_bind_s( self, who='', cred='', serverctrls=None, clientctrls=None ): # Not using use_conn_pool decorator here as this API takes cred as # input. self.who = who self.cred = cred with self._get_pool_connection() as conn: self._apply_options(conn) def unbind_s(self): # After connection generator is done `with` statement execution block # connection is always released via finally block in ldappool. # So this unbind is a no op. 
pass @use_conn_pool def add_s(self, conn, dn, modlist): return conn.add_s(dn, modlist) @use_conn_pool def search_s( self, conn, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, ): return conn.search_s(base, scope, filterstr, attrlist, attrsonly) def search_ext( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, ): """Return an AsynchronousMessage instance, it asynchronous API. The AsynchronousMessage instance can be safely used in a call to `result3()`. To work with `result3()` API in predictable manner, the same LDAP connection is needed which originally provided the `msgid`. So, this method wraps the existing connection and `msgid` in a new `AsynchronousMessage` instance. The connection associated with `search_ext()` is released after `result3()` fetches the data associated with `msgid`. """ conn_ctxt = self._get_pool_connection() conn = conn_ctxt.__enter__() try: msgid = conn.search_ext( base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit, ) except Exception: conn_ctxt.__exit__(*sys.exc_info()) raise return AsynchronousMessage(msgid, conn, conn_ctxt) def result3(self, message, all=1, timeout=None, resp_ctrl_classes=None): """Wait for and return the result to an asynchronous message. This method returns the result of an operation previously initiated by one of the LDAP asynchronous operation routines (e.g., `search_ext()`). The `search_ext()` method in python-ldap returns an invocation identifier, or a message ID, upon successful initiation of the operation by the LDAP server. The `message` is expected to be instance of class `AsynchronousMessage`, which contains the message ID and the connection used to make the original request. The connection and context manager associated with `search_ext()` are cleaned up when message.clean() is called. 
""" # message.connection.result3 might throw an exception # so the code must ensure that message.clean() is invoked # regardless of the result3's result. Otherwise, the # connection will be marked as active forever, which # ultimately renders the pool unusable, causing a DoS. try: results = message.connection.result3(message.id, all, timeout) finally: # Now that we have the results from the LDAP server for # the message, we don't need the the context manager used # to create the connection. message.clean() return results @use_conn_pool def modify_s(self, conn, dn, modlist): return conn.modify_s(dn, modlist) class KeystoneLDAPHandler(LDAPHandler): """Convert data types and perform logging. This LDAP interface wraps the python-ldap based interfaces. The python-ldap interfaces require string values encoded in UTF-8 with the exception of [1]. The OpenStack logging framework at the time of this writing is not capable of accepting strings encoded in UTF-8, the log functions will throw decoding errors if a non-ascii character appears in a string. [1] In python-ldap, some fields (DNs, RDNs, attribute names, queries) are represented as text (str on Python 3, unicode on Python 2 when bytes_mode=False). For more details see: http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode Prior to the call Python data types are converted to a string representation as required by the LDAP APIs. Then logging is performed so we can track what is being sent/received from LDAP. Also the logging filters security sensitive items (i.e. passwords). Then the string values are encoded into UTF-8. Then the LDAP API entry point is invoked. Data returned from the LDAP call is converted back from UTF-8 encoded strings into the Python data type used internally in OpenStack. 
""" def __init__(self, conn=None): super().__init__(conn=conn) self.page_size = 0 def __enter__(self): """Enter runtime context.""" return self def _disable_paging(self): # Disable the pagination from now on self.page_size = 0 def connect( self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert=ldap.OPT_X_TLS_DEMAND, chase_referrals=None, debug_level=None, conn_timeout=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None, ): self.page_size = page_size return self.conn.connect( url, page_size, alias_dereferencing, use_tls, tls_cacertfile, tls_cacertdir, tls_req_cert, chase_referrals, debug_level=debug_level, conn_timeout=conn_timeout, use_pool=use_pool, pool_size=pool_size, pool_retry_max=pool_retry_max, pool_retry_delay=pool_retry_delay, pool_conn_timeout=pool_conn_timeout, pool_conn_lifetime=pool_conn_lifetime, ) def set_option(self, option, invalue): return self.conn.set_option(option, invalue) def get_option(self, option): return self.conn.get_option(option) def simple_bind_s( self, who='', cred='', serverctrls=None, clientctrls=None ): LOG.debug('LDAP bind: who=%s', who) return self.conn.simple_bind_s( who, cred, serverctrls=serverctrls, clientctrls=clientctrls ) def unbind_s(self): LOG.debug('LDAP unbind') return self.conn.unbind_s() def add_s(self, dn, modlist): ldap_attrs = [ (kind, [py2ldap(x) for x in safe_iter(values)]) for kind, values in modlist ] logging_attrs = [ (kind, values if kind != 'userPassword' else ['****']) for kind, values in ldap_attrs ] LOG.debug('LDAP add: dn=%s attrs=%s', dn, logging_attrs) ldap_attrs_utf8 = [ (kind, [utf8_encode(x) for x in safe_iter(values)]) for kind, values in ldap_attrs ] return self.conn.add_s(dn, ldap_attrs_utf8) def search_s( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, ): # NOTE(morganfainberg): Remove "None" singletons from this list, which # 
allows us to set mapped attributes to "None" as defaults in config. # Without this filtering, the ldap query would raise a TypeError since # attrlist is expected to be an iterable of strings. if attrlist is not None: attrlist = [attr for attr in attrlist if attr is not None] LOG.debug( 'LDAP search: base=%s scope=%s filterstr=%s ' 'attrs=%s attrsonly=%s', base, scope, filterstr, attrlist, attrsonly, ) if self.page_size: ldap_result = self._paged_search_s( base, scope, filterstr, attrlist ) else: try: ldap_result = self.conn.search_s( base, scope, filterstr, attrlist, attrsonly ) except ldap.SIZELIMIT_EXCEEDED: raise exception.LDAPSizeLimitExceeded() py_result = convert_ldap_result(ldap_result) return py_result def search_ext( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, ): if attrlist is not None: attrlist = [attr for attr in attrlist if attr is not None] LOG.debug( 'LDAP search_ext: base=%s scope=%s filterstr=%s ' 'attrs=%s attrsonly=%s ' 'serverctrls=%s clientctrls=%s timeout=%s sizelimit=%s', base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit, ) return self.conn.search_ext( base, scope, filterstr, attrlist, attrsonly, serverctrls, clientctrls, timeout, sizelimit, ) def _paged_search_s(self, base, scope, filterstr, attrlist=None): res = [] use_old_paging_api = False # The API for the simple paged results control changed between # python-ldap 2.3 and 2.4. We need to detect the capabilities # of the python-ldap version we are using. 
if hasattr(ldap, 'LDAP_CONTROL_PAGE_OID'): use_old_paging_api = True lc = ldap.controls.SimplePagedResultsControl( controlType=ldap.LDAP_CONTROL_PAGE_OID, criticality=True, controlValue=(self.page_size, ''), ) page_ctrl_oid = ldap.LDAP_CONTROL_PAGE_OID else: lc = ldap.controls.libldap.SimplePagedResultsControl( criticality=True, size=self.page_size, cookie='' ) page_ctrl_oid = ldap.controls.SimplePagedResultsControl.controlType message = self.conn.search_ext( base, scope, filterstr, attrlist, serverctrls=[lc] ) # Endless loop request pages on ldap server until it has no data while True: # Request to the ldap server a page with 'page_size' entries rtype, rdata, rmsgid, serverctrls = self.conn.result3(message) # Receive the data res.extend(rdata) pctrls = [c for c in serverctrls if c.controlType == page_ctrl_oid] if pctrls: # LDAP server supports pagination if use_old_paging_api: est, cookie = pctrls[0].controlValue lc.controlValue = (self.page_size, cookie) else: cookie = lc.cookie = pctrls[0].cookie if cookie: # There is more data still on the server # so we request another page message = self.conn.search_ext( base, scope, filterstr, attrlist, serverctrls=[lc] ) else: # Exit condition no more data on server break else: LOG.warning( 'LDAP Server does not support paging. ' 'Disable paging in keystone.conf to ' 'avoid this message.' ) self._disable_paging() break return res def result3( self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None ): ldap_result = self.conn.result3(msgid, all, timeout, resp_ctrl_classes) LOG.debug( 'LDAP result3: msgid=%s all=%s timeout=%s ' 'resp_ctrl_classes=%s ldap_result=%s', msgid, all, timeout, resp_ctrl_classes, ldap_result, ) # ldap_result returned from result3 is a tuple of # (rtype, rdata, rmsgid, serverctrls). We don't need use of these, # except rdata. 
rtype, rdata, rmsgid, serverctrls = ldap_result py_result = convert_ldap_result(rdata) return py_result def modify_s(self, dn, modlist): ldap_modlist = [ ( op, kind, ( None if values is None else [py2ldap(x) for x in safe_iter(values)] ), ) for op, kind, values in modlist ] logging_modlist = [ (op, kind, (values if kind != 'userPassword' else ['****'])) for op, kind, values in ldap_modlist ] LOG.debug('LDAP modify: dn=%s modlist=%s', dn, logging_modlist) ldap_modlist_utf8 = [ ( op, kind, ( None if values is None else [utf8_encode(x) for x in safe_iter(values)] ), ) for op, kind, values in ldap_modlist ] return self.conn.modify_s(dn, ldap_modlist_utf8) def __exit__(self, exc_type, exc_val, exc_tb): """Exit runtime context, unbind LDAP.""" self.unbind_s() _HANDLERS = {} def register_handler(prefix, handler): _HANDLERS[prefix] = handler def _get_connection(conn_url, use_pool=False, use_auth_pool=False): for prefix, handler in _HANDLERS.items(): if conn_url.startswith(prefix): return handler() if use_pool: return PooledLDAPHandler(use_auth_pool=use_auth_pool) else: return PythonLDAPHandler() def filter_entity(entity_ref): """Filter out private items in an entity dict. :param entity_ref: the entity dictionary. The 'dn' field will be removed. 'dn' is used in LDAP, but should not be returned to the user. This value may be modified. 
:returns: entity_ref """ if entity_ref: entity_ref.pop('dn', None) return entity_ref class BaseLdap: DEFAULT_OU: str DEFAULT_STRUCTURAL_CLASSES: list[str] DEFAULT_ID_ATTR: str = 'cn' DEFAULT_OBJECTCLASS: str DEFAULT_FILTER: ty.Optional[str] = None DEFAULT_EXTRA_ATTR_MAPPING: list[str] = [] NotFound: ty.Type[exception.Error] notfound_arg: ty.Optional[str] = None options_name: ty.Optional[str] = None model: ty.Type[models.Model] attribute_options_names: dict[str, str] = {} immutable_attrs: list[str] = [] attribute_ignore: list[str] = [] tree_dn: ty.Optional[str] = None def __init__(self, conf): if conf.ldap.randomize_urls: urls = re.split(r'[\s,]+', conf.ldap.url) random.shuffle(urls) self.LDAP_URL = ','.join(urls) else: self.LDAP_URL = conf.ldap.url self.LDAP_USER = conf.ldap.user self.LDAP_PASSWORD = conf.ldap.password self.LDAP_SCOPE = ldap_scope(conf.ldap.query_scope) self.alias_dereferencing = parse_deref(conf.ldap.alias_dereferencing) self.page_size = conf.ldap.page_size self.use_tls = conf.ldap.use_tls self.tls_cacertfile = conf.ldap.tls_cacertfile self.tls_cacertdir = conf.ldap.tls_cacertdir self.tls_req_cert = parse_tls_cert(conf.ldap.tls_req_cert) self.attribute_mapping = {} self.chase_referrals = conf.ldap.chase_referrals self.debug_level = conf.ldap.debug_level self.conn_timeout = conf.ldap.connection_timeout # LDAP Pool specific attribute self.use_pool = conf.ldap.use_pool self.pool_size = conf.ldap.pool_size self.pool_retry_max = conf.ldap.pool_retry_max self.pool_retry_delay = conf.ldap.pool_retry_delay self.pool_conn_timeout = conf.ldap.pool_connection_timeout self.pool_conn_lifetime = conf.ldap.pool_connection_lifetime # End user authentication pool specific config attributes self.use_auth_pool = self.use_pool and conf.ldap.use_auth_pool self.auth_pool_size = conf.ldap.auth_pool_size self.auth_pool_conn_lifetime = conf.ldap.auth_pool_connection_lifetime if self.options_name is not None: self.tree_dn = ( getattr(conf.ldap, '%s_tree_dn' % 
self.options_name) or f'{self.DEFAULT_OU},{conf.ldap.suffix}' ) idatt = '%s_id_attribute' % self.options_name self.id_attr = getattr(conf.ldap, idatt) or self.DEFAULT_ID_ATTR objclass = '%s_objectclass' % self.options_name self.object_class = ( getattr(conf.ldap, objclass) or self.DEFAULT_OBJECTCLASS ) for k, v in self.attribute_options_names.items(): v = f'{self.options_name}_{v}_attribute' self.attribute_mapping[k] = getattr(conf.ldap, v) attr_mapping_opt = ( '%s_additional_attribute_mapping' % self.options_name ) attr_mapping = ( getattr(conf.ldap, attr_mapping_opt) or self.DEFAULT_EXTRA_ATTR_MAPPING ) self.extra_attr_mapping = self._parse_extra_attrs(attr_mapping) ldap_filter = '%s_filter' % self.options_name self.ldap_filter = ( getattr(conf.ldap, ldap_filter) or self.DEFAULT_FILTER ) member_attribute = '%s_member_attribute' % self.options_name self.member_attribute = getattr(conf.ldap, member_attribute, None) self.structural_classes = self.DEFAULT_STRUCTURAL_CLASSES if self.notfound_arg is None: self.notfound_arg = self.options_name + '_id' attribute_ignore = '%s_attribute_ignore' % self.options_name self.attribute_ignore = getattr(conf.ldap, attribute_ignore) def _not_found(self, object_id): if self.NotFound is None: return exception.NotFound(target=object_id) else: return self.NotFound(**{self.notfound_arg: object_id}) @staticmethod def _parse_extra_attrs(option_list): mapping = {} for item in option_list: try: ldap_attr, attr_map = item.split(':') except ValueError: LOG.warning( 'Invalid additional attribute mapping: "%s". 
' 'Format must be :', item, ) continue mapping[ldap_attr] = attr_map return mapping def get_connection(self, user=None, password=None, end_user_auth=False): use_pool = self.use_pool pool_size = self.pool_size pool_conn_lifetime = self.pool_conn_lifetime if end_user_auth: if not self.use_auth_pool: use_pool = False else: pool_size = self.auth_pool_size pool_conn_lifetime = self.auth_pool_conn_lifetime conn = _get_connection( self.LDAP_URL, use_pool, use_auth_pool=end_user_auth ) conn = KeystoneLDAPHandler(conn=conn) # The LDAP server may be down or a connection may not # exist. If that is the case, the bind attempt will # fail with a server down exception. try: conn.connect( self.LDAP_URL, page_size=self.page_size, alias_dereferencing=self.alias_dereferencing, use_tls=self.use_tls, tls_cacertfile=self.tls_cacertfile, tls_cacertdir=self.tls_cacertdir, tls_req_cert=self.tls_req_cert, chase_referrals=self.chase_referrals, debug_level=self.debug_level, conn_timeout=self.conn_timeout, use_pool=use_pool, pool_size=pool_size, pool_retry_max=self.pool_retry_max, pool_retry_delay=self.pool_retry_delay, pool_conn_timeout=self.pool_conn_timeout, pool_conn_lifetime=pool_conn_lifetime, ) if user is None: user = self.LDAP_USER if password is None: password = self.LDAP_PASSWORD # not all LDAP servers require authentication, so we don't bind # if we don't have any user/pass if user and password: conn.simple_bind_s(user, password) else: conn.simple_bind_s() return conn except ldap.INVALID_CREDENTIALS: raise exception.LDAPInvalidCredentialsError() except ldap.SERVER_DOWN: raise exception.LDAPServerConnectionError(url=self.LDAP_URL) def _id_to_dn_string(self, object_id): return '{}={},{}'.format( self.id_attr, ldap.dn.escape_dn_chars(str(object_id)), self.tree_dn, ) def _id_to_dn(self, object_id): if self.LDAP_SCOPE == ldap.SCOPE_ONELEVEL: return self._id_to_dn_string(object_id) with self.get_connection() as conn: search_result = conn.search_s( self.tree_dn, self.LDAP_SCOPE, 
'(&(%(id_attr)s=%(id)s)(objectclass=%(objclass)s))' % { 'id_attr': self.id_attr, 'id': ldap.filter.escape_filter_chars(str(object_id)), 'objclass': self.object_class, }, attrlist=DN_ONLY, ) if search_result: dn, attrs = search_result[0] return dn else: return self._id_to_dn_string(object_id) def _dn_to_id(self, dn): # Check if the naming attribute in the DN is the same as keystone's # configured 'id' attribute'. If so, extract the ID value from the DN if self.id_attr == ldap.dn.str2dn(dn)[0][0][0].lower(): return ldap.dn.str2dn(dn)[0][0][1] else: # The 'ID' attribute is NOT in the DN, so we need to perform an # LDAP search to look it up from the user entry itself. with self.get_connection() as conn: search_result = conn.search_s(dn, ldap.SCOPE_BASE) if search_result: try: id_list = search_result[0][1][self.id_attr] except KeyError: message = ( 'ID attribute %(id_attr)s not found in LDAP ' 'object %(dn)s.' ) % ({'id_attr': self.id_attr, 'dn': search_result}) LOG.warning(message) raise exception.NotFound(message=message) if len(id_list) > 1: message = ( 'In order to keep backward compatibility, in ' 'the case of multivalued ids, we are ' 'returning the first id %(id_attr)s in the ' 'DN.' ) % ({'id_attr': id_list[0]}) LOG.warning(message) return id_list[0] else: message = _('DN attribute %(dn)s not found in LDAP') % ( {'dn': dn} ) raise exception.NotFound(message=message) def _ldap_res_to_model(self, res): # LDAP attribute names may be returned in a different case than # they are defined in the mapping, so we need to check for keys # in a case-insensitive way. We use the case specified in the # mapping for the model to ensure we have a predictable way of # retrieving values later. 
lower_res = {k.lower(): v for k, v in res[1].items()} id_attrs = lower_res.get(self.id_attr.lower()) if not id_attrs: message = _( 'ID attribute %(id_attr)s not found in LDAP object %(dn)s' ) % ({'id_attr': self.id_attr, 'dn': res[0]}) raise exception.NotFound(message=message) if len(id_attrs) > 1: # FIXME(gyee): if this is a multi-value attribute and it has # multiple values, we can't use it as ID. Retain the dn_to_id # logic here so it does not potentially break existing # deployments. We need to fix our read-write LDAP logic so # it does not get the ID from DN. message = ( 'ID attribute %(id_attr)s for LDAP object %(dn)s ' 'has multiple values and therefore cannot be used ' 'as an ID. Will get the ID from DN instead' ) % ({'id_attr': self.id_attr, 'dn': res[0]}) LOG.warning(message) id_val = self._dn_to_id(res[0]) else: id_val = id_attrs[0] obj = self.model(id=id_val) for k in obj.known_keys: if k in self.attribute_ignore: continue try: map_attr = self.attribute_mapping.get(k, k) if map_attr is None: # Ignore attributes that are mapped to None. continue v = lower_res[map_attr.lower()] except KeyError: # nosec # Didn't find the attr, so don't add it. pass else: try: value = v[0] except IndexError: value = None # NOTE(xek): Some LDAP servers return bytes data type # We convert it to string here, so that it is consistent with # the other (SQL) backends. # Bytes data type caused issues in the past, because it could # be cached and then passed into str() method to be used as # LDAP filters, which results in an unexpected b'...' prefix. if isinstance(value, bytes): try: value = value.decode('utf-8') except UnicodeDecodeError: LOG.error( "Error decoding value %r (object id %r).", value, res[0], ) raise obj[k] = value return obj def affirm_unique(self, values): if values.get('name') is not None: try: self.get_by_name(values['name']) except exception.NotFound: # nosec # Didn't find it so it's unique, good. 
pass else: raise exception.Conflict( type=self.options_name, details=_('Duplicate name, %s.') % values['name'], ) if values.get('id') is not None: try: self.get(values['id']) except exception.NotFound: # nosec # Didn't find it, so it's unique, good. pass else: raise exception.Conflict( type=self.options_name, details=_('Duplicate ID, %s.') % values['id'], ) def create(self, values): self.affirm_unique(values) object_classes = self.structural_classes + [self.object_class] attrs = [('objectClass', object_classes)] for k, v in values.items(): if k in self.attribute_ignore: continue if k == 'id': # no need to check if v is None as 'id' will always have # a value attrs.append((self.id_attr, [v])) elif v is not None: attr_type = self.attribute_mapping.get(k, k) if attr_type is not None: attrs.append((attr_type, [v])) extra_attrs = [ attr for attr, name in self.extra_attr_mapping.items() if name == k ] for attr in extra_attrs: attrs.append((attr, [v])) with self.get_connection() as conn: conn.add_s(self._id_to_dn(values['id']), attrs) return values # NOTE(prashkre): Filter ldap search results on an attribute to ensure # that attribute has a value set on ldap. This keeps keystone away # from entities that don't have attribute value set on ldap. # for e.g. In ldap configuration, if user_name_attribute = personName # then it will ignore ldap users who don't have 'personName' attribute # value set on user. def _filter_ldap_result_by_attr(self, ldap_result, ldap_attr_name): attr = self.attribute_mapping[ldap_attr_name] # To ensure that ldap attribute value is not empty in ldap config. 
if not attr: attr_name = '{}_{}_attribute'.format( self.options_name, self.attribute_options_names[ldap_attr_name], ) raise ValueError( '"%(attr)s" is not a valid value for' ' "%(attr_name)s"' % {'attr': attr, 'attr_name': attr_name} ) # consider attr = "cn" and # ldap_result = [{'uid': ['fake_id1']}, , 'cN': ["name"]}] # doing lower case on both user_name_attribute and ldap users # attribute result = [] # consider attr = "cn" and # ldap_result = [(u'cn=fake1,o=ex_domain', {'uid': ['fake_id1']}), # (u'cn=fake2,o=ex_domain', {'uid': ['fake_id2'], # 'cn': [' ']}), # (u'cn=fake3,o=ex_domain', {'uid': ['fake_id3'], # 'cn': ['']}), # (u'cn=fake4,o=ex_domain', {'uid': ['fake_id4'], # 'cn': []}), # (u'cn=fake5,o=ex_domain', {'uid': ['fake_id5'], # 'cn': ["name"]})] for obj in ldap_result: # ignore ldap object(user/group entry) which has no attr set # in it or whose value is empty list. ldap_res_low_keys_dict = {k.lower(): v for k, v in obj[1].items()} result_attr_vals = ldap_res_low_keys_dict.get(attr.lower()) # ignore ldap object whose attr value has empty strings or # contains only whitespaces. if result_attr_vals: if result_attr_vals[0] and result_attr_vals[0].strip(): result.append(obj) # except {'uid': ['fake_id5'], 'cn': ["name"]}, all entries # will be ignored in ldap_result return result def _ldap_get(self, object_id, ldap_filter=None): query = ( '(&(%(id_attr)s=%(id)s)' '%(filter)s' '(objectClass=%(object_class)s))' % { 'id_attr': self.id_attr, 'id': ldap.filter.escape_filter_chars(str(object_id)), 'filter': (ldap_filter or self.ldap_filter or ''), 'object_class': self.object_class, } ) with self.get_connection() as conn: try: attrs = list( set( [self.id_attr] + list(self.attribute_mapping.values()) + list(self.extra_attr_mapping.keys()) ) ) res = conn.search_s( self.tree_dn, self.LDAP_SCOPE, query, attrs ) except ldap.NO_SUCH_OBJECT: return None # TODO(prashkre): add functional testing for missing name attibute # on ldap entities. 
# NOTE(prashkre): Filter ldap search result to keep keystone away from # entities that don't have names. We can also do the same by appending # a condition '(!(!(self.attribute_mapping.get('name')=*))' to ldap # search query but the repsonse time of the query is pretty slow when # compared to explicit filtering by 'name' through ldap result. try: return self._filter_ldap_result_by_attr(res[:1], 'name')[0] except IndexError: return None def _ldap_get_limited(self, base, scope, filterstr, attrlist, sizelimit): with self.get_connection() as conn: try: control = ldap.controls.libldap.SimplePagedResultsControl( criticality=True, size=sizelimit, cookie='' ) msgid = conn.search_ext( base, scope, filterstr, attrlist, serverctrls=[control] ) rdata = conn.result3(msgid) return rdata except ldap.NO_SUCH_OBJECT: return [] @driver_hints.truncated def _ldap_get_all(self, hints, ldap_filter=None): query = '(&{}(objectClass={})({}=*))'.format( ldap_filter or self.ldap_filter or '', self.object_class, self.id_attr, ) sizelimit = 0 attrs = list( set( [self.id_attr] + list(self.attribute_mapping.values()) + list(self.extra_attr_mapping.keys()) ) ) if hints.limit: sizelimit = hints.limit['limit'] res = self._ldap_get_limited( self.tree_dn, self.LDAP_SCOPE, query, attrs, sizelimit ) else: with self.get_connection() as conn: try: res = conn.search_s( self.tree_dn, self.LDAP_SCOPE, query, attrs ) except ldap.NO_SUCH_OBJECT: return [] # TODO(prashkre): add functional testing for missing name attribute # on ldap entities. # NOTE(prashkre): Filter ldap search result to keep keystone away from # entities that don't have names. We can also do the same by appending # a condition '(!(!(self.attribute_mapping.get('name')=*))' to ldap # search query but the repsonse time of the query is pretty slow when # compared to explicit filtering by 'name' through ldap result. 
return self._filter_ldap_result_by_attr(res, 'name') def _ldap_get_list( self, search_base, scope, query_params=None, attrlist=None ): query = '(objectClass=%s)' % self.object_class if query_params: def calc_filter(attrname, value): val_esc = ldap.filter.escape_filter_chars(value) return f'({attrname}={val_esc})' query = '(&{}{})'.format( query, ''.join([calc_filter(k, v) for k, v in query_params.items()]), ) with self.get_connection() as conn: return conn.search_s(search_base, scope, query, attrlist) def get(self, object_id, ldap_filter=None): res = self._ldap_get(object_id, ldap_filter) if res is None: raise self._not_found(object_id) else: return self._ldap_res_to_model(res) def get_by_name(self, name, ldap_filter=None): query = '({}={})'.format( self.attribute_mapping['name'], ldap.filter.escape_filter_chars(str(name)), ) res = self.get_all(query) try: return res[0] except IndexError: raise self._not_found(name) def get_all(self, ldap_filter=None, hints=None): hints = hints or driver_hints.Hints() return [ self._ldap_res_to_model(x) for x in self._ldap_get_all(hints, ldap_filter) ] def update(self, object_id, values, old_obj=None): if old_obj is None: old_obj = self.get(object_id) modlist = [] for k, v in values.items(): if k == 'id': # id can't be modified. continue if k in self.attribute_ignore: # Handle 'enabled' specially since can't disable if ignored. if k == 'enabled' and (not v): action = _( "Disabling an entity where the 'enable' " "attribute is ignored by configuration." 
) raise exception.ForbiddenAction(action=action) continue # attribute value has not changed if k in old_obj and old_obj[k] == v: continue if k in self.immutable_attrs: msg = _("Cannot change %(option_name)s %(attr)s") % { 'option_name': self.options_name, 'attr': k, } raise exception.ValidationError(msg) if v is None: if old_obj.get(k) is not None: modlist.append( ( ldap.MOD_DELETE, self.attribute_mapping.get(k, k), None, ) ) continue current_value = old_obj.get(k) if current_value is None: op = ldap.MOD_ADD modlist.append((op, self.attribute_mapping.get(k, k), [v])) elif current_value != v: op = ldap.MOD_REPLACE modlist.append((op, self.attribute_mapping.get(k, k), [v])) if modlist: with self.get_connection() as conn: try: conn.modify_s(self._id_to_dn(object_id), modlist) except ldap.NO_SUCH_OBJECT: raise self._not_found(object_id) return self.get(object_id) def add_member(self, member_dn, member_list_dn): """Add member to the member list. :param member_dn: DN of member to be added. :param member_list_dn: DN of group to which the member will be added. :raises keystone.exception.Conflict: If the user was already a member. :raises self.NotFound: If the group entry didn't exist. """ with self.get_connection() as conn: try: mod = (ldap.MOD_ADD, self.member_attribute, member_dn) conn.modify_s(member_list_dn, [mod]) except ldap.TYPE_OR_VALUE_EXISTS: raise exception.Conflict( _( 'Member %(member)s ' 'is already a member' ' of group %(group)s' ) % {'member': member_dn, 'group': member_list_dn} ) except ldap.NO_SUCH_OBJECT: raise self._not_found(member_list_dn) def filter_query(self, hints, query=None): """Apply filtering to a query. :param hints: contains the list of filters, which may be None, indicating that there are no filters to be applied. If it's not None, then any filters satisfied here will be removed so that the caller will know if any filters remain to be applied. 
:param query: LDAP query into which to include filters :returns query: LDAP query, updated with any filters satisfied """ def build_filter(filter_): """Build a filter for the query. :param filter_: the dict that describes this filter :returns query: LDAP query term to be added """ ldap_attr = self.attribute_mapping[filter_['name']] val_esc = ldap.filter.escape_filter_chars(filter_['value']) if filter_['case_sensitive']: # NOTE(henry-nash): Although dependent on the schema being # used, most LDAP attributes are configured with case # insensitive matching rules, so we'll leave this to the # controller to filter. return if filter_['name'] == 'enabled': # NOTE(henry-nash): Due to the different options for storing # the enabled attribute (e,g, emulated or not), for now we # don't try and filter this at the driver level - we simply # leave the filter to be handled by the controller. It seems # unlikley that this will cause a signifcant performance # issue. return # TODO(henry-nash): Currently there are no booleans (other than # 'enabled' that is handled above) on which you can filter. If # there were, we would need to add special handling here to # convert the booleans values to 'TRUE' and 'FALSE'. To do that # we would also need to know which filter keys were actually # booleans (this is related to bug #1411478). if filter_['comparator'] == 'equals': query_term = '({attr}={val})'.format( attr=ldap_attr, val=val_esc, ) elif filter_['comparator'] == 'contains': query_term = '({attr}=*{val}*)'.format( attr=ldap_attr, val=val_esc, ) elif filter_['comparator'] == 'startswith': query_term = '({attr}={val}*)'.format( attr=ldap_attr, val=val_esc, ) elif filter_['comparator'] == 'endswith': query_term = '({attr}=*{val})'.format( attr=ldap_attr, val=val_esc, ) else: # It's a filter we don't understand, so let the caller # work out if they need to do something with it. 
return return query_term if query is None: # make sure query is a string so the ldap filter is properly # constructed from filter_list later query = '' if hints is None: return query filter_list = [] satisfied_filters = [] for filter_ in hints.filters: if filter_['name'] not in self.attribute_mapping: continue new_filter = build_filter(filter_) if new_filter is not None: filter_list.append(new_filter) satisfied_filters.append(filter_) if filter_list: query = '(&{}{})'.format(query, ''.join(filter_list)) # Remove satisfied filters, then the caller will know remaining filters for filter_ in satisfied_filters: hints.filters.remove(filter_) return query class EnabledEmuMixIn(BaseLdap): """Emulates boolean 'enabled' attribute if turned on. Creates a group holding all enabled objects of this class, all missing objects are considered disabled. Options: * $name_enabled_emulation - boolean, on/off * $name_enabled_emulation_dn - DN of that group, default is cn=enabled_${name}s,${tree_dn} * $name_enabled_emulation_use_group_config - boolean, on/off Where ${name}s is the plural of self.options_name ('users' or 'tenants'), ${tree_dn} is self.tree_dn. 
""" DEFAULT_GROUP_OBJECTCLASS = 'groupOfNames' DEFAULT_MEMBER_ATTRIBUTE = 'member' DEFAULT_GROUP_MEMBERS_ARE_IDS = False def __init__(self, conf): super().__init__(conf) enabled_emulation = '%s_enabled_emulation' % self.options_name self.enabled_emulation = getattr(conf.ldap, enabled_emulation) enabled_emulation_dn = '%s_enabled_emulation_dn' % self.options_name self.enabled_emulation_dn = getattr(conf.ldap, enabled_emulation_dn) use_group_config = ( '%s_enabled_emulation_use_group_config' % self.options_name ) self.use_group_config = getattr(conf.ldap, use_group_config) if not self.use_group_config: self.member_attribute = self.DEFAULT_MEMBER_ATTRIBUTE self.group_objectclass = self.DEFAULT_GROUP_OBJECTCLASS self.group_members_are_ids = self.DEFAULT_GROUP_MEMBERS_ARE_IDS else: self.member_attribute = conf.ldap.group_member_attribute self.group_objectclass = conf.ldap.group_objectclass self.group_members_are_ids = conf.ldap.group_members_are_ids if not self.enabled_emulation_dn: naming_attr_name = 'cn' naming_attr_value = 'enabled_%ss' % self.options_name sub_vals = (naming_attr_name, naming_attr_value, self.tree_dn) self.enabled_emulation_dn = '%s=%s,%s' % sub_vals naming_attr = (naming_attr_name, [naming_attr_value]) else: # Extract the attribute name and value from the configured DN. 
naming_dn = ldap.dn.str2dn(self.enabled_emulation_dn) naming_rdn = naming_dn[0][0] naming_attr = (naming_rdn[0], naming_rdn[1]) self.enabled_emulation_naming_attr = naming_attr def _id_to_member_attribute_value(self, object_id): """Convert id to value expected by member_attribute.""" if self.group_members_are_ids: return object_id return self._id_to_dn(object_id) def _is_id_enabled(self, object_id, conn): member_attr_val = self._id_to_member_attribute_value(object_id) return self._is_member_enabled(member_attr_val, conn) def _is_member_enabled(self, member_attr_val, conn): query = '({}={})'.format( self.member_attribute, ldap.filter.escape_filter_chars(member_attr_val), ) try: enabled_value = conn.search_s( self.enabled_emulation_dn, ldap.SCOPE_BASE, query, attrlist=DN_ONLY, ) except ldap.NO_SUCH_OBJECT: return False else: return bool(enabled_value) def _add_enabled(self, object_id): member_attr_val = self._id_to_member_attribute_value(object_id) with self.get_connection() as conn: if not self._is_member_enabled(member_attr_val, conn): modlist = [ (ldap.MOD_ADD, self.member_attribute, [member_attr_val]) ] try: conn.modify_s(self.enabled_emulation_dn, modlist) except ldap.NO_SUCH_OBJECT: attr_list = [ ('objectClass', [self.group_objectclass]), (self.member_attribute, [member_attr_val]), self.enabled_emulation_naming_attr, ] conn.add_s(self.enabled_emulation_dn, attr_list) def _remove_enabled(self, object_id): member_attr_val = self._id_to_member_attribute_value(object_id) modlist = [(ldap.MOD_DELETE, self.member_attribute, [member_attr_val])] with self.get_connection() as conn: try: conn.modify_s(self.enabled_emulation_dn, modlist) except (ldap.NO_SUCH_OBJECT, ldap.NO_SUCH_ATTRIBUTE): # nosec # It's already gone, good. 
pass def create(self, values): if self.enabled_emulation: enabled_value = values.pop('enabled', True) ref = super().create(values) if 'enabled' not in self.attribute_ignore: if enabled_value: self._add_enabled(ref['id']) ref['enabled'] = enabled_value return ref else: return super().create(values) def get(self, object_id, ldap_filter=None): with self.get_connection() as conn: ref = super().get(object_id, ldap_filter) if ( 'enabled' not in self.attribute_ignore and self.enabled_emulation ): ref['enabled'] = self._is_id_enabled(object_id, conn) return ref def get_all(self, ldap_filter=None, hints=None): hints = hints or driver_hints.Hints() if 'enabled' not in self.attribute_ignore and self.enabled_emulation: # had to copy BaseLdap.get_all here to ldap_filter by DN obj_list = [ self._ldap_res_to_model(x) for x in self._ldap_get_all(hints, ldap_filter) if x[0] != self.enabled_emulation_dn ] with self.get_connection() as conn: for obj_ref in obj_list: obj_ref['enabled'] = self._is_id_enabled( obj_ref['id'], conn ) return obj_list else: return super().get_all(ldap_filter, hints) def update(self, object_id, values, old_obj=None): if 'enabled' not in self.attribute_ignore and self.enabled_emulation: data = values.copy() enabled_value = data.pop('enabled', None) ref = super().update(object_id, data, old_obj) if enabled_value is not None: if enabled_value: self._add_enabled(object_id) else: self._remove_enabled(object_id) ref['enabled'] = enabled_value return ref else: return super().update(object_id, values, old_obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/ldap/core.py0000664000175000017500000004334500000000000023162 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import ldap.filter from oslo_log import log from oslo_log import versionutils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.backends import base from keystone.identity.backends.ldap import common as common_ldap from keystone.identity.backends.ldap import models CONF = keystone.conf.CONF LOG = log.getLogger(__name__) _DEPRECATION_MSG = ( '%s for the LDAP identity backend has been deprecated in ' 'the Mitaka release in favor of read-only identity LDAP ' 'access. It will be removed in the "O" release.' ) READ_ONLY_LDAP_ERROR_MESSAGE = _("LDAP does not support write operations") LDAP_MATCHING_RULE_IN_CHAIN = "1.2.840.113556.1.4.1941" class Identity(base.IdentityDriverBase): def __init__(self, conf=None): super().__init__() if conf is None: self.conf = CONF else: self.conf = conf self.user = UserApi(self.conf) self.group = GroupApi(self.conf) def is_domain_aware(self): return False def generates_uuids(self): return False # Identity interface def authenticate(self, user_id, password): try: user_ref = self._get_user(user_id) except exception.UserNotFound: raise AssertionError(_('Invalid user / password')) if not user_id or not password: raise AssertionError(_('Invalid user / password')) conn = None try: conn = self.user.get_connection( user_ref['dn'], password, end_user_auth=True ) if not conn: raise AssertionError(_('Invalid user / password')) except Exception: raise AssertionError(_('Invalid user / password')) finally: if conn: conn.unbind_s() return self.user.filter_attributes(user_ref) def 
_get_user(self, user_id): return self.user.get(user_id) def get_user(self, user_id): return self.user.get_filtered(user_id) def list_users(self, hints): return self.user.get_all_filtered(hints) def unset_default_project_id(self, project_id): # This function is not implemented for the LDAP backend. The LDAP # backend is readonly. self._disallow_write() def get_user_by_name(self, user_name, domain_id): # domain_id will already have been handled in the Manager layer, # parameter left in so this matches the Driver specification return self.user.filter_attributes(self.user.get_by_name(user_name)) def get_group(self, group_id): return self.group.get_filtered(group_id) def get_group_by_name(self, group_name, domain_id): # domain_id will already have been handled in the Manager layer, # parameter left in so this matches the Driver specification return self.group.get_filtered_by_name(group_name) def list_groups_for_user(self, user_id, hints): user_ref = self._get_user(user_id) if self.conf.ldap.group_members_are_ids: user_dn = user_ref['id'] else: user_dn = user_ref['dn'] return self.group.list_user_groups_filtered(user_dn, hints) def list_groups(self, hints): return self.group.get_all_filtered(hints) def _transform_group_member_ids(self, group_member_list): for user_key in group_member_list: if self.conf.ldap.group_members_are_ids: user_id = user_key else: user_id = self.user._dn_to_id(user_key) yield user_id def list_users_in_group(self, group_id, hints): users = [] group_members = self.group.list_group_users(group_id) for user_id in self._transform_group_member_ids(group_members): try: users.append(self.user.get_filtered(user_id)) except exception.UserNotFound: msg = ( 'Group member `%(user_id)s` for group `%(group_id)s`' ' not found in the directory. The user should be' ' removed from the group. The user will be ignored.' 
) LOG.debug(msg, dict(user_id=user_id, group_id=group_id)) return users def check_user_in_group(self, user_id, group_id): # Before doing anything, check that the user exists. This will raise # a not found error if the user doesn't exist so we avoid doing extra # work. self.get_user(user_id) member_list = self.group.list_group_users(group_id) for group_member_id in self._transform_group_member_ids(member_list): if group_member_id == user_id: break else: raise exception.NotFound( _("User '%(user_id)s' not found in group '%(group_id)s'") % {'user_id': user_id, 'group_id': group_id} ) # Unsupported methods def _disallow_write(self): if not common_ldap.WRITABLE: raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) def create_user(self, user_id, user): self._disallow_write() return self._create_user(user_id, user) def update_user(self, user_id, user): self._disallow_write() return self._update_user(user_id, user) def delete_user(self, user_id): raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) def reset_last_active(self): raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) def change_password(self, user_id, new_password): raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) def add_user_to_group(self, user_id, group_id): self._disallow_write() self._add_user_to_group(user_id, group_id) def remove_user_from_group(self, user_id, group_id): raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) def create_group(self, group_id, group): self._disallow_write() return self._create_group(group_id, group) def update_group(self, group_id, group): self._disallow_write() return self._update_group(group_id, group) def delete_group(self, group_id): raise exception.Forbidden(READ_ONLY_LDAP_ERROR_MESSAGE) # Test implementations def _create_user(self, user_id, user): msg = _DEPRECATION_MSG % "create_user" versionutils.report_deprecated_feature(LOG, msg) user_ref = self.user.create(user) return self.user.filter_attributes(user_ref) def _update_user(self, user_id, user): 
msg = _DEPRECATION_MSG % "update_user" versionutils.report_deprecated_feature(LOG, msg) old_obj = self.user.get(user_id) if 'name' in user and old_obj.get('name') != user['name']: raise exception.Conflict(_('Cannot change user name')) if self.user.enabled_mask: self.user.mask_enabled_attribute(user) elif self.user.enabled_invert and not self.user.enabled_emulation: # We need to invert the enabled value for the old model object # to prevent the LDAP update code from thinking that the enabled # values are already equal. user['enabled'] = not user['enabled'] old_obj['enabled'] = not old_obj['enabled'] self.user.update(user_id, user, old_obj) return self.user.get_filtered(user_id) def _create_group(self, group_id, group): msg = _DEPRECATION_MSG % "create_group" versionutils.report_deprecated_feature(LOG, msg) return common_ldap.filter_entity(self.group.create(group)) def _update_group(self, group_id, group): msg = _DEPRECATION_MSG % "update_group" versionutils.report_deprecated_feature(LOG, msg) return common_ldap.filter_entity(self.group.update(group_id, group)) def _add_user_to_group(self, user_id, group_id): msg = _DEPRECATION_MSG % "add_user_to_group" versionutils.report_deprecated_feature(LOG, msg) user_ref = self._get_user(user_id) user_dn = user_ref['dn'] self.group.add_user(user_dn, group_id, user_id) # TODO(termie): turn this into a data object and move logic to driver class UserApi(common_ldap.EnabledEmuMixIn, common_ldap.BaseLdap): DEFAULT_OU = 'ou=Users' DEFAULT_STRUCTURAL_CLASSES = ['person'] DEFAULT_ID_ATTR = 'cn' DEFAULT_OBJECTCLASS = 'inetOrgPerson' NotFound = exception.UserNotFound options_name = 'user' attribute_options_names = { 'password': 'pass', 'email': 'mail', 'name': 'name', 'description': 'description', 'enabled': 'enabled', 'default_project_id': 'default_project_id', } immutable_attrs = ['id'] model = models.User def __init__(self, conf): super().__init__(conf) self.enabled_mask = conf.ldap.user_enabled_mask self.enabled_default = 
conf.ldap.user_enabled_default self.enabled_invert = conf.ldap.user_enabled_invert self.enabled_emulation = conf.ldap.user_enabled_emulation def _ldap_res_to_model(self, res): obj = super()._ldap_res_to_model(res) if self.enabled_mask != 0: enabled = int(obj.get('enabled', self.enabled_default)) obj['enabled'] = (enabled & self.enabled_mask) != self.enabled_mask elif self.enabled_invert and not self.enabled_emulation: # This could be a bool or a string. If it's a string, # we need to convert it so we can invert it properly. enabled = obj.get('enabled', self.enabled_default) if isinstance(enabled, str): if enabled.lower() == 'true': enabled = True else: enabled = False obj['enabled'] = not enabled obj['dn'] = res[0] return obj def mask_enabled_attribute(self, values): value = values['enabled'] values.setdefault('enabled_nomask', int(self.enabled_default)) if value != ( (values['enabled_nomask'] & self.enabled_mask) != self.enabled_mask ): values['enabled_nomask'] ^= self.enabled_mask values['enabled'] = values['enabled_nomask'] del values['enabled_nomask'] def create(self, values): if 'options' in values: values.pop('options') # can't specify options if self.enabled_mask: orig_enabled = values['enabled'] self.mask_enabled_attribute(values) elif self.enabled_invert and not self.enabled_emulation: orig_enabled = values['enabled'] if orig_enabled is not None: values['enabled'] = not orig_enabled else: values['enabled'] = self.enabled_default values = super().create(values) if self.enabled_mask or ( self.enabled_invert and not self.enabled_emulation ): values['enabled'] = orig_enabled values['options'] = {} # options always empty return values def get(self, user_id, ldap_filter=None): obj = super().get(user_id, ldap_filter=ldap_filter) obj['options'] = {} # options always empty return obj def get_filtered(self, user_id): try: user = self.get(user_id) return self.filter_attributes(user) except ldap.NO_SUCH_OBJECT: raise self.NotFound(user_id=user_id) def get_all(self, 
ldap_filter=None, hints=None): objs = super().get_all(ldap_filter=ldap_filter, hints=hints) for obj in objs: obj['options'] = {} # options always empty return objs def get_all_filtered(self, hints): query = self.filter_query(hints, self.ldap_filter) return [ self.filter_attributes(user) for user in self.get_all(query, hints) ] def filter_attributes(self, user): return base.filter_user(common_ldap.filter_entity(user)) def is_user(self, dn): """Return True if the entry is a user.""" # NOTE(blk-u): It's easy to check if the DN is under the User tree, # but may not be accurate. A more accurate test would be to fetch the # entry to see if it's got the user objectclass, but this could be # really expensive considering how this is used. return common_ldap.dn_startswith(dn, self.tree_dn) def update(self, user_id, values, old_obj=None): if old_obj is None: old_obj = self.get(user_id) # don't support updating options if 'options' in old_obj: old_obj.pop('options') if 'options' in values: values.pop('options') values = super().update(user_id, values, old_obj) values['options'] = {} # options always empty return values class GroupApi(common_ldap.BaseLdap): DEFAULT_OU = 'ou=UserGroups' DEFAULT_STRUCTURAL_CLASSES = [] DEFAULT_OBJECTCLASS = 'groupOfNames' DEFAULT_ID_ATTR = 'cn' DEFAULT_MEMBER_ATTRIBUTE = 'member' NotFound = exception.GroupNotFound options_name = 'group' attribute_options_names = {'description': 'desc', 'name': 'name'} immutable_attrs = ['name'] model = models.Group def _ldap_res_to_model(self, res): model = super()._ldap_res_to_model(res) model['dn'] = res[0] return model def __init__(self, conf): super().__init__(conf) self.group_ad_nesting = conf.ldap.group_ad_nesting self.member_attribute = ( conf.ldap.group_member_attribute or self.DEFAULT_MEMBER_ATTRIBUTE ) def create(self, values): data = values.copy() if data.get('id') is None: data['id'] = uuid.uuid4().hex if 'description' in data and data['description'] in ['', None]: data.pop('description') return 
super().create(data) def update(self, group_id, values): old_obj = self.get(group_id) return super().update(group_id, values, old_obj) def add_user(self, user_dn, group_id, user_id): group_ref = self.get(group_id) group_dn = group_ref['dn'] try: super().add_member(user_dn, group_dn) except exception.Conflict: raise exception.Conflict( _('User %(user_id)s is already a member of group %(group_id)s') % {'user_id': user_id, 'group_id': group_id} ) def list_user_groups(self, user_dn): """Return a list of groups for which the user is a member.""" user_dn_esc = ldap.filter.escape_filter_chars(user_dn) if self.group_ad_nesting: query = '({}:{}:={})'.format( self.member_attribute, LDAP_MATCHING_RULE_IN_CHAIN, user_dn_esc, ) else: query = f'({self.member_attribute}={user_dn_esc})' return self.get_all(query) def list_user_groups_filtered(self, user_dn, hints): """Return a filtered list of groups for which the user is a member.""" user_dn_esc = ldap.filter.escape_filter_chars(user_dn) if self.group_ad_nesting: # Hardcoded to member as that is how the Matching Rule in Chain # Mechanisms expects it. The member_attribute might actually be # member_of elsewhere, so they are not the same. query = '(member:{}:={})'.format( LDAP_MATCHING_RULE_IN_CHAIN, user_dn_esc, ) else: query = f'({self.member_attribute}={user_dn_esc})' return self.get_all_filtered(hints, query) def list_group_users(self, group_id): """Return a list of user dns which are members of a group.""" group_ref = self.get(group_id) group_dn = group_ref['dn'] try: if self.group_ad_nesting: # NOTE(ayoung): LDAP_SCOPE is used here instead of hard- # coding to SCOPE_SUBTREE to get through the unit tests. # However, it is also probably more correct. 
attrs = self._ldap_get_list( self.tree_dn, self.LDAP_SCOPE, query_params={ "member:%s:" % LDAP_MATCHING_RULE_IN_CHAIN: group_dn }, attrlist=[self.member_attribute], ) else: attrs = self._ldap_get_list( group_dn, ldap.SCOPE_BASE, attrlist=[self.member_attribute] ) except ldap.NO_SUCH_OBJECT: raise self.NotFound(group_id=group_id) users = [] for dn, member in attrs: user_dns = member.get(self.member_attribute, []) for user_dn in user_dns: users.append(user_dn) return users def get_filtered(self, group_id): group = self.get(group_id) return common_ldap.filter_entity(group) def get_filtered_by_name(self, group_name): group = self.get_by_name(group_name) return common_ldap.filter_entity(group) def get_all_filtered(self, hints, query=None): if self.ldap_filter: query = (query or '') + self.ldap_filter query = self.filter_query(hints, query) return [ common_ldap.filter_entity(group) for group in self.get_all(query, hints) ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/ldap/models.py0000664000175000017500000000315000000000000023503 0ustar00zuulzuul00000000000000# Copyright (C) 2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Base model for keystone internal services. Unless marked otherwise, all fields are strings. 
""" class Model(dict): """Base model class.""" def __hash__(self): """Define hash behavior where hash of service ID is returned.""" return self['id'].__hash__() @property def known_keys(cls): return cls.required_keys + cls.optional_keys class User(Model): """User object. Required keys: id name domain_id Optional keys: password description email enabled (bool, default True) default_project_id """ required_keys = ('id', 'name', 'domain_id') optional_keys = ( 'password', 'description', 'email', 'enabled', 'default_project_id', ) class Group(Model): """Group object. Required keys: id name domain_id Optional keys: description """ required_keys = ('id', 'name', 'domain_id') optional_keys = ('description',) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/resource_options.py0000664000175000017500000001142700000000000024710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import resource_options from keystone.common.validation import parameter_types from keystone.i18n import _ def _mfa_rules_validator_list_of_lists_of_strings_no_duplicates(value): # NOTE(notmorgan): This should possibly validate that the auth-types # are enabled? For now it simply validates the following: # # Must be a list of lists, each sub list must be a list of strings # e.g. [['str1', 'str2'], ['str3', 'str4']] # No sub-list may be empty. 
Duplication of sub-lists and duplication of # string elements are not permitted. msg = _( 'Invalid data type, must be a list of lists comprised of strings. ' 'Sub-lists may not be duplicated. Strings in sub-lists may not be ' 'duplicated.' ) if not isinstance(value, list): # Value is not a List, TypeError raise TypeError(msg) sublists = [] for sublist in value: # Sublist element tracker is reset for each sublist. string_set = set() if not isinstance(sublist, list): # Sublist is not a List, TypeError raise TypeError(msg) if not sublist: # Sublist is Empty, ValueError raise ValueError(msg) if sublist in sublists: # Sublist is duplicated, ValueError raise ValueError(msg) # Add the sublist to the tracker sublists.append(sublist) for element in sublist: if not isinstance(element, str): # Element of sublist is not a string, TypeError raise TypeError(msg) if element in string_set: # Element of sublist is duplicated, ValueError raise ValueError(msg) # add element to the sublist element tracker string_set.add(element) USER_OPTIONS_REGISTRY = resource_options.ResourceOptionRegistry('USER') IGNORE_CHANGE_PASSWORD_OPT = resource_options.ResourceOption( option_id='1000', option_name='ignore_change_password_upon_first_use', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) IGNORE_PASSWORD_EXPIRY_OPT = resource_options.ResourceOption( option_id='1001', option_name='ignore_password_expiry', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) IGNORE_LOCKOUT_ATTEMPT_OPT = resource_options.ResourceOption( option_id='1002', option_name='ignore_lockout_failure_attempts', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) LOCK_PASSWORD_OPT = resource_options.ResourceOption( option_id='1003', option_name='lock_password', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) IGNORE_USER_INACTIVITY_OPT = 
resource_options.ResourceOption( option_id='1004', option_name='ignore_user_inactivity', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) MFA_RULES_OPT = resource_options.ResourceOption( option_id='MFAR', option_name='multi_factor_auth_rules', validator=_mfa_rules_validator_list_of_lists_of_strings_no_duplicates, json_schema_validation={ # List 'type': 'array', 'items': { # Of Lists 'type': 'array', 'items': { # Of Strings, each string must be unique, minimum 1 # element 'type': 'string', }, 'minItems': 1, 'uniqueItems': True, }, 'uniqueItems': True, }, ) MFA_ENABLED_OPT = resource_options.ResourceOption( option_id='MFAE', option_name='multi_factor_auth_enabled', validator=resource_options.boolean_validator, json_schema_validation=parameter_types.boolean, ) # NOTE(notmorgan): wrap this in a function for testing purposes. # This is called on import by design. def register_user_options(): for opt in [ IGNORE_CHANGE_PASSWORD_OPT, IGNORE_PASSWORD_EXPIRY_OPT, IGNORE_LOCKOUT_ATTEMPT_OPT, LOCK_PASSWORD_OPT, IGNORE_USER_INACTIVITY_OPT, MFA_RULES_OPT, MFA_ENABLED_OPT, ]: USER_OPTIONS_REGISTRY.register_option(opt) register_user_options() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/sql.py0000664000175000017500000004720400000000000022107 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime from oslo_db import api as oslo_db_api from oslo_utils import timeutils import sqlalchemy from keystone.common import driver_hints from keystone.common import password_hashing from keystone.common import resource_options from keystone.common import sql import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.backends import base from keystone.identity.backends import resource_options as options from keystone.identity.backends import sql_model as model CONF = keystone.conf.CONF def _stale_data_exception_checker(exc): return isinstance(exc, sqlalchemy.orm.exc.StaleDataError) class Identity(base.IdentityDriverBase): # NOTE(henry-nash): Override the __init__() method so as to take a # config parameter to enable sql to be used as a domain-specific driver. def __init__(self, conf=None): self.conf = conf super().__init__() @property def is_sql(self): return True def _check_password(self, password, user_ref): """Check the specified password against the data store. 
Note that we'll pass in the entire user_ref in case the subclass needs things like user_ref.get('name') For further justification, please see the follow up suggestion at https://blueprints.launchpad.net/keystone/+spec/sql-identiy-pam """ return password_hashing.check_password(password, user_ref.password) # Identity interface def authenticate(self, user_id, password): with sql.session_for_read() as session: try: user_ref = self._get_user(session, user_id) user_dict = base.filter_user(user_ref.to_dict()) except exception.UserNotFound: raise AssertionError(_('Invalid user / password')) if self._is_account_locked(user_id, user_ref): raise exception.AccountLocked(user_id=user_id) elif not self._check_password(password, user_ref): self._record_failed_auth(user_id) raise AssertionError(_('Invalid user / password')) elif not user_ref.enabled: raise exception.UserDisabled(user_id=user_id) elif user_ref.password_is_expired: raise exception.PasswordExpired(user_id=user_id) # successful auth, reset failed count if present if user_ref.local_user.failed_auth_count: self._reset_failed_auth(user_id) return user_dict def _is_account_locked(self, user_id, user_ref): """Check if the user account is locked. Checks if the user account is locked based on the number of failed authentication attempts. 
:param user_id: The user ID :param user_ref: Reference to the user object :returns Boolean: True if the account is locked; False otherwise """ ignore_option = user_ref.get_resource_option( options.IGNORE_LOCKOUT_ATTEMPT_OPT.option_id ) if ignore_option and ignore_option.option_value is True: return False attempts = user_ref.local_user.failed_auth_count or 0 max_attempts = CONF.security_compliance.lockout_failure_attempts lockout_duration = CONF.security_compliance.lockout_duration if max_attempts and (attempts >= max_attempts): if not lockout_duration: return True else: delta = datetime.timedelta(seconds=lockout_duration) last_failure = user_ref.local_user.failed_auth_at if (last_failure + delta) > timeutils.utcnow(): return True else: self._reset_failed_auth(user_id) return False def _record_failed_auth(self, user_id): with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) if not user_ref.local_user.failed_auth_count: user_ref.local_user.failed_auth_count = 0 user_ref.local_user.failed_auth_count += 1 user_ref.local_user.failed_auth_at = timeutils.utcnow() def _reset_failed_auth(self, user_id): with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) user_ref.local_user.failed_auth_count = 0 user_ref.local_user.failed_auth_at = None # user crud @sql.handle_conflicts(conflict_type='user') def create_user(self, user_id, user): with sql.session_for_write() as session: user_ref = model.User.from_dict(user) if self._change_password_required(user_ref): user_ref.password_ref.expires_at = timeutils.utcnow() user_ref.created_at = timeutils.utcnow() session.add(user_ref) # Set resource options passed on creation resource_options.resource_options_ref_to_mapper( user_ref, model.UserOption ) return base.filter_user(user_ref.to_dict()) def _change_password_required(self, user): if not CONF.security_compliance.change_password_upon_first_use: return False ignore_option = user.get_resource_option( 
options.IGNORE_CHANGE_PASSWORD_OPT.option_id ) return not (ignore_option and ignore_option.option_value is True) def _create_password_expires_query(self, session, query, hints): for filter_ in hints.filters: if 'password_expires_at' == filter_['name']: # Filter on users who's password expires based on the operator # specified in `filter_['comparator']` query = query.filter( sqlalchemy.and_( model.LocalUser.id == model.Password.local_user_id, filter_['comparator']( model.Password.expires_at, filter_['value'] ), ) ) # Removes the `password_expired_at` filters so there are no errors # if the call is filtered further. This is because the # `password_expires_at` value is not stored in the `User` table but # derived from the `Password` table's value `expires_at`. hints.filters = [ x for x in hints.filters if x['name'] != 'password_expires_at' ] return query, hints @staticmethod def _apply_limits_to_list(collection, hints): if not hints.limit: return collection return collection[: hints.limit['limit']] @driver_hints.truncated def list_users(self, hints): with sql.session_for_read() as session: query = session.query(model.User).outerjoin(model.LocalUser) query, hints = self._create_password_expires_query( session, query, hints ) user_refs = sql.filter_limit_query(model.User, query, hints) return [base.filter_user(x.to_dict()) for x in user_refs] def unset_default_project_id(self, project_id): with sql.session_for_write() as session: query = session.query(model.User) query = query.filter(model.User.default_project_id == project_id) for user in query: user.default_project_id = None def _get_user(self, session, user_id): user_ref = session.get(model.User, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return user_ref def get_user(self, user_id): with sql.session_for_read() as session: return base.filter_user(self._get_user(session, user_id).to_dict()) def get_user_by_name(self, user_name, domain_id): with sql.session_for_read() as session: query = 
session.query(model.User).join(model.LocalUser) query = query.filter( sqlalchemy.and_( model.LocalUser.name == user_name, model.LocalUser.domain_id == domain_id, ) ) try: user_ref = query.one() except sql.NotFound: raise exception.UserNotFound(user_id=user_name) return base.filter_user(user_ref.to_dict()) @sql.handle_conflicts(conflict_type='user') # Explicitly retry on StaleDataErrors, which can happen if two clients # update the same user's password and the second client has stale password # information. @oslo_db_api.wrap_db_retry(exception_checker=_stale_data_exception_checker) def update_user(self, user_id, user): with sql.session_for_write() as session: user_ref = self._get_user(session, user_id) old_user_dict = user_ref.to_dict() for k in user: old_user_dict[k] = user[k] new_user = model.User.from_dict(old_user_dict) for attr in model.User.attributes: if attr not in model.User.readonly_attributes: setattr(user_ref, attr, getattr(new_user, attr)) # Move the "_resource_options" attribute over to the real user_ref # so that resource_options.resource_options_ref_to_mapper can # handle the work. setattr( user_ref, '_resource_options', getattr(new_user, '_resource_options', {}), ) # Move options into the proper attribute mapper construct resource_options.resource_options_ref_to_mapper( user_ref, model.UserOption ) if 'password' in user: user_ref.password = user['password'] if self._change_password_required(user_ref): expires_now = timeutils.utcnow() user_ref.password_ref.expires_at = expires_now user_ref.extra = new_user.extra return base.filter_user(user_ref.to_dict(include_extra_dict=True)) def _validate_password_history(self, password, user_ref): unique_cnt = CONF.security_compliance.unique_last_password_count # Validate the new password against the remaining passwords. 
if unique_cnt > 0: for password_ref in user_ref.local_user.passwords[-unique_cnt:]: if password_hashing.check_password( password, password_ref.password_hash ): raise exception.PasswordHistoryValidationError( unique_count=unique_cnt ) def change_password(self, user_id, new_password): with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) lock_pw_opt = user_ref.get_resource_option( options.LOCK_PASSWORD_OPT.option_id ) if lock_pw_opt is not None and lock_pw_opt.option_value is True: raise exception.PasswordSelfServiceDisabled() if user_ref.password_ref and user_ref.password_ref.self_service: self._validate_minimum_password_age(user_ref) self._validate_password_history(new_password, user_ref) user_ref.password = new_password user_ref.password_ref.self_service = True def _validate_minimum_password_age(self, user_ref): min_age_days = CONF.security_compliance.minimum_password_age min_age = user_ref.password_created_at + datetime.timedelta( days=min_age_days ) if timeutils.utcnow() < min_age: days_left = (min_age - timeutils.utcnow()).days raise exception.PasswordAgeValidationError( min_age_days=min_age_days, days_left=days_left ) def add_user_to_group(self, user_id, group_id): with sql.session_for_write() as session: self.get_group(group_id) self.get_user(user_id) query = session.query(model.UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) rv = query.first() if rv: return session.add( model.UserGroupMembership(user_id=user_id, group_id=group_id) ) def check_user_in_group(self, user_id, group_id): with sql.session_for_read() as session: self.get_group(group_id) self.get_user(user_id) # Note(knikolla): Check for normal group membership query = session.query(model.UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) if query.first(): return # Note(knikolla): Check for expiring group membership query = 
session.query(model.ExpiringUserGroupMembership) query = query.filter( model.ExpiringUserGroupMembership.user_id == user_id ) query = query.filter( model.ExpiringUserGroupMembership.group_id == group_id ) active = [q for q in query.all() if not q.expired] if active: return raise exception.NotFound( _("User '%(user_id)s' not found in group '%(group_id)s'") % {'user_id': user_id, 'group_id': group_id} ) def remove_user_from_group(self, user_id, group_id): # We don't check if user or group are still valid and let the remove # be tried anyway - in case this is some kind of clean-up operation with sql.session_for_write() as session: query = session.query(model.UserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) membership_ref = query.first() if membership_ref is None: # Check if the group and user exist to return descriptive # exceptions. self.get_group(group_id) self.get_user(user_id) raise exception.NotFound( _( "User '%(user_id)s' not found in" " group '%(group_id)s'" ) % {'user_id': user_id, 'group_id': group_id} ) session.delete(membership_ref) def list_groups_for_user(self, user_id, hints): def row_to_group_dict(row): group = row.group.to_dict() group['membership_expires_at'] = row.expires return group with sql.session_for_read() as session: self.get_user(user_id) query = session.query(model.Group).join(model.UserGroupMembership) query = query.filter(model.UserGroupMembership.user_id == user_id) query = sql.filter_limit_query(model.Group, query, hints) groups = [g.to_dict() for g in query] # Note(knikolla): We must use the ExpiringGroupMembership model # so that we can access the expired property. 
query = session.query(model.ExpiringUserGroupMembership) query = query.filter( model.ExpiringUserGroupMembership.user_id == user_id ) query = sql.filter_limit_query( model.UserGroupMembership, query, hints ) expiring_groups = [ row_to_group_dict(r) for r in query.all() if not r.expired ] # Note(knikolla): I would have loved to be able to merge the two # queries together and use filter_limit_query on the union, but # I haven't found a generic way to express expiration in a SQL # query, therefore we have to apply the limits here again. return self._apply_limits_to_list(groups + expiring_groups, hints) def list_users_in_group(self, group_id, hints): with sql.session_for_read() as session: self.get_group(group_id) query = session.query(model.User).outerjoin(model.LocalUser) query = query.join(model.UserGroupMembership) query = query.filter( model.UserGroupMembership.group_id == group_id ) query, hints = self._create_password_expires_query( session, query, hints ) query = sql.filter_limit_query(model.User, query, hints) return [base.filter_user(u.to_dict()) for u in query] @oslo_db_api.wrap_db_retry(retry_on_deadlock=True) def delete_user(self, user_id): with sql.session_for_write() as session: ref = self._get_user(session, user_id) q = session.query(model.UserGroupMembership) q = q.filter_by(user_id=user_id) q.delete(False) session.delete(ref) def reset_last_active(self): with sql.session_for_write() as session: session.query(model.User).filter( model.User.last_active_at.is_(None).update( {'last_active_at': timeutils.utcnow()} ) ) # group crud @sql.handle_conflicts(conflict_type='group') def create_group(self, group_id, group): with sql.session_for_write() as session: ref = model.Group.from_dict(group) session.add(ref) return ref.to_dict() @driver_hints.truncated def list_groups(self, hints): with sql.session_for_read() as session: query = session.query(model.Group) refs = sql.filter_limit_query(model.Group, query, hints) return [ref.to_dict() for ref in refs] def 
_get_group(self, session, group_id): ref = session.get(model.Group, group_id) if not ref: raise exception.GroupNotFound(group_id=group_id) return ref def get_group(self, group_id): with sql.session_for_read() as session: return self._get_group(session, group_id).to_dict() def get_group_by_name(self, group_name, domain_id): with sql.session_for_read() as session: query = session.query(model.Group) query = query.filter_by(name=group_name) query = query.filter_by(domain_id=domain_id) try: group_ref = query.one() except sql.NotFound: raise exception.GroupNotFound(group_id=group_name) return group_ref.to_dict() @sql.handle_conflicts(conflict_type='group') def update_group(self, group_id, group): with sql.session_for_write() as session: ref = self._get_group(session, group_id) old_dict = ref.to_dict() for k in group: old_dict[k] = group[k] new_group = model.Group.from_dict(old_dict) for attr in model.Group.attributes: if attr != 'id': setattr(ref, attr, getattr(new_group, attr)) ref.extra = new_group.extra return ref.to_dict() def delete_group(self, group_id): with sql.session_for_write() as session: ref = self._get_group(session, group_id) q = session.query(model.UserGroupMembership) q = q.filter_by(group_id=group_id) q.delete(False) session.delete(ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/backends/sql_model.py0000664000175000017500000004355100000000000023270 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import typing as ty from oslo_utils import timeutils import sqlalchemy from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy import orm from sqlalchemy.orm import collections from keystone.common import password_hashing from keystone.common import resource_options from keystone.common import sql import keystone.conf from keystone.identity.backends import resource_options as iro CONF = keystone.conf.CONF class User(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'user' attributes = [ 'id', 'name', 'domain_id', 'password', 'enabled', 'default_project_id', 'password_expires_at', ] readonly_attributes = ['id', 'password_expires_at', 'password'] resource_options_registry = iro.USER_OPTIONS_REGISTRY id = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) _enabled = sql.Column('enabled', sql.Boolean) extra = sql.Column(sql.JsonBlob()) default_project_id = sql.Column(sql.String(64), index=True) _resource_option_mapper = orm.relationship( 'UserOption', single_parent=True, cascade='all,delete,delete-orphan', lazy='subquery', backref='user', collection_class=collections.attribute_mapped_collection('option_id'), ) local_user = orm.relationship( 'LocalUser', uselist=False, single_parent=True, lazy='joined', cascade='all,delete-orphan', backref='user', ) federated_users = orm.relationship( 'FederatedUser', single_parent=True, lazy='joined', cascade='all,delete-orphan', backref='user', ) nonlocal_user = orm.relationship( 'NonLocalUser', uselist=False, single_parent=True, lazy='joined', cascade='all,delete-orphan', backref='user', ) expiring_user_group_memberships = orm.relationship( 'ExpiringUserGroupMembership', cascade='all, delete-orphan', backref="user", ) created_at = sql.Column(sql.DateTime, nullable=True) last_active_at = sql.Column(sql.Date, nullable=True) # unique constraint 
needed here to support composite fk constraints __table_args__: ty.Any = ( sql.UniqueConstraint('id', 'domain_id'), {}, ) # NOTE(stevemar): we use a hybrid property here because we leverage the # expression method, see `@name.expression` and `LocalUser.name` below. @hybrid_property def name(self): """Return the current user name.""" if self.local_user: return self.local_user.name elif self.nonlocal_user: return self.nonlocal_user.name elif self.federated_users: return self.federated_users[0].display_name else: return None @name.setter # type: ignore[no-redef] def name(self, value): if self.federated_users: self.federated_users[0].display_name = value elif self.local_user: self.local_user.name = value else: self.local_user = LocalUser() self.local_user.name = value @name.expression # type: ignore[no-redef] def name(cls): return LocalUser.name # password properties @property def password_ref(self): """Return the current password ref.""" if self.local_user and self.local_user.passwords: return self.local_user.passwords[-1] return None # NOTE(stevemar): we use a hybrid property here because we leverage the # expression method, see `@password.expression` and `Password.password` # below. 
@hybrid_property def password(self): """Return the current password.""" if self.password_ref: return self.password_ref.password_hash return None @property def password_created_at(self): """Return when password was created at.""" if self.password_ref: return self.password_ref.created_at return None @property def password_expires_at(self): """Return when password expires at.""" if self.password_ref: return self.password_ref.expires_at return None @property def password_is_expired(self): """Return whether password is expired or not.""" if self.password_expires_at and not self._password_expiry_exempt(): return timeutils.utcnow() >= self.password_expires_at return False @password.setter # type: ignore[no-redef] def password(self, value): now = timeutils.utcnow() if not self.local_user: self.local_user = LocalUser() # truncate extra passwords if self.local_user.passwords: unique_cnt = CONF.security_compliance.unique_last_password_count unique_cnt = unique_cnt + 1 if unique_cnt == 0 else unique_cnt self.local_user.passwords = self.local_user.passwords[-unique_cnt:] # set all previous passwords to be expired for ref in self.local_user.passwords: if not ref.expires_at or ref.expires_at > now: ref.expires_at = now new_password_ref = Password() hashed_passwd = None if value is not None: # NOTE(notmorgan): hash the passwords, never directly bind the # "value" in the unhashed form to hashed_passwd to ensure the # unhashed password cannot end up in the db. If an unhashed # password ends up in the DB, it cannot be used for auth, it is # however incorrect and could leak user credentials (due to users # doing insecure things such as sharing passwords across # different systems) to unauthorized parties. 
hashed_passwd = password_hashing.hash_password(value) new_password_ref.password_hash = hashed_passwd new_password_ref.created_at = now new_password_ref.expires_at = self._get_password_expires_at(now) self.local_user.passwords.append(new_password_ref) def _password_expiry_exempt(self): # Get the IGNORE_PASSWORD_EXPIRY_OPT value from the user's # option_mapper. return getattr( self.get_resource_option(iro.IGNORE_PASSWORD_EXPIRY_OPT.option_id), 'option_value', False, ) def _get_password_expires_at(self, created_at): expires_days = CONF.security_compliance.password_expires_days if not self._password_expiry_exempt(): if expires_days: expired_date = created_at + datetime.timedelta( days=expires_days ) return expired_date.replace(microsecond=0) return None @password.expression # type: ignore[no-redef] def password(cls): return Password.password_hash # NOTE(stevemar): we use a hybrid property here because we leverage the # expression method, see `@enabled.expression` and `User._enabled` below. @hybrid_property def enabled(self): """Return whether user is enabled or not.""" if self._enabled: max_days = ( CONF.security_compliance.disable_user_account_days_inactive ) inactivity_exempt = getattr( self.get_resource_option( iro.IGNORE_USER_INACTIVITY_OPT.option_id ), 'option_value', False, ) last_active = self.last_active_at if not last_active and self.created_at: last_active = self.created_at.date() if max_days and last_active: now = timeutils.utcnow().date() days_inactive = (now - last_active).days if days_inactive >= max_days and not inactivity_exempt: self._enabled = False return self._enabled @enabled.setter # type: ignore[no-redef] def enabled(self, value): if ( value and CONF.security_compliance.disable_user_account_days_inactive ): self.last_active_at = timeutils.utcnow().date() if value and self.local_user: self.local_user.failed_auth_count = 0 self.local_user.failed_auth_at = None self._enabled = value @enabled.expression # type: ignore[no-redef] def enabled(cls): 
return User._enabled def get_resource_option(self, option_id): if option_id in self._resource_option_mapper.keys(): return self._resource_option_mapper[option_id] return None def to_dict(self, include_extra_dict=False): d = super().to_dict(include_extra_dict=include_extra_dict) if 'default_project_id' in d and d['default_project_id'] is None: del d['default_project_id'] # NOTE(notmorgan): Eventually it may make sense to drop the empty # option dict creation to the superclass (if enough models use it) d['options'] = resource_options.ref_mapper_to_dict_options(self) return d @classmethod def from_dict(cls, user_dict): """Override from_dict to remove password_expires_at attribute. Overriding this method to remove password_expires_at attribute to support update_user and unit tests where password_expires_at inadvertently gets added by calling to_dict followed by from_dict. :param user_dict: User entity dictionary :returns User: User object """ new_dict = user_dict.copy() resource_options = {} options = new_dict.pop('options', {}) password_expires_at_key = 'password_expires_at' # nosec if password_expires_at_key in user_dict: del new_dict[password_expires_at_key] for opt in cls.resource_options_registry.options: if opt.option_name in options: opt_value = options[opt.option_name] # NOTE(notmorgan): None is always a valid type if opt_value is not None: opt.validator(opt_value) resource_options[opt.option_id] = opt_value user_obj = super().from_dict(new_dict) setattr(user_obj, '_resource_options', resource_options) return user_obj class LocalUser(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'local_user' attributes = ['id', 'user_id', 'domain_id', 'name'] id = sql.Column(sql.Integer, primary_key=True) user_id = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column(sql.String(64), nullable=False) name = sql.Column(sql.String(255), nullable=False) passwords = orm.relationship( 'Password', single_parent=True, cascade='all,delete-orphan', lazy='joined', 
backref='local_user', order_by='Password.created_at_int', ) failed_auth_count = sql.Column(sql.Integer, nullable=True) failed_auth_at = sql.Column(sql.DateTime, nullable=True) __table_args__ = ( sql.UniqueConstraint('user_id'), sql.UniqueConstraint('domain_id', 'name'), sqlalchemy.ForeignKeyConstraint( ['user_id', 'domain_id'], ['user.id', 'user.domain_id'], onupdate='CASCADE', ondelete='CASCADE', ), ) class Password(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'password' attributes = [ 'id', 'local_user_id', 'password_hash', 'created_at', 'expires_at', ] id = sql.Column(sql.Integer, primary_key=True) local_user_id = sql.Column( sql.Integer, sql.ForeignKey('local_user.id', ondelete='CASCADE'), nullable=False, ) password_hash = sql.Column(sql.String(255), nullable=True) # TODO(lbragstad): Once Rocky opens for development, the _created_at and # _expires_at attributes/columns can be removed from the schema. The # migration ensures all passwords are converted from datetime objects to # big integers. The old datetime columns and their corresponding attributes # in the model are no longer required. # created_at default set here to safe guard in case it gets missed _created_at = sql.Column( 'created_at', sql.DateTime, nullable=False, default=timeutils.utcnow, ) _expires_at = sql.Column('expires_at', sql.DateTime, nullable=True) # set the default to 0, a 0 indicates it is unset. 
created_at_int = sql.Column( sql.DateTimeInt(), nullable=False, default=0, server_default='0', ) expires_at_int = sql.Column(sql.DateTimeInt(), nullable=True) self_service = sql.Column( sql.Boolean, default=False, nullable=False, server_default='0', ) @hybrid_property def created_at(self): return self.created_at_int or self._created_at @created_at.setter # type: ignore[no-redef] def created_at(self, value): self._created_at = value self.created_at_int = value @hybrid_property def expires_at(self): return self.expires_at_int or self._expires_at @expires_at.setter # type: ignore[no-redef] def expires_at(self, value): self._expires_at = value self.expires_at_int = value class FederatedUser(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'federated_user' attributes = [ 'id', 'user_id', 'idp_id', 'protocol_id', 'unique_id', 'display_name', ] id = sql.Column(sql.Integer, primary_key=True) user_id = sql.Column( sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE'), nullable=False, ) idp_id = sql.Column( sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), nullable=False, ) protocol_id = sql.Column(sql.String(64), nullable=False) unique_id = sql.Column(sql.String(255), nullable=False) display_name = sql.Column(sql.String(255), nullable=True) __table_args__ = ( sql.UniqueConstraint('idp_id', 'protocol_id', 'unique_id'), sqlalchemy.ForeignKeyConstraint( ['protocol_id', 'idp_id'], ['federation_protocol.id', 'federation_protocol.idp_id'], ondelete='CASCADE', ), ) class NonLocalUser(sql.ModelBase, sql.ModelDictMixin): """SQL data model for nonlocal users (LDAP and custom).""" __tablename__ = 'nonlocal_user' attributes = ['domain_id', 'name', 'user_id'] domain_id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), primary_key=True) user_id = sql.Column(sql.String(64), nullable=False) __table_args__ = ( sql.UniqueConstraint('user_id'), sqlalchemy.ForeignKeyConstraint( ['user_id', 'domain_id'], ['user.id', 
'user.domain_id'], onupdate='CASCADE', ondelete='CASCADE', ), ) class Group(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'group' attributes = ['id', 'name', 'domain_id', 'description'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column(sql.String(64), nullable=False) description = sql.Column(sql.Text()) extra = sql.Column(sql.JsonBlob()) expiring_user_group_memberships = orm.relationship( 'ExpiringUserGroupMembership', cascade='all, delete-orphan', backref="group", ) # Unique constraint across two columns to create the separation # rather than just only 'name' being unique __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),) class UserGroupMembership(sql.ModelBase, sql.ModelDictMixin): """Group membership join table.""" __tablename__ = 'user_group_membership' user_id = sql.Column( sql.String(64), sql.ForeignKey('user.id'), primary_key=True ) group_id = sql.Column( sql.String(64), sql.ForeignKey('group.id'), primary_key=True ) class ExpiringUserGroupMembership(sql.ModelBase, sql.ModelDictMixin): """Expiring group membership through federation mapping rules.""" __tablename__ = 'expiring_user_group_membership' user_id = sql.Column( sql.String(64), sql.ForeignKey('user.id'), primary_key=True ) group_id = sql.Column( sql.String(64), sql.ForeignKey('group.id'), primary_key=True ) idp_id = sql.Column( sql.String(64), sql.ForeignKey('identity_provider.id', ondelete='CASCADE'), primary_key=True, ) last_verified = sql.Column(sql.DateTime, nullable=False) @hybrid_property def expires(self): ttl = self.idp.authorization_ttl if not ttl: ttl = CONF.federation.default_authorization_ttl return self.last_verified + datetime.timedelta(minutes=ttl) @hybrid_property def expired(self): return self.expires <= timeutils.utcnow() class UserOption(sql.ModelBase): __tablename__ = 'user_option' user_id = sql.Column( sql.String(64), sql.ForeignKey('user.id', ondelete='CASCADE'), nullable=False, 
primary_key=True, ) option_id = sql.Column(sql.String(4), nullable=False, primary_key=True) option_value = sql.Column(sql.JsonBlob, nullable=True) def __init__(self, option_id, option_value): self.option_id = option_id self.option_value = option_value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/core.py0000664000175000017500000022560200000000000020466 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Identity service.""" import copy import functools import itertools import operator import os import threading import uuid from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from pycadf import reason import stevedore from keystone import assignment # TODO(lbragstad): Decouple this dependency from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api from keystone.common.validation import validators import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.mapping_backends import mapping from keystone import notifications CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs MEMOIZE = cache.get_memoization_decorator(group='identity') ID_MAPPING_REGION = cache.create_region(name='id mapping') MEMOIZE_ID_MAPPING = cache.get_memoization_decorator( group='identity', region=ID_MAPPING_REGION ) DOMAIN_CONF_FHEAD = 'keystone.' DOMAIN_CONF_FTAIL = '.conf' # The number of times we will attempt to register a domain to use the SQL # driver, if we find that another process is in the middle of registering or # releasing at the same time as us. REGISTRATION_ATTEMPTS = 10 # Config Registration Types SQL_DRIVER = 'SQL' def get_driver(namespace, driver_name, *args): """Get identity driver without initializing. The method is invoked to be able to introspect domain specific driver looking for additional configuration options required by the driver. 
""" try: driver_manager = stevedore.DriverManager( namespace, driver_name, invoke_on_load=False, invoke_args=args ) return driver_manager.driver except stevedore.exception.NoMatches: msg = _('Unable to find %(name)r driver in %(namespace)r.') raise ImportError(msg % {'name': driver_name, 'namespace': namespace}) class DomainConfigs(provider_api.ProviderAPIMixin, dict): """Discover, store and provide access to domain specific configs. The setup_domain_drivers() call will be made via the wrapper from the first call to any driver function handled by this manager. Domain specific configurations are only supported for the identity backend and the individual configurations are either specified in the resource database or in individual domain configuration files, depending on the setting of the 'domain_configurations_from_database' config option. The result will be that for each domain with a specific configuration, this class will hold a reference to a ConfigOpts and driver object that the identity manager and driver can use. """ configured = False driver = None _any_sql = False lock = threading.Lock() def _load_driver(self, domain_config): return manager.load_driver( Manager.driver_namespace, domain_config['cfg'].identity.driver, domain_config['cfg'], ) def _load_config_from_file(self, resource_api, file_list, domain_name): def _assert_no_more_than_one_sql_driver(new_config, config_file): """Ensure there is no more than one sql driver. Check to see if the addition of the driver in this new config would cause there to be more than one sql driver. """ if new_config['driver'].is_sql and ( self.driver.is_sql or self._any_sql ): # The addition of this driver would cause us to have more than # one sql driver, so raise an exception. 
raise exception.MultipleSQLDriversInConfig(source=config_file) self._any_sql = self._any_sql or new_config['driver'].is_sql try: domain_ref = resource_api.get_domain_by_name(domain_name) except exception.DomainNotFound: LOG.warning( 'Invalid domain name (%s) found in config file name', domain_name, ) return # Create a new entry in the domain config dict, which contains # a new instance of both the conf environment and driver using # options defined in this set of config files. Later, when we # service calls via this Manager, we'll index via this domain # config dict to make sure we call the right driver domain_config = {} domain_config['cfg'] = cfg.ConfigOpts() keystone.conf.configure(conf=domain_config['cfg']) domain_config['cfg']( args=[], project='keystone', default_config_files=file_list, default_config_dirs=[], ) domain_config['driver'] = self._load_driver(domain_config) _assert_no_more_than_one_sql_driver(domain_config, file_list) self[domain_ref['id']] = domain_config def _setup_domain_drivers_from_files(self, standard_driver, resource_api): """Read the domain specific configuration files and load the drivers. 
Domain configuration files are stored in the domain config directory, and must be named of the form: keystone..conf For each file, call the load config method where the domain_name will be turned into a domain_id and then: - Create a new config structure, adding in the specific additional options defined in this config file - Initialise a new instance of the required driver with this new config """ conf_dir = CONF.identity.domain_config_dir if not os.path.exists(conf_dir): LOG.warning( 'Unable to locate domain config directory: %s', conf_dir ) return for r, d, f in os.walk(conf_dir): for fname in f: if fname.startswith(DOMAIN_CONF_FHEAD) and fname.endswith( DOMAIN_CONF_FTAIL ): if fname.count('.') >= 2: self._load_config_from_file( resource_api, [os.path.join(r, fname)], fname[ len(DOMAIN_CONF_FHEAD) : -len( DOMAIN_CONF_FTAIL ) ], ) else: LOG.debug( ( 'Ignoring file (%s) while scanning domain ' 'config directory' ), fname, ) def _load_config_from_database(self, domain_id, specific_config): def _assert_no_more_than_one_sql_driver(domain_id, new_config): """Ensure adding driver doesn't push us over the limit of 1. The checks we make in this method need to take into account that we may be in a multiple process configuration and ensure that any race conditions are avoided. """ if not new_config['driver'].is_sql: PROVIDERS.domain_config_api.release_registration(domain_id) return # To ensure the current domain is the only SQL driver, we attempt # to register our use of SQL. If we get it we know we are good, # if we fail to register it then we should: # # - First check if another process has registered for SQL for our # domain, in which case we are fine # - If a different domain has it, we should check that this domain # is still valid, in case, for example, domain deletion somehow # failed to remove its registration (i.e. we self heal for these # kinds of issues). 
domain_registered = 'Unknown' for attempt in range(REGISTRATION_ATTEMPTS): if PROVIDERS.domain_config_api.obtain_registration( domain_id, SQL_DRIVER ): LOG.debug( 'Domain %s successfully registered to use the ' 'SQL driver.', domain_id, ) return # We failed to register our use, let's find out who is using it try: domain_registered = ( PROVIDERS.domain_config_api.read_registration( SQL_DRIVER ) ) except exception.ConfigRegistrationNotFound: msg = ( 'While attempting to register domain %(domain)s to ' 'use the SQL driver, another process released it, ' 'retrying (attempt %(attempt)s).' ) LOG.debug( msg, {'domain': domain_id, 'attempt': attempt + 1} ) continue if domain_registered == domain_id: # Another process already registered it for us, so we are # fine. In the race condition when another process is # in the middle of deleting this domain, we know the domain # is already disabled and hence telling the caller that we # are registered is benign. LOG.debug( 'While attempting to register domain %s to use ' 'the SQL driver, found that another process had ' 'already registered this domain. This is normal ' 'in multi-process configurations.', domain_id, ) return # So we don't have it, but someone else does...let's check that # this domain is still valid try: PROVIDERS.resource_api.get_domain(domain_registered) except exception.DomainNotFound: msg = ( 'While attempting to register domain %(domain)s to ' 'use the SQL driver, found that it was already ' 'registered to a domain that no longer exists ' '(%(old_domain)s). Removing this stale ' 'registration and retrying (attempt %(attempt)s).' ) LOG.debug( msg, { 'domain': domain_id, 'old_domain': domain_registered, 'attempt': attempt + 1, }, ) PROVIDERS.domain_config_api.release_registration( domain_registered, type=SQL_DRIVER ) continue # The domain is valid, so we really do have an attempt at more # than one SQL driver. 
details = ( _('Config API entity at /domains/%s/config') % domain_id ) raise exception.MultipleSQLDriversInConfig(source=details) # We fell out of the loop without either registering our domain or # being able to find who has it...either we were very very very # unlucky or something is awry. msg = _( 'Exceeded attempts to register domain %(domain)s to use ' 'the SQL driver, the last domain that appears to have ' 'had it is %(last_domain)s, giving up' ) % {'domain': domain_id, 'last_domain': domain_registered} raise exception.UnexpectedError(msg) domain_config = {} domain_config['cfg'] = cfg.ConfigOpts() keystone.conf.configure(conf=domain_config['cfg']) domain_config['cfg']( args=[], project='keystone', default_config_files=[], default_config_dirs=[], ) # Try to identify the required driver for the domain to let it register # supported configuration options. In difference to the FS based # configuration this is being set through `oslo_cfg.set_override` and # thus require special treatment. try: driver_name = specific_config.get("identity", {}).get( "driver", domain_config["cfg"].identity.driver ) # For the non in-tree drivers ... if driver_name not in ["sql", "ldap"]: # Locate the driver without invoking ... driver = get_driver(Manager.driver_namespace, driver_name) # Check whether it wants to register additional config options # ... if hasattr(driver, "register_opts"): # And register them for the domain_config (not the global # Keystone config) driver.register_opts(domain_config["cfg"]) except Exception as ex: # If we failed for some reason - something wrong with the driver, # so let's just skip registering config options. This matches older # behavior of Keystone where out-of-tree drivers were not able to # register config options with the DB configuration loading branch. LOG.debug( f"Exception during attempt to load domain specific " f"configuration options: {ex}" ) # Override any options that have been passed in as specified in the # database. 
for group in specific_config: for option in specific_config[group]: # NOTE(gtema): Very first time default driver is being ordered # to process the domain. This will change once initialization # completes. Until the driver specific configuration is being # registered `set_override` will fail for options not known by # the core Keystone. Make this loading not failing letting code # to complete the process properly. try: domain_config['cfg'].set_override( option, specific_config[group][option], group ) except (cfg.NoSuchOptError, cfg.NoSuchGroupError): # Error to register config overrides for wrong driver. This # is not worth of logging since it is a normal case during # Keystone initialization. pass domain_config['cfg_overrides'] = specific_config domain_config['driver'] = self._load_driver(domain_config) _assert_no_more_than_one_sql_driver(domain_id, domain_config) self[domain_id] = domain_config def _setup_domain_drivers_from_database( self, standard_driver, resource_api ): """Read domain specific configuration from database and load drivers. 
Domain configurations are stored in the domain-config backend, so we go through each domain to find those that have a specific config defined, and for those that do we: - Create a new config structure, overriding any specific options defined in the resource backend - Initialise a new instance of the required driver with this new config """ for domain in resource_api.list_domains(): domain_config_options = ( PROVIDERS.domain_config_api.get_config_with_sensitive_info( domain['id'] ) ) if domain_config_options: self._load_config_from_database( domain['id'], domain_config_options ) def setup_domain_drivers(self, standard_driver, resource_api): # This is called by the api call wrapper self.driver = standard_driver if CONF.identity.domain_configurations_from_database: self._setup_domain_drivers_from_database( standard_driver, resource_api ) else: self._setup_domain_drivers_from_files( standard_driver, resource_api ) self.configured = True def get_domain_driver(self, domain_id): self.check_config_and_reload_domain_driver_if_required(domain_id) if domain_id in self: return self[domain_id]['driver'] def get_domain_conf(self, domain_id): self.check_config_and_reload_domain_driver_if_required(domain_id) if domain_id in self: return self[domain_id]['cfg'] else: return CONF def reload_domain_driver(self, domain_id): # Only used to support unit tests that want to set # new config values. This should only be called once # the domains have been configured, since it relies on # the fact that the configuration files/database have already been # read. if self.configured: if domain_id in self: self[domain_id]['driver'] = self._load_driver(self[domain_id]) else: # The standard driver self.driver = self.driver() def check_config_and_reload_domain_driver_if_required(self, domain_id): """Check for, and load, any new domain specific config for this domain. This is only supported for the database-stored domain specific configuration. 
When the domain specific drivers were set up, we stored away the specific config for this domain that was available at that time. So we now read the current version and compare. While this might seem somewhat inefficient, the sensitive config call is cached, so should be light weight. More importantly, when the cache timeout is reached, we will get any config that has been updated from any other keystone process. This cache-timeout approach works for both multi-process and multi-threaded keystone configurations. In multi-threaded configurations, even though we might remove a driver object (that could be in use by another thread), this won't actually be thrown away until all references to it have been broken. When that other thread is released back and is restarted with another command to process, next time it accesses the driver it will pickup the new one. """ if ( not CONF.identity.domain_specific_drivers_enabled or not CONF.identity.domain_configurations_from_database ): # If specific drivers are not enabled, then there is nothing to do. # If we are not storing the configurations in the database, then # we'll only re-read the domain specific config files on startup # of keystone. return latest_domain_config = ( PROVIDERS.domain_config_api.get_config_with_sensitive_info( domain_id ) ) domain_config_in_use = domain_id in self if latest_domain_config: if ( not domain_config_in_use or latest_domain_config != self[domain_id]['cfg_overrides'] ): self._load_config_from_database( domain_id, latest_domain_config ) elif domain_config_in_use: # The domain specific config has been deleted, so should remove the # specific driver for this domain. try: del self[domain_id] except KeyError: # nosec # Allow this error in case we are unlucky and in a # multi-threaded situation, two threads happen to be running # in lock step. pass # If we fall into the else condition, this means there is no domain # config set, and there is none in use either, so we have nothing # to do. 
def domains_configured(f): """Wrap API calls to lazy load domain configs after init. This is required since the assignment manager needs to be initialized before this manager, and yet this manager's init wants to be able to make assignment calls (to build the domain configs). So instead, we check if the domains have been initialized on entry to each call, and if requires load them, """ @functools.wraps(f) def wrapper(self, *args, **kwargs): if ( not self.domain_configs.configured and CONF.identity.domain_specific_drivers_enabled ): # If domain specific driver has not been configured, acquire the # lock and proceed with loading the driver. with self.domain_configs.lock: # Check again just in case some other thread has already # completed domain config. if not self.domain_configs.configured: self.domain_configs.setup_domain_drivers( self.driver, PROVIDERS.resource_api ) return f(self, *args, **kwargs) return wrapper def exception_translated(exception_type): """Wrap API calls to map to correct exception.""" def _exception_translated(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): try: return f(self, *args, **kwargs) except exception.PublicIDNotFound as e: if exception_type == 'user': raise exception.UserNotFound(user_id=str(e)) elif exception_type == 'group': raise exception.GroupNotFound(group_id=str(e)) elif exception_type == 'assertion': raise AssertionError(_('Invalid user / password')) else: raise return wrapper return _exception_translated @notifications.listener class Manager(manager.Manager): """Default pivot point for the Identity backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. This class also handles the support of domain specific backends, by using the DomainConfigs class. 
The setup call for DomainConfigs is called from with the @domains_configured wrapper in a lazy loading fashion to get around the fact that we can't satisfy the assignment api it needs from within our __init__() function since the assignment driver is not itself yet initialized. Each of the identity calls are pre-processed here to choose, based on domain, which of the drivers should be called. The non-domain-specific driver is still in place, and is used if there is no specific driver for the domain in question (or we are not using multiple domain drivers). Starting with Juno, in order to be able to obtain the domain from just an ID being presented as part of an API call, a public ID to domain and local ID mapping is maintained. This mapping also allows for the local ID of drivers that do not provide simple UUIDs (such as LDAP) to be referenced via a public facing ID. The mapping itself is automatically generated as entities are accessed via the driver. This mapping is only used when: - the entity is being handled by anything other than the default driver, or - the entity is being handled by the default LDAP driver and backward compatible IDs are not required. This means that in the standard case of a single SQL backend or the default settings of a single LDAP backend (since backward compatible IDs is set to True by default), no mapping is used. An alternative approach would be to always use the mapping table, but in the cases where we don't need it to make the public and local IDs the same. It is felt that not using the mapping by default is a more prudent way to introduce this functionality. 
""" driver_namespace = 'keystone.identity' _provides_api = 'identity_api' _USER = 'user' _GROUP = 'group' def __init__(self): super().__init__(CONF.identity.driver) self.domain_configs = DomainConfigs() notifications.register_event_callback( notifications.ACTIONS.internal, notifications.DOMAIN_DELETED, self._domain_deleted, ) self.event_callbacks = { notifications.ACTIONS.deleted: { 'project': [self._unset_default_project], }, } def _domain_deleted(self, service, resource_type, operation, payload): domain_id = payload['resource_info'] driver = self._select_identity_driver(domain_id) if driver.is_sql: group_refs = self.list_groups(domain_scope=domain_id) for group in group_refs: # Cleanup any existing groups. try: self.delete_group(group['id']) except exception.GroupNotFound: LOG.debug( ( 'Group %(groupid)s not found when deleting ' 'domain contents for %(domainid)s, continuing ' 'with cleanup.' ), {'groupid': group['id'], 'domainid': domain_id}, ) # And finally, delete the users themselves user_refs = self.list_users(domain_scope=domain_id) for user in user_refs: try: if not driver.is_sql: PROVIDERS.shadow_users_api.delete_user(user['id']) else: self.delete_user(user['id']) except exception.UserNotFound: LOG.debug( ( 'User %(userid)s not found when deleting domain ' 'contents for %(domainid)s, continuing with ' 'cleanup.' ), {'userid': user['id'], 'domainid': domain_id}, ) def _unset_default_project( self, service, resource_type, operation, payload ): """Callback, clears user default_project_id after project deletion. Notifications are used to unset a user's default project because there is no foreign key to the project. Projects can be in a non-SQL backend, making FKs impossible. 
""" project_id = payload['resource_info'] drivers = itertools.chain( self.domain_configs.values(), [{'driver': self.driver}] ) for d in drivers: try: d['driver'].unset_default_project_id(project_id) except exception.Forbidden: # NOTE(lbragstad): If the driver throws a Forbidden, it's # because the driver doesn't support writes. This is the case # with the in-tree LDAP implementation since it is read-only. # This also ensures consistency for out-of-tree backends that # might be read-only. pass # Domain ID normalization methods def _set_domain_id_and_mapping(self, ref, domain_id, driver, entity_type): """Patch the domain_id/public_id into the resulting entity(ies). :param ref: the entity or list of entities to post process :param domain_id: the domain scope used for the call :param driver: the driver used to execute the call :param entity_type: whether this is a user or group :returns: post processed entity or list or entities Called to post-process the entity being returned, using a mapping to substitute a public facing ID as necessary. This method must take into account: - If the driver is not domain aware, then we must set the domain attribute of all entities irrespective of mapping. - If the driver does not support UUIDs, then we always want to provide a mapping, except for the special case of this being the default driver and backward_compatible_ids is set to True. This is to ensure that entity IDs do not change for an existing LDAP installation (only single domain/driver LDAP configurations were previously supported). - If the driver does support UUIDs, then we always create a mapping entry, but use the local UUID as the public ID. The exception to this is that if we just have single driver (i.e. not using specific multi-domain configs), then we don't bother with the mapping at all. 
""" conf = CONF.identity if not self._needs_post_processing(driver): # a classic case would be when running with a single SQL driver return ref LOG.debug( 'ID Mapping - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s, ' 'Domains: %(aware)s, UUIDs: %(generate)s, ' 'Compatible IDs: %(compat)s', { 'domain': domain_id, 'driver': (driver == self.driver), 'aware': driver.is_domain_aware(), 'generate': driver.generates_uuids(), 'compat': CONF.identity_mapping.backward_compatible_ids, }, ) if isinstance(ref, dict): return self._set_domain_id_and_mapping_for_single_ref( ref, domain_id, driver, entity_type, conf ) elif isinstance(ref, list): return self._set_domain_id_and_mapping_for_list( ref, domain_id, driver, entity_type, conf ) else: raise ValueError(_('Expected dict or list: %s') % type(ref)) def _needs_post_processing(self, driver): """Return whether entity from driver needs domain added or mapping.""" return ( driver is not self.driver or not driver.generates_uuids() or not driver.is_domain_aware() ) def _insert_new_public_id(self, local_entity, ref, driver): # Need to create a mapping. If the driver generates UUIDs # then pass the local UUID in as the public ID to use. 
public_id = None if driver.generates_uuids(): public_id = ref['id'] ref['id'] = PROVIDERS.id_mapping_api.create_id_mapping( local_entity, public_id ) LOG.debug('Created new mapping to public ID: %s', ref['id']) def _set_domain_id_and_mapping_for_single_ref( self, ref, domain_id, driver, entity_type, conf ): LOG.debug('Local ID: %s', ref['id']) ref = ref.copy() if not driver.is_domain_aware(): if not domain_id: domain_id = CONF.identity.default_domain_id ref['domain_id'] = domain_id if self._is_mapping_needed(driver): local_entity = { 'domain_id': ref['domain_id'], 'local_id': ref['id'], 'entity_type': entity_type, } public_id = PROVIDERS.id_mapping_api.get_public_id(local_entity) if public_id: ref['id'] = public_id LOG.debug('Found existing mapping to public ID: %s', ref['id']) else: self._insert_new_public_id(local_entity, ref, driver) return ref def _set_domain_id_and_mapping_for_list( self, ref_list, domain_id, driver, entity_type, conf ): """Set domain id and mapping for a list of refs. The method modifies refs in-place. """ if not ref_list: return [] # If the domain_id is None that means we are running in a single # backend mode, so to remain backwards compatible we will use the # default domain ID. if not domain_id: domain_id = CONF.identity.default_domain_id if not driver.is_domain_aware(): for ref in ref_list: ref['domain_id'] = domain_id if not self._is_mapping_needed(driver): return ref_list # build a map of refs for fast look-up refs_map = {} for r in ref_list: refs_map[(r['id'], entity_type, r['domain_id'])] = r # fetch all mappings for the domain, lookup the user at the map built # at previous step and replace his id. domain_mappings = PROVIDERS.id_mapping_api.get_domain_mapping_list( domain_id, entity_type=entity_type ) for _mapping in domain_mappings: idx = (_mapping.local_id, _mapping.entity_type, _mapping.domain_id) try: ref = refs_map.pop(idx) # due to python specifics, `ref` still points to an item in # `ref_list`. 
That's why when we change it here, it gets # changed in `ref_list`. ref['id'] = _mapping.public_id except KeyError: pass # some old entry, skip it # at this point, all known refs were granted a public_id. For the refs # left, there are no mappings. They need to be created. for ref in refs_map.values(): local_entity = { 'domain_id': ref['domain_id'], 'local_id': ref['id'], 'entity_type': entity_type, } self._insert_new_public_id(local_entity, ref, driver) return ref_list def _is_mapping_needed(self, driver): """Return whether mapping is needed. There are two situations where we must use the mapping: - this isn't the default driver (i.e. multiple backends), or - we have a single backend that doesn't use UUIDs The exception to the above is that we must honor backward compatibility if this is the default driver (e.g. to support current LDAP) """ is_not_default_driver = driver is not self.driver return is_not_default_driver or ( not driver.generates_uuids() and not CONF.identity_mapping.backward_compatible_ids ) def _clear_domain_id_if_domain_unaware(self, driver, ref): """Clear domain_id details if driver is not domain aware.""" if not driver.is_domain_aware() and 'domain_id' in ref: ref = ref.copy() ref.pop('domain_id') return ref def _select_identity_driver(self, domain_id): """Choose a backend driver for the given domain_id. :param domain_id: The domain_id for which we want to find a driver. If the domain_id is specified as None, then this means we need a driver that handles multiple domains. :returns: chosen backend driver If there is a specific driver defined for this domain then choose it. If the domain is None, or there no specific backend for the given domain is found, then we chose the default driver. """ if domain_id is None: driver = self.driver else: driver = ( self.domain_configs.get_domain_driver(domain_id) or self.driver ) # If the driver is not domain aware (e.g. 
LDAP) then check to # ensure we are not mapping multiple domains onto it - the only way # that would happen is that the default driver is LDAP and the # domain is anything other than None or the default domain. if ( not driver.is_domain_aware() and driver == self.driver and domain_id != CONF.identity.default_domain_id and domain_id is not None ): LOG.warning( 'Found multiple domains being mapped to a ' 'driver that does not support that (e.g. ' 'LDAP) - Domain ID: %(domain)s, ' 'Default Driver: %(driver)s', {'domain': domain_id, 'driver': (driver == self.driver)}, ) raise exception.DomainNotFound(domain_id=domain_id) return driver def _get_domain_driver_and_entity_id(self, public_id): """Look up details using the public ID. :param public_id: the ID provided in the call :returns: domain_id, which can be None to indicate that the driver in question supports multiple domains driver selected based on this domain entity_id which will is understood by the driver. Use the mapping table to look up the domain, driver and local entity that is represented by the provided public ID. Handle the situations where we do not use the mapping (e.g. single driver that understands UUIDs etc.) """ conf = CONF.identity # First, since we don't know anything about the entity yet, we must # assume it needs mapping, so long as we are using domain specific # drivers. if conf.domain_specific_drivers_enabled: local_id_ref = PROVIDERS.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], self._select_identity_driver(local_id_ref['domain_id']), local_id_ref['local_id'], ) # So either we are using multiple drivers but the public ID is invalid # (and hence was not found in the mapping table), or the public ID is # being handled by the default driver. Either way, the only place left # to look is in that standard driver. However, we don't yet know if # this driver also needs mapping (e.g. LDAP in non backward # compatibility mode). 
driver = self.driver if driver.generates_uuids(): if driver.is_domain_aware: # No mapping required, and the driver can handle the domain # information itself. The classic case of this is the # current SQL driver. return (None, driver, public_id) else: # Although we don't have any drivers of this type, i.e. that # understand UUIDs but not domains, conceptually you could. return (conf.default_domain_id, driver, public_id) # So the only place left to find the ID is in the default driver which # we now know doesn't generate UUIDs if not CONF.identity_mapping.backward_compatible_ids: # We are not running in backward compatibility mode, so we # must use a mapping. local_id_ref = PROVIDERS.id_mapping_api.get_id_mapping(public_id) if local_id_ref: return ( local_id_ref['domain_id'], driver, local_id_ref['local_id'], ) else: raise exception.PublicIDNotFound(id=public_id) # If we reach here, this means that the default driver # requires no mapping - but also doesn't understand domains # (e.g. the classic single LDAP driver situation). Hence we pass # back the public_ID unmodified and use the default domain (to # keep backwards compatibility with existing installations). # # It is still possible that the public ID is just invalid in # which case we leave this to the caller to check. return (conf.default_domain_id, driver, public_id) def _assert_user_and_group_in_same_backend( self, user_entity_id, user_driver, group_entity_id, group_driver ): """Ensure that user and group IDs are backed by the same backend. Raise a CrossBackendNotAllowed exception if they are not from the same backend, otherwise return None. """ if user_driver is not group_driver: # Determine first if either IDs don't exist by calling # the driver.get methods (which will raise a NotFound # exception). user_driver.get_user(user_entity_id) group_driver.get_group(group_entity_id) # If we get here, then someone is attempting to create a cross # backend membership, which is not allowed. 
raise exception.CrossBackendNotAllowed( group_id=group_entity_id, user_id=user_entity_id ) def _mark_domain_id_filter_satisfied(self, hints): if hints: for filter in hints.filters: if ( filter['name'] == 'domain_id' and filter['comparator'] == 'equals' ): hints.filters.remove(filter) def _ensure_domain_id_in_hints(self, hints, domain_id): if domain_id is not None and not hints.get_exact_filter_by_name( 'domain_id' ): hints.add_filter('domain_id', domain_id) def _set_list_limit_in_hints(self, hints, driver): """Set list limit in hints from driver. If a hints list is provided, the wrapper will insert the relevant limit into the hints so that the underlying driver call can try and honor it. If the driver does truncate the response, it will update the 'truncated' attribute in the 'limit' entry in the hints list, which enables the caller of this function to know if truncation has taken place. If, however, the driver layer is unable to perform truncation, the 'limit' entry is simply left in the hints list for the caller to handle. A _get_list_limit() method is required to be present in the object class hierarchy, which returns the limit for this backend to which we will truncate. If a hints list is not provided in the arguments of the wrapped call then any limits set in the config file are ignored. This allows internal use of such wrapped methods where the entire data set is needed as input for the calculations of some other API (e.g. get role assignments for a given project). This method, specific to identity manager, is used instead of more general response_truncated, because the limit for identity entities can be overridden in domain-specific config files. The driver to use is determined during processing of the passed parameters and response_truncated is designed to set the limit before any processing. 
""" if hints is None: return list_limit = driver._get_list_limit() if list_limit: hints.set_limit(list_limit) # The actual driver calls - these are pre/post processed here as # part of the Manager layer to make sure we: # # - select the right driver for this domain # - clear/set domain_ids for drivers that do not support domains # - create any ID mapping that might be required @notifications.emit_event('authenticate') @domains_configured @exception_translated('assertion') def authenticate(self, user_id, password): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) ref = driver.authenticate(entity_id, password) ref = self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) ref = self._shadow_nonlocal_user(ref) PROVIDERS.shadow_users_api.set_last_active_at(ref['id']) return ref def _assert_default_project_id_is_not_domain(self, default_project_id): if default_project_id: # make sure project is not a domain try: project_ref = PROVIDERS.resource_api.get_project( default_project_id ) if project_ref['is_domain'] is True: msg = _( "User's default project ID cannot be a " "domain ID: %s" ) raise exception.ValidationError( message=(msg % default_project_id) ) except exception.ProjectNotFound: # should be idempotent if project is not found so that it is # backward compatible pass def _validate_federated_objects(self, fed_obj_list): # Validate that the ipd and protocols exist for fed_obj in fed_obj_list: try: self.federation_api.get_idp(fed_obj['idp_id']) except exception.IdentityProviderNotFound: msg = ( _("Could not find Identity Provider: %s") % fed_obj['idp_id'] ) raise exception.ValidationError(msg) for protocol in fed_obj['protocols']: try: self.federation_api.get_protocol( fed_obj['idp_id'], protocol['protocol_id'] ) except exception.FederatedProtocolNotFound: msg = _( "Could not find federated protocol " "%(protocol)s for Identity Provider: %(idp)s." 
) % { 'protocol': protocol['protocol_id'], 'idp': fed_obj['idp_id'], } raise exception.ValidationError(msg) def _create_federated_objects(self, user_ref, fed_obj_list): for fed_obj in fed_obj_list: for protocols in fed_obj['protocols']: federated_dict = { 'user_id': user_ref['id'], 'idp_id': fed_obj['idp_id'], 'protocol_id': protocols['protocol_id'], 'unique_id': protocols['unique_id'], 'display_name': user_ref['name'], } self.shadow_users_api.create_federated_object(federated_dict) def _create_user_with_federated_objects(self, user, driver): # If the user did not pass a federated object along inside the user # object then we simply create the user as normal. if not user.get('federated'): if 'federated' in user: del user['federated'] user = driver.create_user(user['id'], user) return user # Otherwise, validate the federated object and create the user. else: user_ref = user.copy() del user['federated'] self._validate_federated_objects(user_ref['federated']) user = driver.create_user(user['id'], user) self._create_federated_objects(user_ref, user_ref['federated']) user['federated'] = user_ref['federated'] return user @domains_configured @exception_translated('user') def create_user(self, user_ref, initiator=None): user = user_ref.copy() if 'password' in user: validators.validate_password(user['password']) user['name'] = user['name'].strip() user.setdefault('enabled', True) domain_id = user['domain_id'] PROVIDERS.resource_api.get_domain(domain_id) self._assert_default_project_id_is_not_domain( user_ref.get('default_project_id') ) # For creating a user, the domain is in the object itself domain_id = user_ref['domain_id'] driver = self._select_identity_driver(domain_id) user = self._clear_domain_id_if_domain_unaware(driver, user) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. 
user['id'] = uuid.uuid4().hex ref = self._create_user_with_federated_objects(user, driver) notifications.Audit.created(self._USER, user['id'], initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) @domains_configured @exception_translated('user') @MEMOIZE def get_user(self, user_id): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) ref = driver.get_user(entity_id) # Add user's federated objects fed_objects = self.shadow_users_api.get_federated_objects(user_id) if fed_objects: ref['federated'] = fed_objects return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) def assert_user_enabled(self, user_id, user=None): """Assert the user and the user's domain are enabled. :raise AssertionError if the user or the user's domain is disabled. """ if user is None: user = self.get_user(user_id) PROVIDERS.resource_api.assert_domain_enabled(user['domain_id']) if not user.get('enabled', True): raise AssertionError(_('User is disabled: %s') % user_id) @domains_configured @exception_translated('user') @MEMOIZE def get_user_by_name(self, user_name, domain_id): driver = self._select_identity_driver(domain_id) ref = driver.get_user_by_name(user_name, domain_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) def _translate_expired_password_hints(self, hints): """Clean Up Expired Password Hints. Any `password_expires_at` filters on the `list_users` or `list_users_in_group` queries are modified so the call will return valid data. The filters `comparator` is changed to the operator specified in the call, otherwise it is assumed to be `equals`. The filters `value` becomes the timestamp specified. Both the operator and timestamp are validated, and will raise a InvalidOperatorError or ValidationTimeStampError exception respectively if invalid. 
""" operators = { 'lt': operator.lt, 'gt': operator.gt, 'eq': operator.eq, 'lte': operator.le, 'gte': operator.ge, 'neq': operator.ne, } for filter_ in hints.filters: if 'password_expires_at' == filter_['name']: # password_expires_at must be in the format # 'lt:2016-11-06T15:32:17Z'. So we can assume the position # of the ':' otherwise assign the operator to equals. if ':' in filter_['value'][2:4]: op, timestamp = filter_['value'].split(':', 1) else: op = 'eq' timestamp = filter_['value'] try: filter_['value'] = timeutils.parse_isotime(timestamp) except ValueError: raise exception.ValidationTimeStampError try: filter_['comparator'] = operators[op] except KeyError: raise exception.InvalidOperatorError(_op=op) return hints def _handle_shadow_and_local_users(self, driver, hints): federated_attributes = {'idp_id', 'protocol_id', 'unique_id'} fed_res = [] for filter_ in hints.filters: if filter_['name'] in federated_attributes: return PROVIDERS.shadow_users_api.get_federated_users(hints) # Note: If the filters contain 'name', we should get the user from # both local user and shadow user backend. if filter_['name'] == 'name': fed_hints = copy.deepcopy(hints) fed_res = PROVIDERS.shadow_users_api.get_federated_users( fed_hints ) break return driver.list_users(hints) + fed_res @domains_configured @exception_translated('user') def list_users(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. 
self._mark_domain_id_filter_satisfied(hints) hints = self._translate_expired_password_hints(hints) ref_list = self._handle_shadow_and_local_users(driver, hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.USER ) def _require_matching_domain_id(self, new_ref, orig_ref): """Ensure the current domain ID matches the reference one, if any. Provided we want domain IDs to be immutable, check whether any domain_id specified in the ref dictionary matches the existing domain_id for this entity. :param new_ref: the dictionary of new values proposed for this entity :param orig_ref: the dictionary of original values proposed for this entity :raises: :class:`keystone.exception.ValidationError` """ if 'domain_id' in new_ref: if new_ref['domain_id'] != orig_ref['domain_id']: raise exception.ValidationError(_('Cannot change Domain ID')) def _update_user_with_federated_objects(self, user, driver, entity_id): # If the user did not pass a federated object along inside the user # object then we simply update the user as normal and add the # currently associated federated objects to user to be added to the # dictionary. if not user.get('federated'): if 'federated' in user: del user['federated'] user = driver.update_user(entity_id, user) fed_objects = self.shadow_users_api.get_federated_objects( user['id'] ) if fed_objects: user['federated'] = fed_objects return user # Otherwise, we validate, remove the previous user's federated objects, # and update the user along with their updated federated objects. 
else: user_ref = user.copy() self._validate_federated_objects(user_ref['federated']) self.shadow_users_api.delete_federated_object(entity_id) del user['federated'] user = driver.update_user(entity_id, user) self._create_federated_objects(user, user_ref['federated']) user['federated'] = user_ref['federated'] return user @domains_configured @exception_translated('user') def update_user(self, user_id, user_ref, initiator=None): old_user_ref = self.get_user(user_id) user = user_ref.copy() self._require_matching_domain_id(user, old_user_ref) if 'password' in user: validators.validate_password(user['password']) if 'name' in user: user['name'] = user['name'].strip() if 'id' in user: if user_id != user['id']: raise exception.ValidationError(_('Cannot change user ID')) # Since any ID in the user dict is now irrelevant, remove its so as # the driver layer won't be confused by the fact the this is the # public ID not the local ID user.pop('id') self._assert_default_project_id_is_not_domain( user_ref.get('default_project_id') ) domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) user = self._clear_domain_id_if_domain_unaware(driver, user) self.get_user.invalidate(self, old_user_ref['id']) self.get_user_by_name.invalidate( self, old_user_ref['name'], old_user_ref['domain_id'] ) ref = self._update_user_with_federated_objects(user, driver, entity_id) notifications.Audit.updated(self._USER, user_id, initiator) enabled_change = (user.get('enabled') is False) and user[ 'enabled' ] != old_user_ref.get('enabled') if enabled_change or user.get('password') is not None: self._persist_revocation_event_for_user(user_id) reason = ( 'Invalidating the token cache because user %(user_id)s was ' 'enabled or disabled. Authorization will be calculated and ' 'enforced accordingly the next time they authenticate or ' 'validate a token.' 
% {'user_id': user_id} ) notifications.invalidate_token_cache_notification(reason) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.USER ) @domains_configured @exception_translated('user') def delete_user(self, user_id, initiator=None): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) # Get user details to invalidate the cache. user_old = self.get_user(user_id) hints = driver_hints.Hints() hints.add_filter('user_id', user_id) driver.delete_user(entity_id) PROVIDERS.assignment_api.delete_user_assignments(user_id) self.get_user.invalidate(self, user_id) self.get_user_by_name.invalidate( self, user_old['name'], user_old['domain_id'] ) PROVIDERS.credential_api.delete_credentials_for_user(user_id) PROVIDERS.id_mapping_api.delete_id_mapping(user_id) notifications.Audit.deleted(self._USER, user_id, initiator) # Invalidate user role assignments cache region, as it may be caching # role assignments where the actor is the specified user assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() @domains_configured @exception_translated('group') def create_group(self, group_ref, initiator=None): group = group_ref.copy() group.setdefault('description', '') domain_id = group['domain_id'] PROVIDERS.resource_api.get_domain(domain_id) # For creating a group, the domain is in the object itself domain_id = group_ref['domain_id'] driver = self._select_identity_driver(domain_id) group = self._clear_domain_id_if_domain_unaware(driver, group) # Generate a local ID - in the future this might become a function of # the underlying driver so that it could conform to rules set down by # that particular driver type. 
group['id'] = uuid.uuid4().hex group['name'] = group['name'].strip() ref = driver.create_group(group['id'], group) notifications.Audit.created(self._GROUP, group['id'], initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') @MEMOIZE def get_group(self, group_id): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( group_id ) ref = driver.get_group(entity_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') def get_group_by_name(self, group_name, domain_id): driver = self._select_identity_driver(domain_id) ref = driver.get_group_by_name(group_name, domain_id) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') def update_group(self, group_id, group, initiator=None): old_group_ref = self.get_group(group_id) self._require_matching_domain_id(group, old_group_ref) domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( group_id ) group = self._clear_domain_id_if_domain_unaware(driver, group) if 'name' in group: group['name'] = group['name'].strip() ref = driver.update_group(entity_id, group) self.get_group.invalidate(self, group_id) notifications.Audit.updated(self._GROUP, group_id, initiator) return self._set_domain_id_and_mapping( ref, domain_id, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') def delete_group(self, group_id, initiator=None): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( group_id ) roles = PROVIDERS.assignment_api.list_role_assignments( group_id=group_id ) user_ids = (u['id'] for u in self.list_users_in_group(group_id)) driver.delete_group(entity_id) self.get_group.invalidate(self, group_id) PROVIDERS.id_mapping_api.delete_id_mapping(group_id) 
PROVIDERS.assignment_api.delete_group_assignments(group_id) notifications.Audit.deleted(self._GROUP, group_id, initiator) # If the group has been created and has users but has no role # assignment for the group then we do not need to revoke all the users # tokens and can just delete the group. if roles: for user_id in user_ids: self._persist_revocation_event_for_user(user_id) reason_s = ( 'Invalidating the token cache because group %(group_id)s ' 'has been deleted.' % {'group_id': group_id} ) notifications.invalidate_token_cache_notification(reason_s) # Invalidate user role assignments cache region, as it may be caching # role assignments expanded from the specified group to its users assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() @domains_configured @exception_translated('group') def add_user_to_group(self, user_id, group_id, initiator=None): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id) ) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = get_entity_info_for_user( user_id ) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver ) group_driver.add_user_to_group(user_entity_id, group_entity_id) # Invalidate user role assignments cache region, as it may now need to # include role assignments from the specified group to its users assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() notifications.Audit.added_to( self._GROUP, group_id, self._USER, user_id, initiator ) @domains_configured @exception_translated('group') def remove_user_from_group(self, user_id, group_id, initiator=None): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( 
self._get_domain_driver_and_entity_id(group_id) ) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = get_entity_info_for_user( user_id ) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver ) group_driver.remove_user_from_group(user_entity_id, group_entity_id) self._persist_revocation_event_for_user(user_id) # Invalidate user role assignments cache region, as it may be caching # role assignments expanded from this group to this user assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() reason = ( 'Invalidating the token cache because user %(user_id)s was ' 'removed from group %(group_id)s. Authorization will be ' 'calculated and enforced accordingly the next time they ' 'authenticate or validate a token.' % { 'user_id': user_id, 'group_id': group_id, } ) notifications.invalidate_token_cache_notification(reason) notifications.Audit.removed_from( self._GROUP, group_id, self._USER, user_id, initiator ) def _persist_revocation_event_for_user(self, user_id): """Emit a notification to invoke a revocation event callback. Fire off an internal notification that will be consumed by the revocation API to store a revocation record for a specific user. 
:param user_id: user identifier :type user_id: string """ notifications.Audit.internal( notifications.PERSIST_REVOCATION_EVENT_FOR_USER, user_id ) @domains_configured @exception_translated('user') def list_groups_for_user(self, user_id, hints=None): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups_for_user(entity_id, hints) for ref in ref_list: if 'membership_expires_at' not in ref: ref['membership_expires_at'] = None return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') def list_groups(self, domain_scope=None, hints=None): driver = self._select_identity_driver(domain_scope) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if driver.is_domain_aware(): # Force the domain_scope into the hint to ensure that we only get # back domains for that scope. self._ensure_domain_id_in_hints(hints, domain_scope) else: # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter. 
self._mark_domain_id_filter_satisfied(hints) ref_list = driver.list_groups(hints) return self._set_domain_id_and_mapping( ref_list, domain_scope, driver, mapping.EntityType.GROUP ) @domains_configured @exception_translated('group') def list_users_in_group(self, group_id, hints=None): domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( group_id ) self._set_list_limit_in_hints(hints, driver) hints = hints or driver_hints.Hints() if not driver.is_domain_aware(): # We are effectively satisfying any domain_id filter by the above # driver selection, so remove any such filter self._mark_domain_id_filter_satisfied(hints) hints = self._translate_expired_password_hints(hints) ref_list = driver.list_users_in_group(entity_id, hints) return self._set_domain_id_and_mapping( ref_list, domain_id, driver, mapping.EntityType.USER ) @domains_configured @exception_translated('group') def check_user_in_group(self, user_id, group_id): @exception_translated('user') def get_entity_info_for_user(public_id): return self._get_domain_driver_and_entity_id(public_id) _domain_id, group_driver, group_entity_id = ( self._get_domain_driver_and_entity_id(group_id) ) # Get the same info for the user_id, taking care to map any # exceptions correctly _domain_id, user_driver, user_entity_id = get_entity_info_for_user( user_id ) self._assert_user_and_group_in_same_backend( user_entity_id, user_driver, group_entity_id, group_driver ) return group_driver.check_user_in_group( user_entity_id, group_entity_id ) @domains_configured def change_password( self, user_id, original_password, new_password, initiator=None ): # authenticate() will raise an AssertionError if authentication fails try: self.authenticate(user_id, original_password) except exception.PasswordExpired: # If a password has expired, we want users to be able to change it pass domain_id, driver, entity_id = self._get_domain_driver_and_entity_id( user_id ) try: validators.validate_password(new_password) 
driver.change_password(entity_id, new_password) except exception.PasswordValidationError as ex: audit_reason = reason.Reason(str(ex), str(ex.code)) notifications.Audit.updated( self._USER, user_id, initiator, reason=audit_reason ) raise notifications.Audit.updated(self._USER, user_id, initiator) self._persist_revocation_event_for_user(user_id) reason_s = ( 'Invalidating the token cache because user %(user_id)s changed ' 'the password. Authorization will be calculated and enforced ' 'accordingly the next time they authenticate or validate a ' 'token.' % {'user_id': user_id} ) notifications.invalidate_token_cache_notification(reason_s) @MEMOIZE def _shadow_nonlocal_user(self, user): try: return PROVIDERS.shadow_users_api.get_user(user['id']) except exception.UserNotFound: return PROVIDERS.shadow_users_api.create_nonlocal_user(user) def _shadow_federated_user(self, idp_id, protocol_id, user): user_dict = {} email = user.get('email') try: LOG.debug("Trying to update name for federated user [%s].", user) PROVIDERS.shadow_users_api.update_federated_user_display_name( idp_id, protocol_id, user['id'], user['name'] ) user_dict = PROVIDERS.shadow_users_api.get_federated_user( idp_id, protocol_id, user['id'] ) if email: LOG.debug( "Executing the e-mail update for federated user [%s].", user, ) user_ref = {"email": email} self.update_user(user_dict['id'], user_ref) user_dict.update({"email": email}) except exception.UserNotFound: federated_dict = { 'idp_id': idp_id, 'protocol_id': protocol_id, 'unique_id': user['id'], 'display_name': user['name'], } LOG.debug("Creating federated user [%s].", user) user_dict = PROVIDERS.shadow_users_api.create_federated_user( user["domain"]['id'], federated_dict, email=email ) PROVIDERS.shadow_users_api.set_last_active_at(user_dict['id']) return user_dict def shadow_federated_user(self, idp_id, protocol_id, user, group_ids=None): """Map a federated user to a user. 
:param idp_id: identity provider id :param protocol_id: protocol id :param user: User dictionary :param group_ids: list of group ids to add the user to :returns: dictionary of the mapped User entity """ user_dict = self._shadow_federated_user(idp_id, protocol_id, user) # Note(knikolla): The shadowing operation can be cached, # however we need to update the expiring group memberships. if group_ids: for group_id in group_ids: LOG.info( "Adding user [%s] to group [%s].", user_dict, group_id ) PROVIDERS.shadow_users_api.add_user_to_group_expires( user_dict['id'], group_id ) return user_dict class MappingManager(manager.Manager): """Default pivot point for the ID Mapping backend.""" driver_namespace = 'keystone.identity.id_mapping' _provides_api = 'id_mapping_api' def __init__(self): super().__init__(CONF.identity_mapping.driver) @MEMOIZE_ID_MAPPING def _get_public_id(self, domain_id, local_id, entity_type): return self.driver.get_public_id( { 'domain_id': domain_id, 'local_id': local_id, 'entity_type': entity_type, } ) def get_public_id(self, local_entity): return self._get_public_id( local_entity['domain_id'], local_entity['local_id'], local_entity['entity_type'], ) @MEMOIZE_ID_MAPPING def get_id_mapping(self, public_id): return self.driver.get_id_mapping(public_id) def create_id_mapping(self, local_entity, public_id=None): public_id = self.driver.create_id_mapping(local_entity, public_id) if MEMOIZE_ID_MAPPING.should_cache(public_id): self._get_public_id.set( public_id, self, local_entity['domain_id'], local_entity['local_id'], local_entity['entity_type'], ) self.get_id_mapping.set(local_entity, self, public_id) return public_id def delete_id_mapping(self, public_id): local_entity = self.get_id_mapping.get(self, public_id) self.driver.delete_id_mapping(public_id) # Delete the key of entity from cache if local_entity: self._get_public_id.invalidate( self, local_entity['domain_id'], local_entity['local_id'], local_entity['entity_type'], ) 
self.get_id_mapping.invalidate(self, public_id) def purge_mappings(self, purge_filter): # Purge mapping is rarely used and only used by the command client, # it's quite complex to invalidate part of the cache based on the purge # filters, so here invalidate the whole cache when purging mappings. self.driver.purge_mappings(purge_filter) ID_MAPPING_REGION.invalidate() class ShadowUsersManager(manager.Manager): """Default pivot point for the Shadow Users backend.""" driver_namespace = 'keystone.identity.shadow_users' _provides_api = 'shadow_users_api' def __init__(self): shadow_driver = CONF.shadow_users.driver super().__init__(shadow_driver) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/generator.py0000664000175000017500000000302700000000000021517 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""ID Generator provider interface.""" import abc from keystone.common import manager import keystone.conf from keystone import exception CONF = keystone.conf.CONF class Manager(manager.Manager): """Default pivot point for the identifier generator backend.""" driver_namespace = 'keystone.identity.id_generator' _provides_api = 'id_generator_api' def __init__(self): super().__init__(CONF.identity_mapping.generator) class IDGenerator(metaclass=abc.ABCMeta): """Interface description for an ID Generator provider.""" @abc.abstractmethod def generate_public_ID(self, mapping): """Return a Public ID for the given mapping dict. :param dict mapping: The items to be hashed. The ID must be reproducible and no more than 64 chars in length. The ID generated should be independent of the order of the items in the mapping dict. """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5221138 keystone-26.0.0/keystone/identity/id_generators/0000775000175000017500000000000000000000000022002 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/id_generators/__init__.py0000664000175000017500000000000000000000000024101 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/id_generators/sha256.py0000664000175000017500000000233700000000000023371 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib from keystone.identity import generator class Generator(generator.IDGenerator): def generate_public_ID(self, mapping): m = hashlib.sha256() for key in sorted(mapping.keys()): # python-ldap >3.0 returns bytes data type for attribute values # except distinguished names, relative distinguished names, # attribute names, queries on python3. # Please see Bytes/text management in python-ldap module. if isinstance(mapping[key], bytes): m.update(mapping[key]) else: m.update(mapping[key].encode('utf-8')) return m.hexdigest() ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/identity/mapping_backends/0000775000175000017500000000000000000000000022442 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/mapping_backends/__init__.py0000664000175000017500000000000000000000000024541 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/mapping_backends/base.py0000664000175000017500000000643400000000000023735 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone.common import provider_api from keystone import exception class MappingDriverBase(provider_api.ProviderAPIMixin, metaclass=abc.ABCMeta): """Interface description for an ID Mapping driver.""" @abc.abstractmethod def get_public_id(self, local_entity): """Return the public ID for the given local entity. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :returns: public ID, or None if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_domain_mapping_list(self, domain_id, entity_type=None): """Return mappings for the domain. :param domain_id: Domain ID to get mappings for. :param entity_type: Optional entity_type to get mappings for. :type entity_type: String, one of mappings defined in keystone.identity.mapping_backends.mapping.EntityType :returns: list of mappings. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_id_mapping(self, public_id): """Return the local mapping. :param public_id: The public ID for the mapping required. :returns dict: Containing the entity domain, local ID and type. If no mapping is found, it returns None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_id_mapping(self, local_entity, public_id=None): """Create and store a mapping to a public_id. :param dict local_entity: Containing the entity domain, local ID and type ('user' or 'group'). :param public_id: If specified, this will be the public ID. 
If this is not specified, a public ID will be generated. :returns: public ID """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_id_mapping(self, public_id): """Delete an entry for the given public_id. :param public_id: The public ID for the mapping to be deleted. The method is silent if no mapping is found. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def purge_mappings(self, purge_filter): """Purge selected identity mappings. :param dict purge_filter: Containing the attributes of the filter that defines which entries to purge. An empty filter means purge all mappings. """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/mapping_backends/mapping.py0000664000175000017500000000117000000000000024446 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class EntityType: USER = 'user' GROUP = 'group' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/mapping_backends/sql.py0000664000175000017500000001136200000000000023616 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone.identity.mapping_backends import base from keystone.identity.mapping_backends import mapping as identity_mapping class IDMapping(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'id_mapping' public_id = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) local_id = sql.Column(sql.String(255), nullable=False) # NOTE(henry-nash): Postgres requires a name to be defined for an Enum entity_type = sql.Column( sql.Enum( identity_mapping.EntityType.USER, identity_mapping.EntityType.GROUP, name='entity_type', ), nullable=False, ) # Unique constraint to ensure you can't store more than one mapping to the # same underlying values __table_args__ = ( sql.UniqueConstraint('domain_id', 'local_id', 'entity_type'), ) class Mapping(base.MappingDriverBase): def get_public_id(self, local_entity): # NOTE(henry-nash): Since the Public ID is regeneratable, rather # than search for the entry using the local entity values, we # could create the hash and do a PK lookup. However this would only # work if we hashed all the entries, even those that already generate # UUIDs, like SQL. Further, this would only work if the generation # algorithm was immutable (e.g. it had always been sha256). 
with sql.session_for_read() as session: query = session.query(IDMapping.public_id) query = query.filter_by(domain_id=local_entity['domain_id']) query = query.filter_by(local_id=local_entity['local_id']) query = query.filter_by(entity_type=local_entity['entity_type']) try: public_ref = query.one() public_id = public_ref.public_id return public_id except sql.NotFound: return None def get_domain_mapping_list(self, domain_id, entity_type=None): filters = {'domain_id': domain_id} if entity_type is not None: filters['entity_type'] = entity_type with sql.session_for_read() as session: return session.query(IDMapping).filter_by(**filters) def get_id_mapping(self, public_id): with sql.session_for_read() as session: mapping_ref = session.get(IDMapping, public_id) if mapping_ref: return mapping_ref.to_dict() def create_id_mapping(self, local_entity, public_id=None): entity = local_entity.copy() try: with sql.session_for_write() as session: if public_id is None: public_id = self.id_generator_api.generate_public_ID( entity ) entity['public_id'] = public_id mapping_ref = IDMapping.from_dict(entity) session.add(mapping_ref) except sql.DBDuplicateEntry: # something else created the mapping already. We can use it. public_id = self.get_public_id(local_entity) return public_id def delete_id_mapping(self, public_id): with sql.session_for_write() as session: try: session.query(IDMapping).filter( IDMapping.public_id == public_id ).delete() except sql.NotFound: # nosec # NOTE(morganfainberg): There is nothing to delete and nothing # to do. 
pass def purge_mappings(self, purge_filter): with sql.session_for_write() as session: query = session.query(IDMapping) if 'domain_id' in purge_filter: query = query.filter_by(domain_id=purge_filter['domain_id']) if 'public_id' in purge_filter: query = query.filter_by(public_id=purge_filter['public_id']) if 'local_id' in purge_filter: query = query.filter_by(local_id=purge_filter['local_id']) if 'entity_type' in purge_filter: query = query.filter_by( entity_type=purge_filter['entity_type'] ) query.delete() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/schema.py0000664000175000017500000000702700000000000020775 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types import keystone.conf from keystone.identity.backends import resource_options as ro CONF = keystone.conf.CONF _identity_name = { 'type': 'string', 'minLength': 1, 'maxLength': 255, 'pattern': r'[\S]+', } # Schema for Identity v3 API _user_properties = { 'default_project_id': validation.nullable(parameter_types.id_string), 'description': validation.nullable(parameter_types.description), 'domain_id': parameter_types.id_string, 'enabled': parameter_types.boolean, 'federated': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'idp_id': {'type': 'string'}, 'protocols': { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'protocol_id': {'type': 'string'}, 'unique_id': {'type': 'string'}, }, 'required': ['protocol_id', 'unique_id'], }, 'minItems': 1, }, }, 'required': ['idp_id', 'protocols'], }, }, 'name': _identity_name, 'password': {'type': ['string', 'null']}, 'options': ro.USER_OPTIONS_REGISTRY.json_schema, } # TODO(notmorgan): Provide a mechanism for options to supply real jsonschema # validation based upon the option object and the option validator(s) user_create = { 'type': 'object', 'properties': _user_properties, 'required': ['name'], 'options': {'type': 'object'}, 'additionalProperties': True, } user_update = { 'type': 'object', 'properties': _user_properties, 'minProperties': 1, 'options': {'type': 'object'}, 'additionalProperties': True, } _group_properties = { 'description': validation.nullable(parameter_types.description), 'domain_id': parameter_types.id_string, 'name': _identity_name, } group_create = { 'type': 'object', 'properties': _group_properties, 'required': ['name'], 'additionalProperties': True, } group_update = { 'type': 'object', 'properties': _group_properties, 'minProperties': 1, 'additionalProperties': True, } _password_change_properties = { 'original_password': {'type': 'string'}, 'password': {'type': 'string'}, } if 
getattr(CONF, 'strict_password_check', None): _password_change_properties['password'][ 'maxLength' ] = CONF.identity.max_password_length if getattr(CONF, 'security_compliance', None): if getattr(CONF.security_compliance, 'password_regex', None): _password_change_properties['password'][ 'pattern' ] = CONF.security_compliance.password_regex password_change = { 'type': 'object', 'properties': _password_change_properties, 'required': ['original_password', 'password'], 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/identity/shadow_backends/0000775000175000017500000000000000000000000022274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/shadow_backends/__init__.py0000664000175000017500000000000000000000000024373 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/shadow_backends/base.py0000664000175000017500000001163600000000000023567 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception def federated_objects_to_list(fed_ref): """Create a new reformatted federated object list using the one passed in. 
When returning federated objects with a user we only need the attributes idp_id, protocol_id, and unique_id. Therefore, we pull these elements out of the fed_ref and create a newly formatted list with the needed information. We simply group each federated object's protocol_ids and unique_ids under the corresponding idp_id. :returns list: Containing the user's federated objects """ if not fed_ref: return [] fed = {} for fed_dict in fed_ref: fed.setdefault( fed_dict['idp_id'], {'idp_id': fed_dict['idp_id'], 'protocols': []} )['protocols'].append( { 'protocol_id': fed_dict['protocol_id'], 'unique_id': fed_dict['unique_id'], } ) return list(fed.values()) class ShadowUsersDriverBase(metaclass=abc.ABCMeta): """Interface description for an Shadow Users driver.""" @abc.abstractmethod def create_federated_object(self, fed_dict): """Create a new federated object. :param dict federated_dict: Reference to the federated user """ raise exception.NotImplemented() @abc.abstractmethod def create_federated_user(self, domain_id, federated_dict, email=None): """Create a new user with the federated identity. :param domain_id: The domain ID of the IdP used for the federated user :param dict federated_dict: Reference to the federated user :param email: Federated user's email :returns dict: Containing the user reference """ raise exception.NotImplemented() def delete_federated_object(self, user_id): """Delete a user's federated objects. :param user_id: Unique identifier of the user """ raise exception.NotImplemented() @abc.abstractmethod def get_federated_objects(self, user_id): """Get all federated objects for a user. :param user_id: Unique identifier of the user :returns list: Containing the user's federated objects """ raise exception.NotImplemented() @abc.abstractmethod def get_federated_user(self, idp_id, protocol_id, unique_id): """Return the found user for the federated identity. 
:param idp_id: The identity provider ID :param protocol_id: The federation protocol ID :param unique_id: The unique ID for the user :returns dict: Containing the user reference """ raise exception.NotImplemented() @abc.abstractmethod def update_federated_user_display_name( self, idp_id, protocol_id, unique_id, display_name ): """Update federated user's display name if changed. :param idp_id: The identity provider ID :param protocol_id: The federation protocol ID :param unique_id: The unique ID for the user :param display_name: The user's display name """ raise exception.NotImplemented() @abc.abstractmethod def get_user(self, user_id): """Return the found user. :param user_id: Unique identifier of the user :returns dict: Containing the user reference """ raise exception.NotImplemented() @abc.abstractmethod def create_nonlocal_user(self, user_dict): """Create a new non-local user. :param dict user_dict: Reference to the non-local user :returns dict: Containing the user reference """ raise exception.NotImplemented() @abc.abstractmethod def set_last_active_at(self, user_id): """Set the last active at date for the user. :param user_id: Unique identifier of the user """ raise exception.NotImplemented() @abc.abstractmethod def list_federated_users_info(self, hints=None): """Get the shadow users info with the specified filters. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns list: A list of objects that containing the shadow users reference. """ raise exception.NotImplemented() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/identity/shadow_backends/sql.py0000664000175000017500000002514400000000000023453 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_config import cfg from oslo_db import api as oslo_db_api from oslo_utils import timeutils import sqlalchemy from keystone.common import provider_api from keystone.common import sql from keystone import exception from keystone.identity.backends import base as identity_base from keystone.identity.backends import sql_model as model from keystone.identity.shadow_backends import base CONF = cfg.CONF PROVIDERS = provider_api.ProviderAPIs class ShadowUsers(base.ShadowUsersDriverBase): @sql.handle_conflicts(conflict_type='federated_user') def create_federated_user(self, domain_id, federated_dict, email=None): local_entity = { 'domain_id': domain_id, 'local_id': federated_dict['unique_id'], 'entity_type': 'user', } public_id = PROVIDERS.id_generator_api.generate_public_ID(local_entity) user = {'id': public_id, 'domain_id': domain_id, 'enabled': True} if email: user['email'] = email with sql.session_for_write() as session: federated_ref = model.FederatedUser.from_dict(federated_dict) user_ref = model.User.from_dict(user) user_ref.created_at = timeutils.utcnow() user_ref.federated_users.append(federated_ref) session.add(user_ref) return identity_base.filter_user(user_ref.to_dict()) @sql.handle_conflicts(conflict_type='federated_user') def create_federated_object(self, fed_dict): with sql.session_for_write() as session: fed_ref = model.FederatedUser.from_dict(fed_dict) session.add(fed_ref) def delete_federated_object(self, user_id): with sql.session_for_write() as session: q = session.query(model.FederatedUser) q = q.filter(model.FederatedUser.user_id == 
user_id) q.delete(False) def get_federated_objects(self, user_id): with sql.session_for_read() as session: query = session.query(model.FederatedUser) query = query.filter(model.FederatedUser.user_id == user_id) fed_ref = [] for row in query: m = model.FederatedUser( idp_id=row.idp_id, protocol_id=row.protocol_id, unique_id=row.unique_id, ) fed_ref.append(m.to_dict()) return base.federated_objects_to_list(fed_ref) def _update_query_with_federated_statements(self, hints, query): statements = [] for filter_ in hints.filters: if filter_['name'] == 'idp_id': statements.append( model.FederatedUser.idp_id == filter_['value'] ) if filter_['name'] == 'protocol_id': statements.append( model.FederatedUser.protocol_id == filter_['value'] ) if filter_['name'] == 'unique_id': statements.append( model.FederatedUser.unique_id == filter_['value'] ) # Remove federated attributes to prevent redundancies from # sql.filter_limit_query which filters remaining hints hints.filters = [ x for x in hints.filters if x['name'] not in ('idp_id', 'protocol_id', 'unique_id') ] if statements: query = query.filter(sqlalchemy.and_(*statements)) return query def get_federated_users(self, hints): with sql.session_for_read() as session: query = ( session.query(model.User) .outerjoin(model.LocalUser) .outerjoin(model.FederatedUser) ) query = query.filter(model.User.id == model.FederatedUser.user_id) query = self._update_query_with_federated_statements(hints, query) name_filter = None for filter_ in hints.filters: if filter_['name'] == 'name': name_filter = filter_ query = query.filter( model.FederatedUser.display_name == name_filter['value'] ) break if name_filter: hints.filters.remove(name_filter) user_refs = sql.filter_limit_query(model.User, query, hints) return [identity_base.filter_user(x.to_dict()) for x in user_refs] def get_federated_user(self, idp_id, protocol_id, unique_id): # NOTE(notmorgan): Open a session here to ensure .to_dict is called # within an active session context. 
This will prevent lazy-load # relationship failure edge-cases # FIXME(notmorgan): Eventually this should not call `to_dict` here and # rely on something already in the session context to perform the # `to_dict` call. with sql.session_for_read(): user_ref = self._get_federated_user(idp_id, protocol_id, unique_id) return identity_base.filter_user(user_ref.to_dict()) def _get_federated_user(self, idp_id, protocol_id, unique_id): """Return the found user for the federated identity. :param idp_id: The identity provider ID :param protocol_id: The federation protocol ID :param unique_id: The user's unique ID (unique within the IdP) :returns User: Returns a reference to the User """ with sql.session_for_read() as session: query = session.query(model.User).outerjoin(model.LocalUser) query = query.join(model.FederatedUser) query = query.filter(model.FederatedUser.idp_id == idp_id) query = query.filter( model.FederatedUser.protocol_id == protocol_id ) query = query.filter(model.FederatedUser.unique_id == unique_id) try: user_ref = query.one() except sql.NotFound: raise exception.UserNotFound(user_id=unique_id) return user_ref def set_last_active_at(self, user_id): with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) if user_ref: user_ref.last_active_at = timeutils.utcnow().date() @sql.handle_conflicts(conflict_type='federated_user') def update_federated_user_display_name( self, idp_id, protocol_id, unique_id, display_name ): with sql.session_for_write() as session: query = session.query(model.FederatedUser) query = query.filter(model.FederatedUser.idp_id == idp_id) query = query.filter( model.FederatedUser.protocol_id == protocol_id ) query = query.filter(model.FederatedUser.unique_id == unique_id) query = query.filter( model.FederatedUser.display_name != display_name ) query.update({'display_name': display_name}) return @sql.handle_conflicts(conflict_type='nonlocal_user') def create_nonlocal_user(self, user_dict): new_user_dict = 
copy.deepcopy(user_dict) # remove local_user attributes from new_user_dict new_user_dict.pop('name', None) new_user_dict.pop('password', None) # create nonlocal_user dict new_nonlocal_user_dict = {'name': user_dict['name']} with sql.session_for_write() as session: new_nonlocal_user_ref = model.NonLocalUser.from_dict( new_nonlocal_user_dict ) new_user_ref = model.User.from_dict(new_user_dict) new_user_ref.created_at = timeutils.utcnow() new_user_ref.nonlocal_user = new_nonlocal_user_ref session.add(new_user_ref) return identity_base.filter_user(new_user_ref.to_dict()) @oslo_db_api.wrap_db_retry(retry_on_deadlock=True) def delete_user(self, user_id): with sql.session_for_write() as session: ref = self._get_user(session, user_id) q = session.query(model.UserGroupMembership) q = q.filter_by(user_id=user_id) q.delete(False) session.delete(ref) def get_user(self, user_id): with sql.session_for_read() as session: user_ref = self._get_user(session, user_id) return identity_base.filter_user(user_ref.to_dict()) def _get_user(self, session, user_id): user_ref = session.get(model.User, user_id) if not user_ref: raise exception.UserNotFound(user_id=user_id) return user_ref def list_federated_users_info(self, hints=None): with sql.session_for_read() as session: query = session.query(model.FederatedUser) fed_user_refs = sql.filter_limit_query( model.FederatedUser, query, hints ) return [x.to_dict() for x in fed_user_refs] def add_user_to_group_expires(self, user_id, group_id): def get_federated_user(): with sql.session_for_read() as session: query = session.query(model.FederatedUser) query = query.filter_by(user_id=user_id) user = query.first() if not user: # Note(knikolla): This shouldn't really ever happen, since # this requires the user to already be logged in. 
raise exception.UserNotFound(user_id=user_id) return user with sql.session_for_write() as session: user = get_federated_user() query = session.query(model.ExpiringUserGroupMembership) query = query.filter_by(user_id=user_id) query = query.filter_by(group_id=group_id) membership = query.first() if membership: membership.last_verified = timeutils.utcnow() else: session.add( model.ExpiringUserGroupMembership( user_id=user_id, group_id=group_id, idp_id=user.idp_id, last_verified=timeutils.utcnow(), ) ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/limit/0000775000175000017500000000000000000000000016442 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/__init__.py0000664000175000017500000000114600000000000020555 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.limit.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/limit/backends/0000775000175000017500000000000000000000000020214 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/backends/__init__.py0000664000175000017500000000000000000000000022313 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/backends/base.py0000664000175000017500000001317500000000000021507 0ustar00zuulzuul00000000000000# Copyright 2017 SUSE Linux Gmbh # Copyright 2017 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import keystone.conf from keystone import exception CONF = keystone.conf.CONF class UnifiedLimitDriverBase(metaclass=abc.ABCMeta): def _get_list_limit(self): return CONF.unified_limit.list_limit or CONF.list_limit @abc.abstractmethod def create_registered_limits(self, registered_limits): """Create new registered limits. :param registered_limits: a list of dictionaries representing limits to create. :returns: all the newly created registered limits. :raises keystone.exception.Conflict: If a duplicate registered limit exists. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_registered_limit(self, registered_limit_id, registered_limit): """Update existing registered limits. :param registered_limit_id: the id of the registered limit. :param registered_limit: a dict containing the registered limit attributes to update. :returns: the updated registered limit. :raises keystone.exception.RegisteredLimitNotFound: If registered limit doesn't exist. :raises keystone.exception.Conflict: If update to a duplicate registered limit. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_registered_limits(self, hints): """List all registered limits. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: a list of dictionaries or an empty registered limit. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_registered_limit(self, registered_limit_id): """Get a registered limit. :param registered_limit_id: the registered limit id to get. :returns: a dictionary representing a registered limit reference. :raises keystone.exception.RegisteredLimitNotFound: If registered limit doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_registered_limit(self, registered_limit_id): """Delete an existing registered limit. :param registered_limit_id: the registered limit id to delete. :raises keystone.exception.RegisteredLimitNotFound: If registered limit doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_limits(self, limits): """Create new limits. :param limits: a list of dictionaries representing limits to create. :returns: all the newly created limits. :raises keystone.exception.Conflict: If a duplicate limit exists. :raises keystone.exception.NoLimitReference: If no reference registered limit exists. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_limit(self, limit_id, limit): """Update existing limits. :param limit_id: the id of the limit. :param limit: a dict containing the limit attributes to update. :returns: the updated limit. :raises keystone.exception.LimitNotFound: If limit doesn't exist. :raises keystone.exception.Conflict: If update to a duplicate limit. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_limits(self, hints): """List all limits. :param hints: contains the list of filters yet to be satisfied. Any filters satisfied here will be removed so that the caller will know if any filters remain. :returns: a list of dictionaries or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_limit(self, limit_id): """Get a limit. :param limit_id: the limit id to get. :returns: a dictionary representing a limit reference. :raises keystone.exception.LimitNotFound: If limit doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_limit(self, limit_id): """Delete an existing limit. :param limit_id: the limit id to delete. :raises keystone.exception.LimitNotFound: If limit doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_limits_for_project(self, project_id): """Delete the existing limits which belong to the specified project. :param project_id: the limits' project id. :returns: a dictionary representing the deleted limits id. Used for cache invalidating. 
""" raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/backends/sql.py0000664000175000017500000003060300000000000021367 0ustar00zuulzuul00000000000000# Copyright 2017 SUSE Linux Gmbh # Copyright 2017 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from oslo_db import exception as db_exception import sqlalchemy from sqlalchemy.ext.hybrid import hybrid_property from keystone.common import driver_hints from keystone.common import sql from keystone import exception from keystone.i18n import _ from keystone.limit.backends import base class RegisteredLimitModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'registered_limit' attributes = [ 'internal_id', 'id', 'service_id', 'region_id', 'resource_name', 'default_limit', 'description', ] internal_id = sql.Column(sql.Integer, primary_key=True, nullable=False) id = sql.Column(sql.String(length=64), nullable=False, unique=True) service_id = sql.Column(sql.String(255), sql.ForeignKey('service.id')) region_id = sql.Column( sql.String(64), sql.ForeignKey('region.id'), nullable=True ) resource_name = sql.Column(sql.String(255)) default_limit = sql.Column(sql.Integer, nullable=False) description = sql.Column(sql.Text()) def to_dict(self): ref = super().to_dict() ref.pop('internal_id') return ref class LimitModel(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'limit' attributes = 
[ 'internal_id', 'id', 'project_id', 'domain_id', 'service_id', 'region_id', 'resource_name', 'resource_limit', 'description', 'registered_limit_id', ] internal_id = sql.Column(sql.Integer, primary_key=True, nullable=False) id = sql.Column(sql.String(length=64), nullable=False, unique=True) project_id = sql.Column(sql.String(64)) domain_id = sql.Column(sql.String(64)) resource_limit = sql.Column(sql.Integer, nullable=False) description = sql.Column(sql.Text()) registered_limit_id = sql.Column( sql.String(64), sql.ForeignKey('registered_limit.id') ) registered_limit = sqlalchemy.orm.relationship('RegisteredLimitModel') @hybrid_property def service_id(self): if self.registered_limit: return self.registered_limit.service_id return None @service_id.expression # type: ignore[no-redef] def service_id(self): return RegisteredLimitModel.service_id @hybrid_property def region_id(self): if self.registered_limit: return self.registered_limit.region_id return None @region_id.expression # type: ignore[no-redef] def region_id(self): return RegisteredLimitModel.region_id @hybrid_property def resource_name(self): if self.registered_limit: return self.registered_limit.resource_name return self._resource_name @resource_name.expression # type: ignore[no-redef] def resource_name(self): return RegisteredLimitModel.resource_name def to_dict(self): ref = super().to_dict() if self.registered_limit: ref['service_id'] = self.registered_limit.service_id ref['region_id'] = self.registered_limit.region_id ref['resource_name'] = self.registered_limit.resource_name ref.pop('internal_id') ref.pop('registered_limit_id') return ref class UnifiedLimit(base.UnifiedLimitDriverBase): def _check_unified_limit_unique( self, unified_limit, is_registered_limit=True ): # Ensure the new created or updated unified limit won't break the # current reference between registered limit and limit. i.e. We should # ensure that there is no duplicate entry. 
hints = driver_hints.Hints() if is_registered_limit: hints.add_filter('service_id', unified_limit['service_id']) hints.add_filter('resource_name', unified_limit['resource_name']) hints.add_filter('region_id', unified_limit.get('region_id')) with sql.session_for_read() as session: query = session.query(RegisteredLimitModel) unified_limits = sql.filter_limit_query( RegisteredLimitModel, query, hints ).all() else: hints.add_filter( 'registered_limit_id', unified_limit['registered_limit_id'] ) is_project_limit = ( True if unified_limit.get('project_id') else False ) if is_project_limit: hints.add_filter('project_id', unified_limit['project_id']) else: hints.add_filter('domain_id', unified_limit['domain_id']) with sql.session_for_read() as session: query = session.query(LimitModel) unified_limits = sql.filter_limit_query( LimitModel, query, hints ).all() if unified_limits: msg = _('Duplicate entry') limit_type = 'registered_limit' if is_registered_limit else 'limit' raise exception.Conflict(type=limit_type, details=msg) def _check_referenced_limit_reference(self, registered_limit): # When updating or deleting a registered limit, we should ensure there # is no reference limit. 
with sql.session_for_read() as session: limits = session.query(LimitModel).filter_by( registered_limit_id=registered_limit['id'] ) if limits.all(): raise exception.RegisteredLimitError(id=registered_limit.id) @sql.handle_conflicts(conflict_type='registered_limit') def create_registered_limits(self, registered_limits): with sql.session_for_write() as session: new_registered_limits = [] for registered_limit in registered_limits: self._check_unified_limit_unique(registered_limit) ref = RegisteredLimitModel.from_dict(registered_limit) session.add(ref) new_registered_limits.append(ref.to_dict()) return new_registered_limits @sql.handle_conflicts(conflict_type='registered_limit') def update_registered_limit(self, registered_limit_id, registered_limit): try: with sql.session_for_write() as session: ref = self._get_registered_limit(session, registered_limit_id) self._check_referenced_limit_reference(ref) old_dict = ref.to_dict() old_dict.update(registered_limit) if ( registered_limit.get('service_id') or 'region_id' in registered_limit or registered_limit.get('resource_name') ): self._check_unified_limit_unique(old_dict) new_registered_limit = RegisteredLimitModel.from_dict(old_dict) for attr in registered_limit: if attr != 'id': setattr(ref, attr, getattr(new_registered_limit, attr)) return ref.to_dict() except db_exception.DBReferenceError: raise exception.RegisteredLimitError(id=registered_limit_id) @driver_hints.truncated def list_registered_limits(self, hints): with sql.session_for_read() as session: registered_limits = session.query(RegisteredLimitModel) registered_limits = sql.filter_limit_query( RegisteredLimitModel, registered_limits, hints ) return [s.to_dict() for s in registered_limits] def _get_registered_limit(self, session, registered_limit_id): query = session.query(RegisteredLimitModel).filter_by( id=registered_limit_id ) ref = query.first() if ref is None: raise exception.RegisteredLimitNotFound(id=registered_limit_id) return ref def 
get_registered_limit(self, registered_limit_id): with sql.session_for_read() as session: return self._get_registered_limit( session, registered_limit_id ).to_dict() def delete_registered_limit(self, registered_limit_id): try: with sql.session_for_write() as session: ref = self._get_registered_limit(session, registered_limit_id) self._check_referenced_limit_reference(ref) session.delete(ref) except db_exception.DBReferenceError: raise exception.RegisteredLimitError(id=registered_limit_id) def _check_and_fill_registered_limit_id(self, limit): # Make sure there is a referenced registered limit first. Then add # the registered limit id to the new created limit. hints = driver_hints.Hints() limit_copy = copy.deepcopy(limit) hints.add_filter('service_id', limit_copy.pop('service_id')) hints.add_filter('resource_name', limit_copy.pop('resource_name')) hints.add_filter('region_id', limit_copy.pop('region_id', None)) with sql.session_for_read() as session: registered_limits = session.query(RegisteredLimitModel) registered_limits = sql.filter_limit_query( RegisteredLimitModel, registered_limits, hints ) reg_limits = registered_limits.all() if not reg_limits: raise exception.NoLimitReference limit_copy['registered_limit_id'] = reg_limits[0]['id'] return limit_copy @sql.handle_conflicts(conflict_type='limit') def create_limits(self, limits): try: with sql.session_for_write() as session: new_limits = [] for limit in limits: target = self._check_and_fill_registered_limit_id(limit) self._check_unified_limit_unique( target, is_registered_limit=False ) ref = LimitModel.from_dict(target) session.add(ref) new_limit = ref.to_dict() new_limit['service_id'] = limit['service_id'] new_limit['region_id'] = limit.get('region_id') new_limit['resource_name'] = limit['resource_name'] new_limits.append(new_limit) return new_limits except db_exception.DBReferenceError: raise exception.NoLimitReference() @sql.handle_conflicts(conflict_type='limit') def update_limit(self, limit_id, limit): with 
sql.session_for_write() as session: ref = self._get_limit(session, limit_id) if limit.get('resource_limit'): ref.resource_limit = limit['resource_limit'] if limit.get('description'): ref.description = limit['description'] return ref.to_dict() @driver_hints.truncated def list_limits(self, hints): with sql.session_for_read() as session: query = session.query(LimitModel).outerjoin(RegisteredLimitModel) limits = sql.filter_limit_query(LimitModel, query, hints) return [limit.to_dict() for limit in limits] def _get_limit(self, session, limit_id): query = session.query(LimitModel).filter_by(id=limit_id) ref = query.first() if ref is None: raise exception.LimitNotFound(id=limit_id) return ref def get_limit(self, limit_id): with sql.session_for_read() as session: return self._get_limit(session, limit_id).to_dict() def delete_limit(self, limit_id): with sql.session_for_write() as session: ref = self._get_limit(session, limit_id) session.delete(ref) def delete_limits_for_project(self, project_id): limit_ids = [] with sql.session_for_write() as session: query = session.query(LimitModel) query = query.filter_by(project_id=project_id) for limit in query.all(): limit_ids.append(limit.id) query.delete() return limit_ids ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/core.py0000664000175000017500000001323000000000000017743 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import copy from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.limit.models import base CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs MEMOIZE = cache.get_memoization_decorator(group='unified_limit') class Manager(manager.Manager): driver_namespace = 'keystone.unified_limit' _provides_api = 'unified_limit_api' def __init__(self): unified_limit_driver = CONF.unified_limit.driver super().__init__(unified_limit_driver) self.enforcement_model = base.load_driver( CONF.unified_limit.enforcement_model ) def check_project_depth(self): """Check if project depth satisfies current enforcement model.""" PROVIDERS.resource_api.check_project_depth( self.enforcement_model.MAX_PROJECT_TREE_DEPTH ) def _assert_resource_exist(self, unified_limit, target): try: service_id = unified_limit.get('service_id') if service_id is not None: PROVIDERS.catalog_api.get_service(service_id) region_id = unified_limit.get('region_id') if region_id is not None: PROVIDERS.catalog_api.get_region(region_id) project_id = unified_limit.get('project_id') if project_id is not None: project = PROVIDERS.resource_api.get_project(project_id) if project['is_domain']: # Treat the input limit as domain level limit. 
unified_limit['domain_id'] = unified_limit.pop( 'project_id' ) domain_id = unified_limit.get('domain_id') if domain_id is not None: PROVIDERS.resource_api.get_domain(domain_id) except exception.ServiceNotFound: raise exception.ValidationError( attribute='service_id', target=target ) except exception.RegionNotFound: raise exception.ValidationError( attribute='region_id', target=target ) except exception.ProjectNotFound: raise exception.ValidationError( attribute='project_id', target=target ) except exception.DomainNotFound: raise exception.ValidationError( attribute='domain_id', target=target ) def get_model(self): """Return information of the configured enforcement model.""" return { 'name': self.enforcement_model.NAME, 'description': self.enforcement_model.DESCRIPTION, } def create_registered_limits(self, registered_limits): for registered_limit in registered_limits: self._assert_resource_exist(registered_limit, 'registered_limit') return self.driver.create_registered_limits(registered_limits) def update_registered_limit(self, registered_limit_id, registered_limit): self._assert_resource_exist(registered_limit, 'registered_limit') updated_registered_limit = self.driver.update_registered_limit( registered_limit_id, registered_limit ) self.get_registered_limit.invalidate( self, updated_registered_limit['id'] ) return updated_registered_limit @manager.response_truncated def list_registered_limits(self, hints=None): return self.driver.list_registered_limits( hints or driver_hints.Hints() ) @MEMOIZE def get_registered_limit(self, registered_limit_id): return self.driver.get_registered_limit(registered_limit_id) def delete_registered_limit(self, registered_limit_id): self.driver.delete_registered_limit(registered_limit_id) self.get_registered_limit.invalidate(self, registered_limit_id) def create_limits(self, limits): for limit in limits: self._assert_resource_exist(limit, 'limit') self.enforcement_model.check_limit(copy.deepcopy(limits)) return 
self.driver.create_limits(limits) def update_limit(self, limit_id, limit): self._assert_resource_exist(limit, 'limit') limit_ref = self.get_limit(limit_id) limit_ref.update(limit) self.enforcement_model.check_limit(copy.deepcopy([limit_ref])) updated_limit = self.driver.update_limit(limit_id, limit) self.get_limit.invalidate(self, updated_limit['id']) return updated_limit @manager.response_truncated def list_limits(self, hints=None): return self.driver.list_limits(hints or driver_hints.Hints()) @MEMOIZE def get_limit(self, limit_id): return self.driver.get_limit(limit_id) def delete_limit(self, limit_id): self.driver.delete_limit(limit_id) self.get_limit.invalidate(self, limit_id) def delete_limits_for_project(self, project_id): limit_ids = self.driver.delete_limits_for_project(project_id) for limit_id in limit_ids: self.get_limit.invalidate(self, limit_id) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/limit/models/0000775000175000017500000000000000000000000017725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/models/__init__.py0000664000175000017500000000000000000000000022024 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/models/base.py0000664000175000017500000000337600000000000021222 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import typing as ty import stevedore import keystone.conf from keystone.i18n import _ CONF = keystone.conf.CONF def load_driver(driver_name, *args): namespace = 'keystone.unified_limit.model' try: driver_manager = stevedore.DriverManager( namespace, driver_name, invoke_on_load=True, invoke_args=args ) return driver_manager.driver except stevedore.exception.NoMatches: msg = _('Unable to find %(name)r driver in %(namespace)r.') raise ImportError(msg % {'name': driver_name, 'namespace': namespace}) class ModelBase(metaclass=abc.ABCMeta): """Interface for a limit model driver.""" NAME: str DESCRIPTION: str MAX_PROJECT_TREE_DEPTH: ty.Optional[int] = None def check_limit(self, limits): """Check the new creating or updating limits if satisfy the model. :param limits: A list of the limit references to be checked. :type limits: A list of the limits. Each limit is a dictionary reference containing all limit attributes. :raises keystone.exception.InvalidLimit: If any of the input limits doesn't satisfy the limit model. """ raise NotImplementedError() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/models/flat.py0000664000175000017500000000166400000000000021234 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.limit.models import base class FlatModel(base.ModelBase): NAME = 'flat' DESCRIPTION = ( 'Limit enforcement and validation does not take project hierarchy ' 'into consideration.' ) MAX_PROJECT_TREE_DEPTH = None def check_limit(self, limits): # Flat limit model is not hierarchical, so don't need to check the # value. return ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/models/strict_two_level.py0000664000175000017500000001761300000000000023677 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from keystone.common import driver_hints from keystone.common import provider_api from keystone import exception from keystone.i18n import _ from keystone.limit.models import base LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class StrictTwoLevelModel(base.ModelBase): NAME = 'strict_two_level' DESCRIPTION = ( 'This model requires project hierarchy never exceeds a depth of two' ) MAX_PROJECT_TREE_DEPTH = 2 def _get_specified_limit_value( self, resource_name, service_id, region_id, project_id=None, domain_id=None, ): """Get the specified limit value. Try to give the resource limit first. If the specified limit is a domain limit and the resource limit value is None, get the related registered limit value instead. """ hints = driver_hints.Hints() if project_id: hints.add_filter('project_id', project_id) else: hints.add_filter('domain_id', domain_id) hints.add_filter('service_id', service_id) hints.add_filter('resource_name', resource_name) hints.add_filter('region_id', region_id) limits = PROVIDERS.unified_limit_api.list_limits(hints) limit_value = limits[0]['resource_limit'] if limits else None if not limits and domain_id: hints = driver_hints.Hints() hints.add_filter('service_id', service_id) hints.add_filter('resource_name', resource_name) hints.add_filter('region_id', region_id) limits = PROVIDERS.unified_limit_api.list_registered_limits(hints) limit_value = limits[0]['default_limit'] if limits else None return limit_value def _check_limit( self, resource_name, service_id, region_id, resource_limit, domain_id=None, parent_id=None, ): """Check the specified limit value satisfies the related project tree. 1. Ensure the limit is smaller than its parent. 2. Ensure the limit is bigger than its children. """ if parent_id: # This is a project limit, need make sure its limit is not bigger # than its parent. 
parent_limit_value = self._get_specified_limit_value( resource_name, service_id, region_id, domain_id=parent_id ) if parent_limit_value and resource_limit > parent_limit_value: raise exception.InvalidLimit( reason="Limit is bigger than parent." ) else: # This is a domain limit, need make sure its limit is not smaller # than its children. sub_projects = PROVIDERS.resource_api.list_projects_in_subtree( domain_id ) for sub_project in sub_projects: sub_limit_value = self._get_specified_limit_value( resource_name, service_id, region_id, project_id=sub_project['id'], ) if sub_limit_value and resource_limit < sub_limit_value: raise exception.InvalidLimit( reason="Limit is smaller than child." ) def check_limit(self, limits): """Check the input limits satisfy the related project tree or not. 1. Ensure the input is legal. 2. Ensure the input will not break the exist limit tree. """ for limit in limits: project_id = limit.get('project_id') domain_id = limit.get('domain_id') resource_name = limit['resource_name'] resource_limit = limit['resource_limit'] service_id = limit['service_id'] region_id = limit.get('region_id') try: # Since domain is considered as the first level, if the input # limit is project level, its parent must be a domain. if project_id: parent_id = PROVIDERS.resource_api.get_project(project_id)[ 'parent_id' ] parent_limit = list( filter( lambda x: ( x.get('domain_id') == parent_id and x['service_id'] == service_id and x.get('region_id') == region_id and x['resource_name'] == resource_name ), limits, ) ) if parent_limit: if resource_limit > parent_limit[0]['resource_limit']: error = _( "The value of the limit which project is" " %(project_id)s should not bigger than " "its parent domain %(domain_id)s." ) % { "project_id": project_id, "domain_id": parent_limit[0]['domain_id'], } raise exception.InvalidLimit(reason=error) # The limit's parent is in request body, no need to # check the backend limit any more. 
continue else: parent_id = None self._check_limit( resource_name, service_id, region_id, resource_limit, domain_id=domain_id, parent_id=parent_id, ) except exception.InvalidLimit: error = ( "The resource limit (%(level)s: %(id)s, " "resource_name: %(resource_name)s, " "resource_limit: %(resource_limit)s, " "service_id: %(service_id)s, " "region_id: %(region_id)s) doesn't satisfy " "current hierarchy model." ) % { 'level': 'project_id' if project_id else 'domain_id', 'id': project_id or domain_id, 'resource_name': resource_name, 'resource_limit': resource_limit, 'service_id': service_id, 'region_id': region_id, } tr_error = _( "The resource limit (%(level)s: %(id)s, " "resource_name: %(resource_name)s, " "resource_limit: %(resource_limit)s, " "service_id: %(service_id)s, " "region_id: %(region_id)s) doesn't satisfy " "current hierarchy model." ) % { 'level': 'project_id' if project_id else 'domain_id', 'id': project_id or domain_id, 'resource_name': resource_name, 'resource_limit': resource_limit, 'service_id': service_id, 'region_id': region_id, } LOG.error(error) raise exception.InvalidLimit(reason=tr_error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/limit/schema.py0000664000175000017500000000675700000000000020273 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types _registered_limit_properties = { 'service_id': parameter_types.id_string, 'region_id': {'type': ['null', 'string']}, 'resource_name': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'default_limit': { 'type': 'integer', 'minimum': -1, 'maximum': 0x7FFFFFFF, # The maximum value a signed INT may have }, 'description': validation.nullable(parameter_types.description), } _registered_limit_create = { 'type': 'object', 'properties': _registered_limit_properties, 'additionalProperties': False, 'required': ['service_id', 'resource_name', 'default_limit'], } registered_limit_create = { 'type': 'array', 'items': _registered_limit_create, 'minItems': 1, } registered_limit_update = { 'type': 'object', 'properties': _registered_limit_properties, 'additionalProperties': False, } _project_limit_create_properties = { 'project_id': parameter_types.id_string, 'service_id': parameter_types.id_string, 'region_id': {'type': 'string'}, 'resource_name': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'resource_limit': { 'type': 'integer', 'minimum': -1, 'maximum': 0x7FFFFFFF, # The maximum value a signed INT may have }, 'description': validation.nullable(parameter_types.description), } _domain_limit_create_properties = { 'domain_id': parameter_types.id_string, 'service_id': parameter_types.id_string, 'region_id': {'type': 'string'}, 'resource_name': {'type': 'string', 'minLength': 1, 'maxLength': 255}, 'resource_limit': { 'type': 'integer', 'minimum': -1, 'maximum': 0x7FFFFFFF, # The maximum value a signed INT may have }, 'description': validation.nullable(parameter_types.description), } _limit_create = { 'type': 'object', 'oneOf': [ { 'properties': _project_limit_create_properties, 'required': [ 'project_id', 'service_id', 'resource_name', 'resource_limit', ], 'additionalProperties': False, }, { 'properties': _domain_limit_create_properties, 'required': [ 'domain_id', 'service_id', 
'resource_name', 'resource_limit', ], 'additionalProperties': False, }, ], } limit_create = {'type': 'array', 'items': _limit_create, 'minItems': 1} _limit_update_properties = { 'resource_limit': { 'type': 'integer', 'minimum': -1, 'maximum': 0x7FFFFFFF, # The maximum value a signed INT may have }, 'description': validation.nullable(parameter_types.description), } limit_update = { 'type': 'object', 'properties': _limit_update_properties, 'additionalProperties': False, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4261153 keystone-26.0.0/keystone/locale/0000775000175000017500000000000000000000000016563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/de/0000775000175000017500000000000000000000000017153 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000020740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/de/LC_MESSAGES/keystone.po0000664000175000017500000010112700000000000023143 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Ettore Atalan , 2014 # Robert Simai, 2014 # Reik Keutterling , 2015 # Andreas Jaeger , 2016. #zanata # Andreas Jaeger , 2020. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-25 10:12+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: German\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Der %(entity)s-Name darf nicht die folgenden reservierten Zeichen enthalten: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s ist kein gültiges Benachrichtigungsereignis; erforderlich ist " "%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s ist kein vertrauenswürdiger Dashboard-Host" #, python-format msgid "%(key_repo)s does not exist" msgstr "%(key_repo)s ist nicht vorhanden" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s bietet keine Datenbankmigrationen. Der Migrations-Repository-" "Pfad unter %(path)s ist nicht vorhanden oder ist kein Verzeichnis." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "%s-Feld ist erforderlich und darf nicht leer sein" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Modus insecure_debug inaktivieren, um diese Details zu unterdrücken.)" msgid "--all option cannot be mixed with other options" msgstr "--all-Option kann nicht zusammen mit anderen Optionen verwendet werden" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "Ein projektorientiertes Token ist zum Produzieren eines Servicekatalogs " "erforderlich." msgid "Access token is expired" msgstr "Zugriffstoken ist abgelaufen" msgid "Access token not found" msgstr "Zugriffstoken nicht gefunden" msgid "Additional authentications steps required." msgstr "Zusätzliche Authentifizierungsschritte sind notwendig." msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Beim Abrufen der Domänenkonfigurationen ist ein unerwarteter Fehler " "aufgetreten" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Beim Versuch, %s zu speichern, ist ein unerwarteter Fehler aufgetreten" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "Wegen eines unerwarteten Fehlers konnte der Server Ihre Anforderung nicht " "ausführen." msgid "At least one option must be provided" msgstr "Mindestens eine Option muss angegeben werden" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Mindestens eine Option muss angegeben werden. Verwenden Sie entweder --all " "oder --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Versuch einer Authentifizierung mit einer nicht unterstützten Methode." msgid "Authentication plugin error." msgstr "Authentifizierung-Plugin-Fehler" msgid "Cannot authorize a request token with a token issued via delegation." 
msgstr "" "Anforderungstoken kann mit einem per Delegierung ausgegebenen Token nicht " "autorisiert werden." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s kann nicht geändert werden" msgid "Cannot change Domain ID" msgstr "Die Domänen-ID kann nicht geändert werden" msgid "Cannot change user ID" msgstr "Benutzer-ID kann nicht geändert werden" msgid "Cannot change user name" msgstr "Benutzername kann nicht geändert werden" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s." msgstr "" "Es kann kein Endpunkt mit einer ungültigen URL erstellt werden: %(url)s." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Eine aktivierte Domäne kann nicht gelöscht werden. Inaktivieren Sie sie " "zuerst." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Kann Projekt %(project_id)s nicht löschen, da die zugehörige untergeordnete " "Baumstruktur aktivierte Projekte enthält." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Das Projekt %s kann nicht gelöscht werden, da es kein Blattelement in der " "Hierarchie darstellt. Verwenden Sie die Option 'cascade', wenn Sie eine " "vollständige, untergeordnete Baumstruktur löschen möchten. " #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Kann Projekt %(project_id)s nicht inaktivieren, da die zugehörige " "untergeordnete Baumstruktur aktivierte Projekte enthält." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Kann Projekt %s nicht aktivieren, da es über inaktivierte übergeordnete " "Projekte verfügt" msgid "Cannot list assignments sourced from groups and filtered by user ID." 
msgstr "" "Aus Gruppen erstellte und nach Benutzer-ID gefilterte Zuordnungen können " "nicht aufgelistet werden." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Anforderungstokens können mit einem per Delegierung ausgegebenen Token nicht " "aufgelistet werden." #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Nicht gewährte Rolle kann nicht entfernt werden, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Abschneiden eines Treiberaufrufs ohne Hinweisliste als erstem Parameter nach " "dem Treiber nicht möglich " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Die Abfrageparameter parents_as_list und parents_as_ids können nicht " "gleichzeitig verwendet werden." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Die Abfrageparameter subtree_as_list und subtree_as_ids können nicht " "gleichzeitig verwendet werden." msgid "Cascade update is only allowed for enabled attribute." msgstr "" "Die Aktualisierungsweitergabe ist nur für aktivierte Attribute zulässig." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Die Kombination von effektivem Filter und Gruppenfilter führt immer zu einer " "leeren Liste." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Die Kombination von effektivem Filter, Domänenfilter und vererbten Filtern " "führt immer zu einer leeren Liste." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Konfigurations-API-Entität unter /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Angabe von Regions-IDs, die miteinander im Konflikt stehen: \"%(url_id)s\" !" 
"= \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Kunde nicht gefunden" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Identitätsprovider-ID nicht gefunden. Die Konfigurationsoption " "%(issuer_attribute)s wurde in der Anforderungsumgebung nicht gefunden." msgid "Could not find Identity Provider identifier in environment" msgstr "Identitätsprovider-ID konnte in der Umgebung nicht gefunden werden" msgid "Could not find token" msgstr "Token konnte nicht gefunden werden" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Es konnten keine eingebundenen Benutzereigenschaften Identitätswerten " "zugeordnet werden. Überprüfen Sie die Debugprotokolle oder die verwendete " "Zuordnung, um weitere Details zu erhalten." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Benutzer konnte beim Festlegen der ephemeren Benutzeridentität nicht " "zugeordnet werden. Entweder muss in Zuordnungsregeln Benutzer-ID/Name " "angegeben werden oder Umgebungsvariable REMOTE_USER muss festgelegt werden." msgid "Could not validate the access token" msgstr "Das Zugriffstoken konnte nicht geprüft werden" msgid "Credential signature mismatch" msgstr "Übereinstimmungsfehler bei Berechtigungssignatur" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." 
msgstr "" "Eine Entität inaktivieren, in der das Attribut 'enable' ignoriert wird von " #, python-format msgid "Domain cannot be named %s" msgstr "Domäne kann nicht mit %s benannt werden" #, python-format msgid "Domain cannot have ID %s" msgstr "Domäne kann nicht die ID %s haben" #, python-format msgid "Domain is disabled: %s" msgstr "Domäne ist inaktiviert: %s" msgid "Domain name cannot contain reserved characters." msgstr "Der Domänenname darf keine reservierten Zeichen enthalten." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Domäne: für %(domain)s ist bereits eine Konfiguration definiert - Datei wird " "ignoriert: %(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "Doppelte ID, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Doppelter Eintrag: %s" #, python-format msgid "Duplicate name, %s." msgstr "Doppelter Name, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "Doppelte ferne ID: %s" msgid "EC2 access key not found." msgstr "EC2 Zugriffsschlüssel nicht gefunden." msgid "EC2 signature not supplied." msgstr "EC2-Signatur nicht angegeben." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpunkt %(endpoint_id)s nicht gefunden in Projekt %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Projektzuordnung für Endpunktgruppe nicht gefunden" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Stellen Sie sicher, dass die Konfigurationsoption idp_entity_id gesetzt ist. " msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Stellen Sie sicher, dass die Konfigurationsoption idp_sso_endpoint gesetzt " "ist. " #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Fehler bei der Auswertung der Konfigurationsdatei für Domäne: %(domain)s, " "Datei: %(file)s." 
#, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Fehler beim Öffnen der Datei %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Fehler beim Parsing der Regeln %(path)s: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Die maximal zulässige Anzahl an Versuchen, die Domäne %(domain)s für die " "Verwendung des SQL-Treibers zu registrieren, wurde überschritten. Die letzte " "Domäne, bei der die Registrierung erfolgreich gewesen zu sein scheint, war " "%(last_domain)s. Abbruch." #, python-format msgid "Expected dict or list: %s" msgstr "Verzeichnis oder Liste erwartet: %s" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s. The server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Es wurde erwartet, %(attribute)s in %(target)s zu finden. Der Server konnte " "die Anforderung nicht erfüllen, da ein fehlerhaftes Format oder ein anderer " "Fehler vorliegt. Es wird angenommen, dass der Fehler beim Client liegt." msgid "Failed to validate token" msgstr "Token konnte nicht geprüft werden" msgid "Federation token is expired" msgstr "Föderationstoken ist abgelaufen" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Feld \"remaining_uses\" ist auf %(value)s festgelegt, es darf jedoch nicht " "festgelegt werden, um eine Vertrauensbeziehung zu übertragen" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Gruppe %(group)s wird für domänenspezifische Konfigurationen nicht " "unterstützt" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." 
msgstr "" "Die von der Zuordnung %(mapping_id)s zurückgegebene Gruppe %(group_id)s " "konnte im Back-End nicht gefunden werden." #, python-format msgid "" "Group membership across backend boundaries is not allowed. Group in question " "is %(group_id)s, user is %(user_id)s." msgstr "" "Back-End-übergreifende Gruppenmitgliedschaft ist nicht zulässig. Die " "betroffene Gruppe ist %(group_id)s, Benutzer ist %(user_id)s." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID-Attribut %(id_attr)s wurde in LDAP-Objekt %(dn)s nicht gefunden" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Identitätsprovider %(idp)s ist inaktiviert" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Eingehende Identitätsprovider-ID ist nicht in den akzeptierten IDs enthalten." msgid "Invalid EC2 signature." msgstr "Ungültige EC2-Signatur." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Ungültige LDAP-TLS-Zertifikatsoption: %(option)s. Wählen Sie aus: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Ungültige LDAP TLS_AVAIL Option: %s. TLS nicht verfügbar" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Ungültige LDAP-TLS-deref-Option: %(option)s. Wählen Sie aus: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Ungültiger LDAP Bereich: %(scope)s. Wählen Sie aus: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Ungültige TLS /LDAPS Kombination" msgid "Invalid blob in credential" msgstr "Ungültiges Blob-Objekt im Berechtigungsnachweis" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." 
msgstr "" "Ungültiger Domänenname: %(domain)s im Konfigurationsdateinamen gefunden: " "%(file)s - diese Datei wird ignoriert." #, python-format msgid "Invalid domain specific configuration: %(reason)s." msgstr "Ungültige domänenspezifische Konfiguration: %(reason)s." #, python-format msgid "" "Invalid mix of entities for policy association: only Endpoint, Service, or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s." msgstr "" "Ungültige Mischung von Entitäten für Richtlinienzuordnung: nur Endpunkt, " "Service oder Region+Service sind zulässig. Anforderung war - Endpunkt: " "%(endpoint_id)s, Service: %(service_id)s, Region: %(region_id)s." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Ungültige Regel: %(identity_value)s. Die Suchbegriffe 'groups' und 'domain' " "müssen angegeben sein." msgid "Invalid signature" msgstr "Ungültige Signatur" msgid "Invalid user / password" msgstr "Ungültiger Benutzer / Passwort" msgid "Invalid username or TOTP passcode" msgstr "Ungültiger Benutzername oder TOTP-Kenncode" msgid "Invalid username or password" msgstr "Ungültiger Benutzername oder ungültiges Passwort." msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Länge der transformierbaren Ressourcen-ID liegt über der maximal zulässigen " "Anzahl von 64 Zeichen. " #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "Der lokale Abschnitt in der Zuordnung %(mapping_id)s bezieht sich auf eine " "ferne Übereinstimmung, die nicht vorhanden ist (z. B. '{0}' in einem lokalen " "Abschnitt)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Fehlerhafte Endpunkt-URL (%(endpoint)s), siehe Details im FEHLER-Protokoll. 
" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Die maximale Hierarchietiefe für den %s-Branch wurde erreicht." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Mitglied %(member)s ist bereits Mitglied der Gruppe %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Methode kann nicht aufgerufen werden: %s" msgid "Missing entity ID from environment" msgstr "Fehlende Entitäts-ID von Umgebung" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Das Ändern von \"redelegation_count\" ist bei der Redelegation nicht " "zulässig. Es wird empfohlen, diesen Parameter auszulassen." msgid "Multiple domains are not supported" msgstr "Mehrere Domänen werden nicht unterstützt" msgid "Must specify either domain or project" msgstr "Entweder Domäne oder Projekt muss angegeben werden" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "Weder Projektdomänen-ID noch Projektdomänenname wurde angegeben." msgid "No authenticated user" msgstr "Kein authentifizierter Benutzer" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Keine Chiffrierschlüssel gefunden; Führen Sie keystone-manage fernet_setup " "aus, um über Bootstrapping einen Schlüssel zu erhalten." msgid "No options specified" msgstr "Keine Optionen angegeben" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Endpunkt %(endpoint_id)s ist keine Richtlinie zugeordnet. 
" msgid "No token in the request" msgstr "Kein Token in der Anforderung" msgid "One of the trust agents is disabled or deleted" msgstr "Einer der Vertrauensagenten wurde inaktiviert oder gelöscht" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Option %(option)s ohne angegebene Gruppe gefunden, während die Domänen- " "Konfigurationsanforderung geprüft wurde" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "Option %(option)s in Gruppe %(group)s wird für domänenspezifische " "Konfigurationen nicht unterstützt" msgid "Project field is required and cannot be empty." msgstr "Projektfeld ist erforderlich und darf nicht leer sein." #, python-format msgid "Project is disabled: %s" msgstr "Projekt ist inaktiviert: %s" msgid "Project name cannot contain reserved characters." msgstr "Der Projektname darf keine reservierten Zeichen enthalten." #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Lesen des Standardwerts für die Option %(option)s in der Gruppe %(group)s " "wird nicht unterstützt." 
msgid "Redelegation allowed for delegated by trust only" msgstr "Redelegation nur zulässig für im Vertrauen redelegierte" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Verbleibende Redelegationstiefe von %(redelegation_depth)d aus dem " "zulässigen Bereich von [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "Anforderung muss über einen ursprünglichen Abfrageparameter verfügen" msgid "Request token is expired" msgstr "Anforderungstoken ist abgelaufen" msgid "Request token not found" msgstr "Anforderungstoken nicht gefunden" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Angeforderte Ablaufzeit übersteigt die, die von der redelegierten " "Vertrauensbeziehung bereitgestellt werden kann" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Die angeforderte Redelegationstiefe von %(requested_count)d übersteigt den " "zulässigen Wert von %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "Scoping sowohl auf 'domain' als auch auf 'project' ist nicht zulässig" msgid "Scoping to both domain and trust is not allowed" msgstr "Scoping sowohl auf 'domain' als auch auf 'trust' ist nicht zulässig" msgid "Scoping to both project and trust is not allowed" msgstr "Scoping sowohl auf 'project' als auch auf 'trust' ist nicht zulässig" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Service-Provider %(sp)s ist inaktiviert" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Einige angeforderte Rollen befinden sich nicht in einer redelegierten " "Vertrauensbeziehung" msgid "Specify a domain or project, not both" msgstr "Geben Sie eine Domäne oder ein Projekt an, nicht beides" msgid "Specify a user or group, not both" msgstr "Geben Sie einen Benutzer oder eine Gruppe an, nicht 
beides" #, python-format msgid "" "String length exceeded. The length of string '%(string)s' exceeds the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "Zeichenfolgelänge überschritten. Die Länge der Zeichenfolge '%(string)s' hat " "den Grenzwert von Spalte %(type)s(CHAR(%(length)d)) überschritten." msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "Die Zeitangabe in 'expires_at' darf nicht vor dem jetzigen Zeitpunkt liegen. " "Der Server konnte der Anforderung nicht nachkommen, da ein fehlerhaftes " "Format oder ein anderer Fehler vorliegt. Es wird angenommen, dass der Fehler " "beim Client liegt." msgid "The --all option cannot be used with the --domain-name option" msgstr "" "Die Option --all kann nicht zusammen mit der Option --domain-name verwendet " "werden" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Die Keystone-Konfigurationsdatei %(config_file)s konnte nicht gefunden " "werden." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "Die domänenspezifische Keystone-Konfiguration hat mehrere SQL-Treiber " "angegeben (nur einer ist zulässig): %(source)s." msgid "The action you have requested has not been implemented." msgstr "Die von Ihnen angeforderte Aktion wurde nicht implementiert." #, python-format msgid "The password does not match the requirements: %(detail)s." msgstr "Das Passwort entspricht nicht den Anforderungen: %(detail)s." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "Die Kennwortlänge muss kleiner-gleich %(size)i sein. 
Der Server konnte die " "Anforderung nicht erfüllen, da das Kennwort ungültig ist." msgid "The request you have made requires authentication." msgstr "Die von Ihnen gestellte Anfrage erfordert eine Authentifizierung." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "Der Aufruf zum Entziehen darf nicht sowohl domain_id als auch project_id " "aufweisen. Dies ist ein Fehler im Keystone-Server. Die aktuelle Anforderung " "wird abgebrochen. " msgid "The service you have requested is no longer available on this server." msgstr "" "Der Service, den Sie angefordert haben, ist auf diesem Server nicht mehr " "verfügbar." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "Die angegebene übergeordnete Region %(parent_region_id)s würde eine " "zirkuläre Regionshierarchie erstellen." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Der Wert der Gruppe %(group)s, der in der Konfiguration angegeben ist, muss " "ein Verzeichnis mit Optionen sein" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Dies ist keine anerkannte Fernet-Nutzdatenversion: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Zeitstempel nicht im erwarteten Format. Der Server konnte der Anforderung " "nicht nachkommen, da ein fehlerhaftes Format oder ein anderer Fehler " "vorliegt. Es wird angenommen, dass der Fehler beim Client liegt." msgid "Token version is unrecognizable or unsupported." msgstr "Tokenversion ist nicht erkennbar oder wird nicht unterstützt." msgid "Trustee has no delegated roles." msgstr "Trustee hat keine beauftragten Rollen." 
msgid "Trustor is disabled." msgstr "Trustor ist inaktiviert." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Es wird versucht, Gruppe %(group)s zu aktualisieren, damit nur diese Gruppe " "in der Konfiguration angegeben werden muss" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Es wird versucht, Option %(option)s in Gruppe %(group)s zu aktualisieren, " "die angegebene Konfiguration enthält jedoch stattdessen Option " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Es wird versucht, Option %(option)s in Gruppe %(group)s zu aktualisieren, " "damit nur diese Option in der Konfiguration angegeben werden muss" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Auf die Keystone-Datenbank kann nicht zugegriffen werden, überprüfen Sie, ob " "sie ordnungsgemäß konfiguriert ist. " #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Region %(region_id)s kann nicht gelöscht werden, da sie oder ihre " "untergeordneten Regionen über zugeordnete Endpunkte verfügen. 
" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Domänenkonfigurationsverzeichnis wurde nicht gefunden: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Suche nach Benutzer %s nicht möglich" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Identitätsattribut %(attribute)s kann nicht abgeglichen werden, da es die " "kollidierenden Werte %(new)s und %(old)s aufweist" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Unerwarteter Zuordnungstyp: %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Unerwarteter Status für JSON-Home-Antwort angefordert, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Unbekannte Domäne '%(name)s' angegeben durch --domain-name" msgid "Update of `domain_id` is not allowed." msgstr "Das Aktualisieren von `domain_id` ist nicht zulässig. " msgid "Update of `is_domain` is not allowed." msgstr "Das Aktualisieren von 'is_domain' ist nicht zulässig." msgid "Update of `parent_id` is not allowed." msgstr "Das Aktualisieren von 'parent_id' ist nicht zulässig." #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "Benutzer %(user_id)s hat keinen Zugriff auf Domäne %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "Benutzer %(user_id)s hat keinen Zugriff auf Projekt %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Benutzer %(user_id)s ist bereits Mitglied der Gruppe %(group_id)s." 
#, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Benutzer '%(user_id)s' nicht gefunden in Gruppe '%(group_id)s'" msgid "User IDs do not match" msgstr "Benutzerkennungen stimmen nicht überein" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "Benutzerauthentifizierung kann nicht erstellt werden, da entweder Benutzer-" "ID oder Benutzername mit Domänen-ID oder Benutzername mit Domänenname fehlt." #, python-format msgid "User is disabled: %s" msgstr "Benutzer ist inaktiviert: %s" msgid "User is not a trustee." msgstr "Benutzer ist kein Trustee." #, python-format msgid "User type %s not supported" msgstr "Benutzertyp %s nicht unterstützt" msgid "You are not authorized to perform the requested action." msgstr "" "Sie sind nicht dazu autorisiert, die angeforderte Aktion durchzuführen." #, python-format msgid "You are not authorized to perform the requested action: %(action)s." msgstr "" "Sie sind nicht berechtigt, die angeforderte Aktion %(action)s auszuführen." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Sie haben versucht, eine Ressource mit dem Admin-Token zu erstellen. Da " "sich dieses Token nicht innerhalb einer Domäne befindet, müssen Sie explizit " "eine Domäne angeben, zu der diese Ressource gehört. 
" msgid "any options" msgstr "beliebige Optionen" msgid "auth_type is not Negotiate" msgstr "auth_type ist nicht 'Negotiate'" msgid "authorizing user does not have role required" msgstr "Der autorisierte Benutzer verfügt nicht über die erforderliche Rolle" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "kann kein Projekt in einer Niederlassung erstellen, die ein inaktiviertes " "Projekt enthält: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Ein aktiviertes Projekt, das als Domäne agiert, kann nicht gelöscht werden. " "Inaktivieren Sie zuerst das Projekt %s." #, python-format msgid "group %(group)s" msgstr "Gruppe %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Es ist nicht zulässig, zwei Projekte zu haben, die als Domänen mit demselben " "Namen agieren: %s" msgid "only root projects are allowed to act as domains." msgstr "Nur Rootprojekte dürfen als Domänen agieren." #, python-format msgid "option %(option)s in group %(group)s" msgstr "Option %(option)s in Gruppe %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses muss eine positive Ganzzahl oder null sein." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses darf nicht festgelegt werden, wenn eine Redelegation zulässig " "ist" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "Anforderung zur Aktualisierung von Gruppe %(group)s, die angegebene " "Konfiguration enthält jedoch stattdessen Gruppe %(group_other)s" msgid "rescope a scoped token" msgstr "Bereich für bereichsorientierten Token ändern" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id muss angegeben werden, wenn include_subtree angegeben wurde." #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s nicht gefunden oder ist kein Verzeichnis" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s wurde nicht gefunden oder ist keine Datei" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/en_GB/0000775000175000017500000000000000000000000017535 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000021322 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/en_GB/LC_MESSAGES/keystone.po0000664000175000017500000016520500000000000023534 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. 
#zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-07-19 18:47+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-07-03 01:24+0000\n" "Last-Translator: Andi Chandler \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" #, python-format msgid "" "\n" "WARNING: %s" msgstr "" "\n" "WARNING: %s" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s is not a valid notification event, must be one of: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s is not a trusted dashboard host" #, python-format msgid "" "%(key_repo)s does not contain keys, use keystone-manage fernet_setup to " "create Fernet keys." msgstr "" "%(key_repo)s does not contain keys, use keystone-manage fernet_setup to " "create Fernet keys." #, python-format msgid "%(key_repo)s does not exist" msgstr "%(key_repo)s does not exist" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." 
#, python-format msgid "%(prior_role_id)s does not imply %(implied_role_id)s." msgstr "%(prior_role_id)s does not imply %(implied_role_id)s." #, python-format msgid "" "%(private_key)s does not exist. You can generate a key pair using `keystone-" "manage create_jws_keypair`." msgstr "" "%(private_key)s does not exist. You can generate a key pair using `keystone-" "manage create_jws_keypair`." #, python-format msgid "" "%(public_key_repo)s does not exist. Please make sure the directory exists " "and is readable by the process running keystone." msgstr "" "%(public_key_repo)s does not exist. Please make sure the directory exists " "and is readable by the process running Keystone." #, python-format msgid "" "%(public_key_repo)s must contain at least one public key but it is empty. " "You can generate a key pair using `keystone-manage create_jws_keypair`." msgstr "" "%(public_key_repo)s must contain at least one public key but it is empty. " "You can generate a key pair using `keystone-manage create_jws_keypair`." #, python-format msgid "%(role_id)s cannot be an implied roles." msgstr "%(role_id)s cannot be an implied roles." #, python-format msgid "%s can not be updated for credential" msgstr "%s can not be updated for credential" #, python-format msgid "%s field is required and cannot be empty" msgstr "%s field is required and cannot be empty" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Disable insecure_debug mode to suppress these details.)" msgid "--all option cannot be mixed with other options" msgstr "--all option cannot be mixed with other options" msgid "A project-scoped token is required to produce a service catalog." msgstr "A project-scoped token is required to produce a service catalogue." msgid "Access token is expired" msgstr "Access token is expired" msgid "Access token not found" msgstr "Access token not found" msgid "Additional authentications steps required." msgstr "Additional authentications steps required." 
msgid "An unexpected error occurred when retrieving domain configs" msgstr "An unexpected error occurred when retrieving domain configs" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "An unexpected error occurred when trying to store %s" msgid "" "An unexpected error prevented the server from accessing encrypted " "credentials." msgstr "" "An unexpected error prevented the server from accessing encrypted " "credentials." msgid "An unexpected error prevented the server from fulfilling your request." msgstr "An unexpected error prevented the server from fulfilling your request." #, python-format msgid "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s." msgstr "" "An unexpected error prevented the server from fulfilling your request: " "%(exception)s." msgid "At least one option must be provided" msgstr "At least one option must be provided" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "At least one option must be provided, use either --all or --domain-name" msgid "At least one role should be specified" msgstr "At least one role should be specified" msgid "Attempted to authenticate with an unsupported method." msgstr "Attempted to authenticate with an unsupported method." msgid "Auth Method Plugins are not loaded." msgstr "Auth Method Plugins are not loaded." msgid "Authentication plugin error." msgstr "Authentication plugin error." msgid "Available commands" msgstr "Available commands" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "Cannot authorise a request token with a token issued via delegation." 
#, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Cannot change %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Cannot change Domain ID" msgid "Cannot change user ID" msgstr "Cannot change user ID" msgid "Cannot change user name" msgstr "Cannot change user name" msgid "Cannot create an application credential for another user." msgstr "Cannot create an application credential for another user." msgid "Cannot create an application credential with unassigned role" msgstr "Cannot create an application credential with unassigned role" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s." msgstr "Cannot create an endpoint with an invalid URL: %(url)s." #, python-format msgid "" "Cannot create project tags for %(project_id)s, project is immutable. Set " "\"immutable\" option to false before creating project tags." msgstr "" "Cannot create project tags for %(project_id)s, project is immutable. Set " "\"immutable\" option to false before creating project tags." #, python-format msgid "" "Cannot create project, since it specifies its domain_id %(domain_id)s, but " "specifies a parent in a different domain (%(parent_domain_id)s)." msgstr "" "Cannot create project, since it specifies its domain_id %(domain_id)s, but " "specifies a parent in a different domain (%(parent_domain_id)s)." #, python-format msgid "" "Cannot create project, the parent (%(parent_id)s) is acting as a domain, but " "this project's domain id (%(domain_id)s) does not match the parent's id." msgstr "" "Cannot create project, the parent (%(parent_id)s) is acting as a domain, but " "this project's domain id (%(domain_id)s) does not match the parent's id." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "Cannot delete a domain that is enabled, please disable it first." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." 
msgstr "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." #, python-format msgid "" "Cannot delete project tags for %(project_id)s, project is immutable. Set " "\"immutable\" option to false before creating project tags." msgstr "" "Cannot delete project tags for %(project_id)s, project is immutable. Set " "\"immutable\" option to false before creating project tags." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "Cannot enable project %s since it has disabled parents" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "Cannot list assignments sourced from groups and filtered by user ID." msgid "Cannot list request tokens with a token issued via delegation." msgstr "Cannot list request tokens with a token issued via delegation." #, python-format msgid "Cannot open certificate %(cert_file)s.Reason: %(reason)s" msgstr "Cannot open certificate %(cert_file)s.Reason: %(reason)s" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Cannot remove role that has not been granted, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Cannot truncate a driver call without hints list as first parameter after " "self " #, python-format msgid "" "Cannot update project tags for %(project_id)s, project is immutable. 
Set " "\"immutable\" option to false before creating project tags." msgstr "" "Cannot update project tags for %(project_id)s, project is immutable. Set " "\"immutable\" option to false before creating project tags." msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgid "Cascade update is only allowed for enabled attribute." msgstr "Cascade update is only allowed for enabled attribute." msgid "Client authentication failed." msgstr "Client authentication failed." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Combining effective and group filter will always result in an empty list." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Combining effective, domain and inherited filters will always result in an " "empty list." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Config API entity at /domains/%s/config" #, python-format msgid "Conflict occurred attempting to store %(type)s - %(details)s." msgstr "Conflict occurred attempting to store %(type)s - %(details)s." #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Consumer not found" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." 
#, python-format msgid "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s." msgstr "" "Could not find %(group_or_option)s in domain configuration for domain " "%(domain_id)s." #, python-format msgid "Could not find Access Rule: %(access_rule_id)s." msgstr "Could not find Access Rule: %(access_rule_id)s." #, python-format msgid "Could not find Application Credential: %(application_credential_id)s." msgstr "Could not find Application Credential: %(application_credential_id)s." #, python-format msgid "Could not find Endpoint Group: %(endpoint_group_id)s." msgstr "Could not find Endpoint Group: %(endpoint_group_id)s." msgid "Could not find Identity Provider identifier in environment" msgstr "Could not find Identity Provider identifier in environment" #, python-format msgid "Could not find Identity Provider: %(idp_id)s." msgstr "Could not find Identity Provider: %(idp_id)s." #, python-format msgid "Could not find Identity Provider: %s" msgstr "Could not find Identity Provider: %s" #, python-format msgid "Could not find Service Provider: %(sp_id)s." msgstr "Could not find Service Provider: %(sp_id)s." #, python-format msgid "Could not find application credential: %s" msgstr "Could not find application credential: %s" #, python-format msgid "Could not find auth receipt: %(receipt_id)s." msgstr "Could not find auth receipt: %(receipt_id)s." #, python-format msgid "Could not find credential: %(credential_id)s." msgstr "Could not find credential: %(credential_id)s." #, python-format msgid "Could not find domain: %(domain_id)s." msgstr "Could not find domain: %(domain_id)s." #, python-format msgid "Could not find endpoint: %(endpoint_id)s." msgstr "Could not find endpoint: %(endpoint_id)s." #, python-format msgid "" "Could not find federated protocol %(protocol)s for Identity Provider: " "%(idp)s." msgstr "" "Could not find federated protocol %(protocol)s for Identity Provider: " "%(idp)s." 
#, python-format msgid "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s." msgstr "" "Could not find federated protocol %(protocol_id)s for Identity Provider: " "%(idp_id)s." #, python-format msgid "Could not find group: %(group_id)s." msgstr "Could not find group: %(group_id)s." #, python-format msgid "Could not find limit for %(id)s." msgstr "Could not find limit for %(id)s." #, python-format msgid "Could not find mapping: %(mapping_id)s." msgstr "Could not find mapping: %(mapping_id)s." msgid "Could not find policy association." msgstr "Could not find policy association." #, python-format msgid "Could not find policy: %(policy_id)s." msgstr "Could not find policy: %(policy_id)s." #, python-format msgid "Could not find project tag: %(project_tag)s." msgstr "Could not find project tag: %(project_tag)s." #, python-format msgid "Could not find project: %(project_id)s." msgstr "Could not find project: %(project_id)s." #, python-format msgid "Could not find region: %(region_id)s." msgstr "Could not find region: %(region_id)s." #, python-format msgid "Could not find registered limit for %(id)s." msgstr "Could not find registered limit for %(id)s." #, python-format msgid "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project, domain, or system: %(target_id)s." msgstr "" "Could not find role assignment with role: %(role_id)s, user or group: " "%(actor_id)s, project, domain, or system: %(target_id)s." #, python-format msgid "Could not find role: %(role_id)s." msgstr "Could not find role: %(role_id)s." #, python-format msgid "Could not find service: %(service_id)s." msgstr "Could not find service: %(service_id)s." msgid "Could not find token" msgstr "Could not find token" #, python-format msgid "Could not find token: %(token_id)s." msgstr "Could not find token: %(token_id)s." #, python-format msgid "Could not find trust: %(trust_id)s." msgstr "Could not find trust: %(trust_id)s." 
#, python-format msgid "Could not find user: %(user_id)s." msgstr "Could not find user: %(user_id)s." #, python-format msgid "Could not find version: %(version)s." msgstr "Could not find version: %(version)s." #, python-format msgid "Could not find: %(target)s." msgstr "Could not find: %(target)s." msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgid "Could not recognize Fernet token" msgstr "Could not recognise Fernet token" msgid "Could not validate the access token" msgstr "Could not validate the access token" msgid "Credential could not be decrypted. Please contact the administrator" msgstr "Credential could not be decrypted. Please contact the administrator" #, python-format msgid "Credential could not be encrypted: %s" msgstr "Credential could not be encrypted: %s" msgid "Credential signature mismatch" msgstr "Credential signature mismatch" #, python-format msgid "DN attribute %(dn)s not found in LDAP" msgstr "DN attribute %(dn)s not found in LDAP" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgid "Domain ID does not conform to required UUID format." msgstr "Domain ID does not conform to required UUID format." 
#, python-format msgid "Domain cannot be named %s" msgstr "Domain cannot be named %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Domain cannot have ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "Domain is disabled: %s" msgid "Domain name cannot contain reserved characters." msgstr "Domain name cannot contain reserved characters." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." #, python-format msgid "Don't know if operation is an expand or contract at the moment: %s" msgstr "Don't know if operation is an expand or contract at the moment: %s" #, python-format msgid "Duplicate ID, %s." msgstr "Duplicate ID, %s." msgid "Duplicate entry" msgstr "Duplicate entry" #, python-format msgid "Duplicate entry at domain ID %s" msgstr "Duplicate entry at domain ID %s" #, python-format msgid "Duplicate entry found with %(field)s %(name)s" msgstr "Duplicate entry found with %(field)s %(name)s" #, python-format msgid "" "Duplicate entry found with %(field)s %(name)s at domain ID %(domain_id)s" msgstr "" "Duplicate entry found with %(field)s %(name)s at domain ID %(domain_id)s" #, python-format msgid "Duplicate entry: %s" msgstr "Duplicate entry: %s" #, python-format msgid "Duplicate name, %s." msgstr "Duplicate name, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "Duplicate remote ID: %s" msgid "EC2 access key not found." msgstr "EC2 access key not found." msgid "EC2 signature not supplied." msgstr "EC2 signature not supplied." msgid "" "ERROR: Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be " "set." msgstr "" "ERROR: Either --bootstrap-password argument or OS_BOOTSTRAP_PASSWORD must be " "set." 
#, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Endpoint Group Project Association not found" msgid "Ensure configuration option idp_entity_id is set." msgstr "Ensure configuration option idp_entity_id is set." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "Ensure configuration option idp_sso_endpoint is set." #, python-format msgid "Error authenticating with application credential: %(detail)s" msgstr "Error authenticating with application credential: %(detail)s" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." #, python-format msgid "Error when changing user password: %s" msgstr "Error when changing user password: %s" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Error while opening file %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Error while parsing rules %(path)s: %(err)s" #, python-format msgid "Error while reading metadata file: %(reason)s." msgstr "Error while reading metadata file: %(reason)s." #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" #, python-format msgid "Expected boolean value, got %r" msgstr "Expected boolean value, got %r" #, python-format msgid "Expected dict or list: %s" msgstr "Expected dict or list: %s" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s. 
The server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "Expecting to find %(attribute)s in %(target)s. The server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." #, python-format msgid "Failed to deserialize %(obj)s. Data is %(data)s" msgstr "Failed to deserialise %(obj)s. Data is %(data)s" msgid "Failed to validate receipt" msgstr "Failed to validate receipt" msgid "Failed to validate token" msgstr "Failed to validate token" msgid "Federation token is expired" msgstr "Federation token is expired" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgid "Global role cannot imply a domain-specific role" msgstr "Global role cannot imply a domain-specific role" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "Group %(group)s is not supported for domain specific configurations" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." #, python-format msgid "" "Group membership across backend boundaries is not allowed. Group in question " "is %(group_id)s, user is %(user_id)s." msgstr "" "Group membership across backend boundaries is not allowed. Group in question " "is %(group_id)s, user is %(user_id)s." 
#, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID attribute %(id_attr)s not found in LDAP object %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Identity Provider %(idp)s is disabled" #, python-format msgid "" "Impersonation is not allowed because redelegated trust does not specify " "impersonation. Redelegated trust id: %s" msgstr "" "Impersonation is not allowed because redelegated trust does not specify " "impersonation. Redelegated trust id: %s" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Incoming identity provider identifier not included among the accepted " "identifiers." #, python-format msgid "" "Insufficient auth methods received for %(user_id)s. Auth Methods Provided: " "%(methods)s." msgstr "" "Insufficient auth methods received for %(user_id)s. Auth Methods Provided: " "%(methods)s." msgid "Internal RBAC enforcement error, invalid rule (action) name." msgstr "Internal RBAC enforcement error, invalid rule (action) name." msgid "Internal error processing authentication and authorization." msgstr "Internal error processing authentication and authorisation." msgid "Invalid EC2 signature." msgstr "Invalid EC2 signature." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Invalid LDAP TLS_AVAIL option: %s. TLS not available" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Invalid LDAP scope: %(scope)s. 
Choose one of: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Invalid TLS / LDAPS combination" msgid "Invalid application credential ID or secret" msgstr "Invalid application credential ID or secret" #, python-format msgid "Invalid application credential: %(detail)s" msgstr "Invalid application credential: %(detail)s" msgid "Invalid blob in credential" msgstr "Invalid blob in credential" msgid "" "Invalid data type, must be a list of lists comprised of strings. Sub-lists " "may not be duplicated. Strings in sub-lists may not be duplicated." msgstr "" "Invalid data type, must be a list of lists comprised of strings. Sub-lists " "may not be duplicated. Strings in sub-lists may not be duplicated." #, python-format msgid "Invalid domain name: %(domain)s" msgstr "Invalid domain name: %(domain)s" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." #, python-format msgid "Invalid domain specific configuration: %(reason)s." msgstr "Invalid domain specific configuration: %(reason)s." #, python-format msgid "Invalid input for field '%(path)s': %(message)s" msgstr "Invalid input for field '%(path)s': %(message)s" msgid "" "Invalid input for field identity/password/user/domain: id or name must be " "present." msgstr "" "Invalid input for field identity/password/user/domain: id or name must be " "present." msgid "" "Invalid input for field identity/password/user: id or name must be present." msgstr "" "Invalid input for field identity/password/user: id or name must be present." msgid "Invalid input for field scope/domain: id or name must be present." msgstr "Invalid input for field scope/domain: id or name must be present." msgid "" "Invalid input for field scope/project/domain: id or name must be present." msgstr "" "Invalid input for field scope/project/domain: id or name must be present." 
msgid "Invalid input for field scope/project: id or name must be present." msgstr "Invalid input for field scope/project: id or name must be present." #, python-format msgid "Invalid mapping id: %s" msgstr "Invalid mapping id: %s" #, python-format msgid "" "Invalid mix of entities for policy association: only Endpoint, Service, or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s." msgstr "" "Invalid mix of entities for policy association: only Endpoint, Service, or " "Region+Service allowed. Request was - Endpoint: %(endpoint_id)s, Service: " "%(service_id)s, Region: %(region_id)s." #, python-format msgid "Invalid resource limit: %(reason)s." msgstr "Invalid resource limit: %(reason)s." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgid "Invalid signature" msgstr "Invalid signature" msgid "Invalid user / password" msgstr "Invalid user / password" msgid "Invalid username or TOTP passcode" msgstr "Invalid username or TOTP passcode" msgid "Invalid username or password" msgstr "Invalid username or password" #, python-format msgid "" "Keystone cannot start due to project hierarchical depth in the current " "deployment (project_ids: %(project_id)s) exceeds the enforcement model's " "maximum limit of %(max_limit_depth)s. Please use a different enforcement " "model to correct the issue." msgstr "" "Keystone cannot start due to project hierarchical depth in the current " "deployment (project_ids: %(project_id)s) exceeds the enforcement model's " "maximum limit of %(max_limit_depth)s. Please use a different enforcement " "model to correct the issue." 
msgid "LDAP does not support write operations" msgstr "LDAP does not support write operations" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Length of transformable resource id > 64, which is max allowed characters" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Max hierarchy depth reached for %s branch." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Member %(member)s is already a member of group %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Method not callable: %s" msgid "Missing entity ID from environment" msgstr "Missing entity ID from environment" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgid "Multiple domains are not supported" msgstr "Multiple domains are not supported" msgid "Must specify either domain or project" msgstr "Must specify either domain or project" msgid "Negative delta (downgrade) not supported" msgstr "Negative delta (downgrade) not supported" msgid "Negative relative revision (downgrade) not supported" msgstr "Negative relative revision (downgrade) not supported" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "Neither Project Domain ID nor Project Domain Name was provided." 
msgid "" "No Authorization headers found, cannot proceed with OAuth related calls. If " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgstr "" "No Authorisation headers found, cannot proceed with OAuth related calls. If " "running under HTTPd or Apache, ensure WSGIPassAuthorization is set to On." msgid "No authenticated user" msgstr "No authenticated user" msgid "No domain information specified as part of list request" msgstr "No domain information specified as part of list request" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgid "No options specified" msgstr "No options specified" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "No policy is associated with endpoint %(endpoint_id)s." msgid "No receipt in the request" msgstr "No receipt in the request" #, python-format msgid "No remaining uses for trust: %(trust_id)s." msgstr "No remaining uses for trust: %(trust_id)s." msgid "No token in the request" msgstr "No token in the request" msgid "Not authorized." msgstr "Not authorised." msgid "" "Number of User/Group entities returned by LDAP exceeded size limit. Contact " "your LDAP administrator." msgstr "" "Number of User/Group entities returned by LDAP exceeded size limit. Contact " "your LDAP administrator." 
msgid "One of the trust agents is disabled or deleted" msgstr "One of the trust agents is disabled or deleted" msgid "Only admin or trustor can delete a trust" msgstr "Only admin or trustor can delete a trust" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Option %(option)s found with no group specified while checking domain " "configuration request" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" #, python-format msgid "Option %(option_id)s already defined in %(registry)s." msgstr "Option %(option_id)s already defined in %(registry)s." #, python-format msgid "Option %(option_name)s already defined in %(registry)s" msgstr "Option %(option_name)s already defined in %(registry)s" #, python-format msgid "Password Hash Algorithm %s not found" msgstr "Password Hash Algorithm %s not found" msgid "Password must be a string type" msgstr "Password must be a string type" #, python-format msgid "Password validation error: %(detail)s." msgstr "Password validation error: %(detail)s." msgid "Phase upgrade options do not accept revision specification" msgstr "Phase upgrade options do not accept revision specification" #, python-format msgid "Private key %(path)s already exists" msgstr "Private key %(path)s already exists" msgid "Programming Error: Invalid arguments supplied to build scope." msgstr "Programming Error: Invalid arguments supplied to build scope." msgid "Programming Error: value to be stored must be a datetime object." msgstr "Programming Error: value to be stored must be a datetime object." #, python-format msgid "" "Project %(project_id)s must be in the same domain as the role %(role_id)s " "being assigned." 
msgstr "" "Project %(project_id)s must be in the same domain as the role %(role_id)s " "being assigned." msgid "Project field is required and cannot be empty." msgstr "Project field is required and cannot be empty." #, python-format msgid "Project is disabled: %s" msgstr "Project is disabled: %s" msgid "Project name cannot contain reserved characters." msgstr "Project name cannot contain reserved characters." msgid "Provided consumer does not exist." msgstr "Provided consumer does not exist." msgid "Provided consumer key does not match stored consumer key." msgstr "Provided consumer key does not match stored consumer key." msgid "Provided verifier does not match stored verifier" msgstr "Provided verifier does not match stored verifier" #, python-format msgid "Public key %(path)s already exists" msgstr "Public key %(path)s already exists" msgid "" "Reading security compliance information for any domain other than the " "default domain is not allowed or supported." msgstr "" "Reading security compliance information for any domain other than the " "default domain is not allowed or supported." msgid "" "Reading security compliance values other than password_regex and " "password_regex_description is not allowed." msgstr "" "Reading security compliance values other than password_regex and " "password_regex_description is not allowed." #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Reading the default for option %(option)s in group %(group)s is not supported" msgid "Redelegation allowed for delegated by trust only" msgstr "Redelegation allowed for delegated by trust only" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Remaining re-delegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgid "Request Token does not have an authorizing user id." 
msgstr "Request Token does not have an authorising user id." msgid "Request must have an origin query parameter" msgstr "Request must have an origin query parameter" msgid "Request token is expired" msgstr "Request token is expired" msgid "Request token not found" msgstr "Request token not found" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "Requested expiration time is more than redelegated trust can provide" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgid "Requested user has no relation to this trust" msgstr "Requested user has no relation to this trust" #, python-format msgid "Role %s is not defined" msgstr "Role %s is not defined" msgid "Scoping to both domain and project is not allowed" msgstr "Scoping to both domain and project is not allowed" msgid "Scoping to both domain and system is not allowed" msgstr "Scoping to both domain and system is not allowed" msgid "Scoping to both domain and trust is not allowed" msgstr "Scoping to both domain and trust is not allowed" msgid "Scoping to both project and system is not allowed" msgstr "Scoping to both project and system is not allowed" msgid "Scoping to both project and trust is not allowed" msgstr "Scoping to both project and trust is not allowed" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Service Provider %(sp)s is disabled" msgid "Some of requested roles are not in redelegated trust" msgstr "Some of requested roles are not in redelegated trust" msgid "Specify a domain or project, not both" msgstr "Specify a domain or project, not both" msgid "Specify a user or group, not both" msgstr "Specify a user or group, not both" msgid "Specify system or domain, not both" msgstr "Specify system or domain, not both" msgid "Specify system or project, not both" msgstr "Specify 
system or project, not both" #, python-format msgid "" "String length exceeded. The length of string '%(string)s' exceeds the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "String length exceeded. The length of string '%(string)s' exceeds the limit " "of column %(type)s(CHAR(%(length)d))." msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgid "The --all option cannot be used with the --domain-name option" msgstr "The --all option cannot be used with the --domain-name option" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "The Keystone configuration file %(config_file)s could not be found." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." #, python-format msgid "The account is disabled for user: %(user_id)s." msgstr "The account is disabled for user: %(user_id)s." #, python-format msgid "The account is locked for user: %(user_id)s." msgstr "The account is locked for user: %(user_id)s." msgid "The action you have requested has not been implemented." msgstr "The action you have requested has not been implemented." msgid "The authenticated user should match the trustor" msgstr "The authenticated user should match the trustor" msgid "The certificate content is not PEM format." msgstr "The certificate content is not PEM format." #, python-format msgid "" "The given operator %(_op)s is not valid. 
It must be one of the following: " "'eq', 'neq', 'lt', 'lte', 'gt', or 'gte'." msgstr "" "The given operator %(_op)s is not valid. It must be one of the following: " "'eq', 'neq', 'lt', 'lte', 'gt', or 'gte'." msgid "The method is not allowed for the requested URL." msgstr "The method is not allowed for the requested URL." #, python-format msgid "" "The new password cannot be identical to a previous password. The total " "number which includes the new password must be unique is %(unique_count)s." msgstr "" "The new password cannot be identical to a previous password. The total " "number which includes the new password must be unique is %(unique_count)s." #, python-format msgid "The parameter grant_type %s is not supported." msgstr "The parameter grant_type %s is not supported." msgid "The parameter grant_type is required." msgstr "The parameter grant_type is required." #, python-format msgid "The password does not match the requirements: %(detail)s." msgstr "The password does not match the requirements: %(detail)s." #, python-format msgid "The password is expired and needs to be changed for user: %(user_id)s." msgstr "The password is expired and needs to be changed for user: %(user_id)s." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgid "The request you have made requires authentication." msgstr "The request you have made requires authentication." #, python-format msgid "" "The resource limit (%(level)s: %(id)s, resource_name: %(resource_name)s, " "resource_limit: %(resource_limit)s, service_id: %(service_id)s, region_id: " "%(region_id)s) doesn't satisfy current hierarchy model." 
msgstr "" "The resource limit (%(level)s: %(id)s, resource_name: %(resource_name)s, " "resource_limit: %(resource_limit)s, service_id: %(service_id)s, region_id: " "%(region_id)s) doesn't satisfy current hierarchy model." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgid "The service you have requested is no longer available on this server." msgstr "The service you have requested is no longer available on this server." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "The value of group %(group)s specified in the config should be a dictionary " "of options" #, python-format msgid "" "The value of the limit which project is %(project_id)s should not bigger " "than its parent domain %(domain_id)s." msgstr "" "The value of the limit which project is %(project_id)s should not be bigger " "than its parent domain %(domain_id)s." #, python-format msgid "" "There are multiple %(resource)s entities named '%(name)s'. Please use ID " "instead of names to resolve the ambiguity." msgstr "" "There are multiple %(resource)s entities named '%(name)s'. Please use ID " "instead of names to resolve the ambiguity." msgid "" "This API is no longer available due to the removal of support for PKI tokens." msgstr "" "This API is no longer available due to the removal of support for PKI tokens." 
#, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "This is not a recognised Fernet payload version: %s" #, python-format msgid "This is not a recognized Fernet receipt %s" msgstr "This is not a recognised Fernet receipt %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgid "Token version is unrecognizable or unsupported." msgstr "Token version is unrecognisable or unsupported." msgid "Trustee domain is disabled." msgstr "Trustee domain is disabled." msgid "Trustee has no delegated roles." msgstr "Trustee has no delegated roles." msgid "Trustor domain is disabled." msgstr "Trustor domain is disabled." msgid "Trustor is disabled." msgstr "Trustor is disabled." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." 
msgstr "" "Unable to access the keystone database, please check it is configured " "correctly." msgid "" "Unable to authenticate against Identity backend - Invalid username or " "password" msgstr "" "Unable to authenticate against Identity backend - Invalid username or " "password" #, python-format msgid "Unable to consume trust %(trust_id)s. Unable to acquire lock." msgstr "Unable to consume trust %(trust_id)s. Unable to acquire lock." msgid "Unable to create a limit that has no corresponding registered limit." msgstr "Unable to create a limit that has no corresponding registered limit." #, python-format msgid "" "Unable to create additional application credentials, maximum of %(limit)d " "already exceeded for user." msgstr "" "Unable to create additional application credentials, maximum of %(limit)d " "already exceeded for user." #, python-format msgid "" "Unable to create additional credentials, maximum of %(limit)d already " "exceeded for user." msgstr "" "Unable to create additional credentials, maximum of %(limit)d already " "exceeded for user." #, python-format msgid "" "Unable to delete immutable %(type)s resource: `%(resource_id)s. Set resource " "option \"immutable\" to false first." msgstr "" "Unable to delete immutable %(type)s resource: `%(resource_id)s. Set resource " "option \"immutable\" to false first." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." #, python-format msgid "Unable to establish a connection to LDAP Server (%(url)s)." msgstr "Unable to establish a connection to LDAP Server (%(url)s)." #, python-format msgid "Unable to find %(name)r driver in %(namespace)r." msgstr "Unable to find %(name)r driver in %(namespace)r." #, python-format msgid "" "Unable to locate %(binary)s binary on the system. Check to make sure it is " "installed." 
msgstr "" "Unable to locate %(binary)s binary on the system. Check to make sure it is " "installed." #, python-format msgid "Unable to locate domain config directory: %s" msgstr "Unable to locate domain config directory: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Unable to lookup user %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgid "" "Unable to rotate credential keys because not all credentials are encrypted " "with the primary key. Please make sure all credentials have been encrypted " "with the primary key using `keystone-manage credential_migrate`." msgstr "" "Unable to rotate credential keys because not all credentials are encrypted " "with the primary key. Please make sure all credentials have been encrypted " "with the primary key using `keystone-manage credential_migrate`." #, python-format msgid "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed or this is the result of misconfiguration. Reason " "%(reason)s." msgstr "" "Unable to sign SAML assertion. It is likely that this server does not have " "xmlsec1 installed or this is the result of misconfiguration. Reason " "%(reason)s." #, python-format msgid "" "Unable to update immutable %(type)s resource: `%(resource_id)s. Set resource " "option \"immutable\" to false first." msgstr "" "Unable to update immutable %(type)s resource: `%(resource_id)s. Set resource " "option \"immutable\" to false first." #, python-format msgid "" "Unable to update or delete registered limit %(id)s because there are project " "limits associated with it." msgstr "" "Unable to update or delete registered limit %(id)s because there are project " "limits associated with it." 
msgid "Unable to validate password due to invalid configuration" msgstr "Unable to validate password due to invalid configuration" #, python-format msgid "Unable to validate token because domain %(id)s is disabled" msgstr "Unable to validate token because domain %(id)s is disabled" #, python-format msgid "Unable to validate token because project %(id)s is disabled" msgstr "Unable to validate token because project %(id)s is disabled" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Unexpected assignment type encountered, %s" #, python-format msgid "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s." msgstr "" "Unexpected combination of grant attributes - User: %(user_id)s, Group: " "%(group_id)s, Project: %(project_id)s, Domain: %(domain_id)s." #, python-format msgid "Unexpected evaluation type \"%(eval_type)s\"" msgstr "Unexpected evaluation type \"%(eval_type)s\"" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Unexpected status requested for JSON Home response, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Unknown domain '%(name)s' specified by --domain-name" msgid "Unknown parameters found,please provide only oauth parameters." msgstr "Unknown parameters found, please provide only oauth parameters." #, python-format msgid "Unsupported password hashing algorithm ident: %s" msgstr "Unsupported password hashing algorithm ident: %s" msgid "Update of `domain_id` is not allowed." msgstr "Update of `domain_id` is not allowed." msgid "Update of `is_domain` is not allowed." msgstr "Update of `is_domain` is not allowed." msgid "Update of `parent_id` is not allowed." msgstr "Update of `parent_id` is not allowed." 
msgid "Use either --delta or relative revision, not both" msgstr "Use either --delta or relative revision, not both" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "User %(user_id)s has no access to domain %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "User %(user_id)s has no access to project %(project_id)s" #, python-format msgid "User %(user_id)s has no access to the system" msgstr "User %(user_id)s has no access to the system" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "User %(user_id)s is already a member of group %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "User '%(user_id)s' not found in group '%(group_id)s'" msgid "User IDs do not match" msgstr "User IDs do not match" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." #, python-format msgid "User is disabled: %s" msgstr "User is disabled: %s" msgid "User is not a trustee." msgstr "User is not a trustee." #, python-format msgid "User type %s not supported" msgstr "User type %s not supported" #, python-format msgid "User's default project ID cannot be a domain ID: %s" msgstr "User's default project ID cannot be a domain ID: %s" msgid "" "Using OAuth-scoped token to create another token. Create a new OAuth-scoped " "token instead" msgstr "" "Using OAuth-scoped token to create another token. Create a new OAuth-scoped " "token instead" msgid "" "Using a system-scoped token to create a project-scoped or domain-scoped " "token is not allowed." msgstr "" "Using a system-scoped token to create a project-scoped or domain-scoped " "token is not allowed." 
msgid "" "Using method 'application_credential' is not allowed for managing additional " "application credentials." msgstr "" "Using method 'application_credential' is not allowed for managing additional " "application credentials." msgid "" "Using method 'application_credential' is not allowed for managing trusts." msgstr "" "Using method 'application_credential' is not allowed for managing trusts." msgid "" "Using trust-scoped token to create another token. Create a new trust-scoped " "token instead" msgstr "" "Using trust-scoped token to create another token. Create a new trust-scoped " "token instead" #, python-format msgid "Validation failed with errors: %(error)s, detail message is: %(desc)s." msgstr "Validation failed with errors: %(error)s, detail message is: %(desc)s." msgid "You are not authorized to perform the requested action." msgstr "You are not authorised to perform the requested action." #, python-format msgid "You are not authorized to perform the requested action: %(action)s." msgstr "You are not authorised to perform the requested action: %(action)s." msgid "" "You cannot change your password at this time due to password policy " "disallowing password changes. Please contact your administrator to reset " "your password." msgstr "" "You cannot change your password at this time due to password policy " "disallowing password changes. Please contact your administrator to reset " "your password." #, python-format msgid "" "You cannot change your password at this time due to the minimum password " "age. Once you change your password, it must be used for %(min_age_days)d " "day(s) before it can be changed. Please try again in %(days_left)d day(s) or " "contact your administrator to reset your password." msgstr "" "You cannot change your password at this time due to the minimum password " "age. Once you change your password, it must be used for %(min_age_days)d " "day(s) before it can be changed. 
Please try again in %(days_left)d day(s) or " "contact your administrator to reset your password." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgid "You must provide a revision or relative delta" msgstr "You must provide a revision or relative delta" msgid "" "You need to set tls_cacertfile or tls_cacertdir if use_tls is true or url " "uses ldaps: scheme." msgstr "" "You need to set tls_cacertfile or tls_cacertdir if use_tls is true or url " "uses ldaps: scheme." #, python-format msgid "`option_id` must be 4 characters in length. Got %r" msgstr "`option_id` must be 4 characters in length. Got %r" #, python-format msgid "`option_id` must be a string, got %r" msgstr "`option_id` must be a string, got %r" #, python-format msgid "`option_name` must be a string. Got %r" msgstr "`option_name` must be a string. 
Got %r" msgid "any options" msgstr "any options" #, python-format msgid "" "assertion file %(pathname)s at line %(line_num)d expected 'key: value' but " "found '%(line)s' see help for file format" msgstr "" "assertion file %(pathname)s at line %(line_num)d expected 'key: value' but " "found '%(line)s' see help for file format" msgid "auth_context did not decode anything useful" msgstr "auth_context did not decode anything useful" msgid "auth_type is not Negotiate" msgstr "auth_type is not Negotiate" msgid "authorizing user does not have role required" msgstr "authorising user does not have role required" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "cannot create a project in a branch containing a disabled project: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." #, python-format msgid "group %(group)s" msgstr "group %(group)s" #, python-format msgid "invalidate date format %s" msgstr "invalidate date format %s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "it is not permitted to have two projects acting as domains with the same " "name: %s" #, python-format msgid "" "it is not permitted to have two projects with either the same name or same " "id in the same domain: name is %(name)s, project id %(id)s" msgstr "" "it is not permitted to have two projects with either the same name or same " "id in the same domain: name is %(name)s, project id %(id)s" msgid "only root projects are allowed to act as domains." msgstr "only root projects are allowed to act as domains." #, python-format msgid "option %(option)s in group %(group)s" msgstr "option %(option)s in group %(group)s" msgid "remaining_uses must be a positive integer or null." 
msgstr "remaining_uses must be a positive integer or null." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "remaining_uses must not be set if redelegation is allowed" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgid "rescope a scoped token" msgstr "rescope a scoped token" msgid "resulting JSON load was not a dict" msgstr "resulting JSON load was not a dict" #, python-format msgid "" "role: %(role_name)s must be within the same domain as the identity provider: " "%(identity_provider)s." msgstr "" "role: %(role_name)s must be within the same domain as the identity provider: " "%(identity_provider)s." msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id must be specified if include_subtree is also specified" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s not found or is not a directory" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s not found or is not a file" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/es/0000775000175000017500000000000000000000000017172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/es/LC_MESSAGES/0000775000175000017500000000000000000000000020757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/es/LC_MESSAGES/keystone.po0000664000175000017500000007421000000000000023164 0ustar00zuulzuul00000000000000# Translations template for keystone. 
# Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Alberto Molina Coballes , 2014 # dario hereñu , 2015 # Guillermo Vitas Gil , 2014 # Jose Enrique Ruiz Navarro , 2014 # Jose Ramirez Garcia , 2014 # Pablo Sanchez , 2015 # Andreas Jaeger , 2016. #zanata # Ana Santos , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-04-10 06:40+0000\n" "Last-Translator: Ana Santos \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "El nombre %(entity)s no puede contener los siguientes caracteres " "reservados: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s no es u suceso de notificación válido, debe ser uno de: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s no es un host de panel de control de confianza" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s no proporciona migración de base de datos. La vía de acceso de " "repositorio de migración en %(path)s no existe o no es un directorio." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "campo %s es necesario y no puede estar vacío" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Inhabilite la modalidad insecure_debug para suprimir estos detalles.)" msgid "--all option cannot be mixed with other options" msgstr "La opción --all no puede mezclarse con otras opciones" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "Se necesita una señal con ámbito de proyecto para producir un catálogo de " "servicio." msgid "Access token is expired" msgstr "El token de acceso ha expirado" msgid "Access token not found" msgstr "No se ha encontrado el token de acceso" msgid "Additional authentications steps required." msgstr "Se precisan pasos adicionales de autenticación." msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Se ha producido un error inesperado al recuperar configuraciones de dominio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Un error inesperado ocurrió cuando se intentaba almacenar %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "El servidor no ha podido completar su petición debido a un error inesperado." msgid "At least one option must be provided" msgstr "Debe especificar al menos una opción" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "Debe proporcionarse al menos una opción, utilice --all o --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Se ha intentado autenticar con un método no compatible." msgid "Authentication plugin error." msgstr "Error en el complemento de autenticación " msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "No se puede autorizar una señal de solicitud con una señal emitida mediante " "delegación." 
#, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "No se puede cambiar %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "No se puede cambiar el ID del Dominio" msgid "Cannot change user ID" msgstr "No se puede cambiar el ID de usuario" msgid "Cannot change user name" msgstr "No se puede cambiar el nombre de usuario" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "No se puede suprimir un dominio que está habilitado, antes debe " "inhabilitarlo." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "No se puede suprimir el proyecto %(project_id)s porque su subárbol contiene " "proyectos habilitados." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "No se puede suprimir el proyecto %s porque no es una hoja en la jerarquía. " "Utilice la opción de casacada si desea suprimir un subárbol entero." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "No se puede inhabilitar el proyecto %(project_id)s porque su subárbol " "contiene proyectos habilitados." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "No se puede habilitar el proyecto %s, ya que tiene padres inhabilitados" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "No se pueden enumerar las asignaciones obtenidas de grupos y filtradas por " "ID de usuario." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "No se pueden listar las señales de solicitud con una señal emitida mediante " "delegación." 
#, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "No se puede eliminar un rol que no se ha otorgado, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "No se puede truncar una llamada de controlador si lista de sugerencias como " "primer parámetro después de self " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "No se pueden utilizar los parámetros de consulta parents_as_list y " "parents_as_ids al mismo tiempo." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "No se pueden utilizar los parámetros de consulta subtree_as_list y " "subtree_as_ids al mismo tiempo." msgid "Cascade update is only allowed for enabled attribute." msgstr "" "Solo se permite la actualización en casacada de los atributos habilitados." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "La combinación de filtro de grupo y efectivo dará siempre como resultado una " "lista vacía." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "La combinación de filtros heredados, de dominio y efectivos dará siempre " "como resultado una lista vacía." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entidad de API de config en /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Se han especificado ID de región conflictivos: \"%(url_id)s\" != \"%(ref_id)s" "\"" msgid "Consumer not found" msgstr "No se ha encontrado el consumidor" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "No se ha podido determinar el ID del proveedor de identidades. 
La opción de " "configuración %(issuer_attribute)s no se ha encontrado en el entorno de la " "solicitud." msgid "Could not find Identity Provider identifier in environment" msgstr "" "No se ha podido encontrar el identificador del proveedor de identidad en el " "entorno" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "No se ha podido correlacionar ninguna propiedad de usuario federado a valor " "de identidad. Compruebe los registros de depuración o la correlación " "utilizada para otener información más detallada." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "No se ha podido correlacionar el usuario al establecer la identidad de " "usuario efímera. Las reglas de correlación deben especificar ID/nombre de " "usuario o se debe establecer la variable de entorno REMOTE_USER." msgid "Could not validate the access token" msgstr "No se ha podido validar la señal de acceso" msgid "Credential signature mismatch" msgstr "Discrepancia en la firma de credencial" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Inhabilitando una entidad donde el atributo 'enable' se omite en la " "configuración." #, python-format msgid "Domain cannot be named %s" msgstr "El dominio no se puede llamar %s" #, python-format msgid "Domain cannot have ID %s" msgstr "El dominio no puede tener el ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "El dominio está inhabilitado: %s" msgid "Domain name cannot contain reserved characters." msgstr "El nombre de dominio no puede contener caracteres reservados." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." 
msgstr "" "Dominio: %(domain)s ya tiene definida una configuración - ignorando el " "archivo: %(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "ID duplicado, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Entrada duplicada: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nombre duplicado, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicado: %s" msgid "EC2 access key not found." msgstr "No se ha encontrado la clave de acceso de EC2." msgid "EC2 signature not supplied." msgstr "No se ha proporcionado la firma de EC2." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "" "No se ha encontrado el punto final %(endpoint_id)s en el proyecto " "%(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "" "No se ha encontrado la asociación del proyecto del grupo de puntos finales" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Compruebe que se haya establecido la opción de configuración idp_entity_id." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Compruebe que se haya establecido la opción de configuración " "idp_sso_endpoint." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Error al analizar el archivo de configuración para el dominio: %(domain)s, " "archivo: %(file)s." 
#, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Error al abrir el archivo %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Error al analizar las reglas %(path)s: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Se ha superado el número máximo de intentos de registrar un dominio " "%(domain)s para utilizar el controlador SQL, el último dominio que parece " "haberlo tenido es %(last_domain)s, abandonando" #, python-format msgid "Expected dict or list: %s" msgstr "Se espera un diccionario o una lista: %s" msgid "Failed to validate token" msgstr "Ha fallado la validación del token" msgid "Federation token is expired" msgstr "La señal de federación ha caducado" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "El campo \"remaining_uses\" está establecido en %(value)s, pero no debe " "estar establecido para poder redelegar una confianza" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "El grupo %(group)s no se admite para las configuraciones específicas de " "dominio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "El grupo %(group_id)s devuelto por la correlación %(mapping_id)s no se ha " "encontrado en el programa de fondo." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "" "No se ha encontrado el ID de atributo %(id_attr)s en el objeto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "El proveedor de identidad %(idp)s está inhabilitado" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." 
msgstr "" "No se ha incluido el identificador del proveedor de identidad de entrada " "entre los identificadores aceptados." msgid "Invalid EC2 signature." msgstr "Firma de EC2 no válida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "Opción de LDAP TLS no válida: %(option)s. Elegir uno de: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opción LDAP TLS_AVAIL inválida: %s. TLS no disponible" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "Opción deref LDAP no válida: %(option)s. Elija una de: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Ámbito LDAP incorrecto: %(scope)s. Selecciones una de las siguientes " "opciones: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinación TLS/LDAPS no válida" msgid "Invalid blob in credential" msgstr "Blob no válido en credencial" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nombre de dominio no válido: %(domain)s encontrado en el nombre de archivo " "de configuración: %(file)s - ignorando este archivo." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Regla no válida: %(identity_value)s. Se deben especificar las palabras clave " "'grupos' y 'dominio ." 
msgid "Invalid signature" msgstr "Firma no válida" msgid "Invalid user / password" msgstr "Usuario / contraseña no válidos" msgid "Invalid username or TOTP passcode" msgstr "Nombre de usuario o código de acceso TOTP no válido" msgid "Invalid username or password" msgstr "Usuario o contraseña no válidos" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Longitud del ID de recurso transformable > 64, que es el número máximo de " "caracteres permitidos" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La sección local de la correlación %(mapping_id)s hace referencia a una " "coincidencia remota que no existe (p.e. {0} en una sección local)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "URL de punto final formado incorrectamente (%(endpoint)s), vea el registro " "de ERROR para obtener detalles." #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Se ha alcanzado la profundidad máxima de jerarquía en la rama %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "El miembro %(member)s ya es miembro del grupo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Método no invocable: %s" msgid "Missing entity ID from environment" msgstr "Falta el ID de entidad del entorno" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modificación de \"redelegation_count\" tras la redelegación está " "prohibida. Se recomienda omitir este parámetro." msgid "Multiple domains are not supported" msgstr "No se admiten varios dominios" msgid "Must specify either domain or project" msgstr "Debe especificar dominio o proyecto" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "" "No se ha proporcionado el ID de dominio de proyecto ni el nombre de dominio " "de proyecto." msgid "No authenticated user" msgstr "Ningún usuario autenticado " msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "No se han encontrado claves de cifrado; ejecute keystone-manage fernet_setup " "para el programa de arranque uno." msgid "No options specified" msgstr "No se especificaron opciones" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "No hay ninguna política asociada con el punto final %(endpoint_id)s." msgid "No token in the request" msgstr "No hay ningún token en la solicitud" msgid "One of the trust agents is disabled or deleted" msgstr "Uno de los agentes de confianza está inhabilitado o se ha suprimido" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Se ha encontrado la opción %(option)s sin grupo especificado al comprobar la " "solicitud de configuración del dominio" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "La opción %(option)s del grupo %(group)s no se admite para las " "configuraciones específicas del dominio" msgid "Project field is required and cannot be empty." msgstr "El campo de proyecto es obligatorio y no puede estar vacío." #, python-format msgid "Project is disabled: %s" msgstr "El proyecto está inhabilitado: %s" msgid "Project name cannot contain reserved characters." msgstr "El nombre de proyecto no puede contener caracteres reservados." 
#, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "No se da soporte para leer el valor predeterminado para la opción %(option)s " "del grupo %(group)s" msgid "Redelegation allowed for delegated by trust only" msgstr "Sólo se permite volver a delegar un delegado por confianza" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "La profundidad de redelegación restante de %(redelegation_depth)d está fuera " "del rango permitido de [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "La solicitud debe tener un parámetro de consulta de origen" msgid "Request token is expired" msgstr "El token solicitado ha expirado" msgid "Request token not found" msgstr "No se ha encontrado el token solicitado" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "El tiempo de caducidad solicitado es mayor que el que puede proporcionar la " "confianza redelegada" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profundidad de redelegación solicitada de %(requested_count)d es mayor " "que la permitida %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "El ámbito para dominio y proyecto no está permitido" msgid "Scoping to both domain and trust is not allowed" msgstr "El ámbito para dominio y confianza no está permitido" msgid "Scoping to both project and trust is not allowed" msgstr "El ámbito para proyecto y confianza no está permitido" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "El proveedor de servicios %(sp)s está inhabilitado" msgid "Some of requested roles are not in redelegated trust" msgstr "Algunos roles solicitados no están en la confianza redelegada" msgid "Specify a domain or project, not both" msgstr "Especifique un dominio o 
proyecto, no ambos" msgid "Specify a user or group, not both" msgstr "Especifique un usuario o grupo, no ambos" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' no debe ser antes que ahora. El servidor podría no cumplir la " "solicitud porque tiene un formato incorrecto o es incorrecta de alguna otra " "forma. Se supone que el cliente es erróneo." msgid "The --all option cannot be used with the --domain-name option" msgstr "La opción --all no se puede utilizar con la opción --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "El archivo de configuración de Keystone %(config_file)s no se ha podido " "encontrar." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configuración específica del dominio Keystone ha especificado más de un " "controlador SQL (sólo se permite uno): %(source)s." msgid "The action you have requested has not been implemented." msgstr "La acción que ha solicitado no ha sido implemento" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "La longitud de la contraseña debe ser menor o igual que %(size)i. El " "servidor no pudo cumplir la solicitud porque la contraseña no es válida." msgid "The request you have made requires authentication." msgstr "La solicitud que ha hecho requiere autenticación." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "La llamada de revocación debe tener un id_dominio y un id_proyecto. Esto es " "un error del servidor de Keystone. 
La solicitud actual ha terminado " "anormalmente." msgid "The service you have requested is no longer available on this server." msgstr "El servicio que ha solicitado ya no está disponible en este servidor." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La región padre %(parent_region_id)s especificada crearía una jerarquía de " "regiones circular." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "El valor de grupo %(group)s especificado en la configuración debe ser un " "diccionario de opciones" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Esta no es una versión de carga útil Fernet reconocida: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "La indicación de fecha y hora no está en el formato esperado. El servidor no " "ha podido satisfacer la solicitud porque tiene un formato incorrecto o es " "incorrecta de alguna otra forma. Se supone que el cliente es erróneo." msgid "Token version is unrecognizable or unsupported." msgstr "Versión de la señal no reconocida o no soportada." msgid "Trustee has no delegated roles." msgstr "La entidad de confianza no tiene roles delegados." msgid "Trustor is disabled." msgstr "Trustor está deshabilitado." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Intentando actualizar el grupo %(group)s, para que ese, y sólo ese grupo se " "especifique en la configuración" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Intentando actualizar la opción %(option)s en el grupo %(group)s, pero la " "configuración proporcionada contiene la opción %(option_other)s en su lugar" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Intentando actualizar la opción %(option)s en el grupo %(group)s, para que " "esa, y solo esa opción, se especifique en la configuración" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "No se puede acceder a la base de datos de keystone, compruebe si está " "configurada correctamente." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "No se puede suprimir la región %(region_id)s porque sus regiones secundarias " "tienen puntos finales asociados." 
#, python-format msgid "Unable to locate domain config directory: %s" msgstr "No se ha podido localizar el directorio config de dominio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "No se ha podido buscar el usuario %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "No se puede reconciliar el atributo de identidad %(attribute)s porque tiene " "los valores en conflicto %(new)s y %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Se ha encontrado un tipo de asignación inesperado, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Estado inesperado solicitado para la respuesta de JSON Home, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Dominio desconocido '%(name)s' especificado por --domain-name" msgid "Update of `domain_id` is not allowed." msgstr "No se permite la actualización de `domain_id`." msgid "Update of `is_domain` is not allowed." msgstr "No se permite la actualización de `is_domain`." msgid "Update of `parent_id` is not allowed." msgstr "No se permite la actualización de `parent_id`." 
#, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "El usuario %(user_id)s no tiene acceso al Dominio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "El usuario %(user_id)s no tiene acceso al proyecto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "El usuario %(user_id)s ya es miembro del grupo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Usuario '%(user_id)s' no encontrado en el grupo '%(group_id)s'" msgid "User IDs do not match" msgstr "ID de usuario no coinciden" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "No se puede crear la autorización de usuario porque falta el ID de usuario o " "el nombre de usuario con el ID de dominio, o el nombre de usuario con el " "nombre de dominio." #, python-format msgid "User is disabled: %s" msgstr "El usuario está inhabilitado: %s" msgid "User is not a trustee." msgstr "El usuario no es de confianza." #, python-format msgid "User type %s not supported" msgstr "El tipo de usuario %s no está soportado" msgid "You are not authorized to perform the requested action." msgstr "No está autorizado para realizar la acción solicitada." #, python-format msgid "You are not authorized to perform the requested action: %(action)s." msgstr "No está autorizado para realizar la acción solicitada: %(action)s." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Ha intentado crear un recurso utilizando el token de administración. Dado " "que este token no se encuentra dentro de un dominio, debe incluir " "explícitamente un dominio al que pertenecerá este recurso." 
msgid "any options" msgstr "cualquier opción" msgid "auth_type is not Negotiate" msgstr "auth_type no es Negotiate" msgid "authorizing user does not have role required" msgstr "el usuario de autorización no tiene la función requerida" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "No se puede crear un proyecto en una rama que contiene un proyecto " "inhabilitado: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "no se puede suprimir un proyecto habilitado que actúe como dominio. " "Inhabilite el proyecto %s." #, python-format msgid "group %(group)s" msgstr "grupo %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "no se permite tener dos proyectos actuando como dominios con el mismo " "nombre: %s" msgid "only root projects are allowed to act as domains." msgstr "Sólo los proyectos raíz pueden actuar como dominios." #, python-format msgid "option %(option)s in group %(group)s" msgstr "opción %(option)s en el grupo %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses debe ser un entero positivo o nulo." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "remaining_uses no se debe establecer si se permite la redelegación" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "solicitud para actualizar el grupo %(group)s, pero la configuración " "proporcionada contiene el grupo %(group_other)s en su lugar" msgid "rescope a scoped token" msgstr "Volver a establecer el ámbito de una señal con ámbito" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "Se debe especificar scope.project.id si se especifica también include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "No se ha encontrado o no es un directorio tls_cacertdir %s" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "No se ha encontrado o no es un fichero tls_cacertfile %s" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/fr/0000775000175000017500000000000000000000000017172 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000020757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/fr/LC_MESSAGES/keystone.po0000664000175000017500000007426400000000000023175 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. 
# # Translators: # Fries , 2014 # Maxime COQUEREL , 2014 # Andrew Melim , 2014 # Olivier Perrin , 2013 # Olivier Perrin , 2013 # Rémi Le Trocquer , 2014 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:34+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: French\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Le nom %(entity)s ne peut pas contenir les caractères réservés suivants : " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s n'est pas un événement de notification valide, doit être l'une des " "options suivantes : %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s n'est pas un hôte de tableau de bord digne de confiance" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s ne permet pas les migrations de base de données. Le chemin du " "référentiel de migration %(path)s n'existe pas ou n'est pas un répertoire." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "La zone %s est obligatoire et ne peut pas être vide" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Désactivez le mode insecure_debug pour supprimer ces détails.)" msgid "--all option cannot be mixed with other options" msgstr "-all option ne peut pas être mélanger avec d'autres options." msgid "A project-scoped token is required to produce a service catalog." msgstr "Un jeton de projet est requis pour produire un catalogue de service." msgid "Access token is expired" msgstr "Token d'accès est expiré" msgid "Access token not found" msgstr "Token d'accès non trouvé" msgid "Additional authentications steps required." msgstr "Authentifications étapes supplémentaires sont nécessaires ." msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Une erreur inattendue est survenue lors de l'extraction des configurations " "de domaine" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "" "Une erreur inattendue est survenue lors de la tentative de stockage de %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Une erreur inattendue a empêché le serveur de traiter votre requête." msgid "At least one option must be provided" msgstr "Au moins une option doit être fourni" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Au moins une option doit être indiquée. Utilisez --all ou --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Tentative d'authentification avec une méthode non prise en charge ." msgid "Authentication plugin error." msgstr "Erreur d'authentification du plugin." msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Impossible d'autoriser un jeton de requête avec un jeton émis par " "l'intermédiaire de la délégation." 
#, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Impossible de modifier %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Ne peut pas changer l'identifiant du domaine" msgid "Cannot change user ID" msgstr "Impossible de modifier l'id de l'utilisateur" msgid "Cannot change user name" msgstr "Impossible de changer le nom d'utilisateur" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Impossible de supprimer un domaine activé, veuillez d'abord le désactiver." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossible de supprimer le projet %(project_id)s car son sous-arbre contient " "des projets activés." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Impossible de supprimer le projet %s car il ne s'agit pas d'une feuille dans " "la hiérarchie. Utilisez l'option cascade si vous voulez supprimer un sous-" "arbre complet." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossible de désactiver le projet %(project_id)s car son sous-arbre " "contient des projets activés." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "Impossible d'activer le projet %s car ses parents sont désactivés" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Impossible de répertorier les affectations en provenance de groupes et " "filtrées par ID utilisateur." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Impossible de répertorier des jetons de requête avec un jeton émis par " "l'intermédiaire de la délégation." 
#, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Impossible de retirer le rôle qui n'est pas accordé, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Impossible de tronquer un appel de pilote sans avoir hints list comme " "premier paramètre après self " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Impossible d'utiliser les paramètres d'interrogation parents_as_list et " "parents_as_ids en même temps." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Impossible d'utiliser les paramètres d'interrogation subtree_as_list et " "subtree_as_ids en même temps." msgid "Cascade update is only allowed for enabled attribute." msgstr "La mise à jour en cascade n'est autorisée que pour l'attribut activé." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Le fait de combiner un filtre effectif et un filtre de groupes donnera " "toujours une liste vide." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Le fait de combiner des filtres effectifs, de domaine et hérités donnera " "toujours une liste vide." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entité Config API à /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "ID de région contradictoires indiqués : \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Client non trouvé" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Impossible de déterminer l'ID du fournisseur d'identité. L'option de " "configuration %(issuer_attribute)s est introuvable dans l'environnement de " "demande." 
msgid "Could not find Identity Provider identifier in environment" msgstr "" "L'identificateur de fournisseur d'identité est introuvable dans " "l'environnement." msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Impossible de mapper des propriétés d'utilisateur fédéré avec des valeurs " "d'identité. Pour plus d'informations, consultez les journaux de débogage ou " "le mappage utilisé." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Impossible de mapper l'utilisateur lors de la définition de l'identité " "utilisateur éphémère. des règles de mappage doivent spécifier ID utilisateur/" "nom ou la variable d'environnement REMOTE_USER doit être définie." msgid "Could not validate the access token" msgstr "Ne peut pas valider l'acces du token" msgid "Credential signature mismatch" msgstr "Signature des données d'identification non concordante" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Désactivation d'une entité dont l'attribut 'enable' est ignoré par la " "configuration." #, python-format msgid "Domain cannot be named %s" msgstr "Le domaine ne peut pas s'appeler %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Le domaine ne peut pas posséder l'ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "Domaine désactivé : %s" msgid "Domain name cannot contain reserved characters." msgstr "Le nom du domaine ne peut pas contenir des caractères réservés." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Le domaine : %(domain)s possède déjà une configuration définie - ce fichier " "sera ignoré : %(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "ID en double, %s." 
#, python-format msgid "Duplicate entry: %s" msgstr "Entrée en double : %s" #, python-format msgid "Duplicate name, %s." msgstr "Nom en double, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID distant en double : %s" msgid "EC2 access key not found." msgstr "Clé d'accès EC2 non trouvée." msgid "EC2 signature not supplied." msgstr "Signature EC2 non fournie." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Noeud final %(endpoint_id)s introuvable dans le projet %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Association de projets du groupe de points finals introuvable" msgid "Ensure configuration option idp_entity_id is set." msgstr "Assurez-vous que l'option de configuration idp_entity_id est définie." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Assurez-vous que l'option de configuration idp_sso_endpoint est définie." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Erreur lors de l'analyse syntaxique du fichier de configuration pour le " "domaine : %(domain)s, fichier : %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Erreur lors de l'ouverture du fichier %(path)s : %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Erreur lors de l'analyse des règles %(path)s : %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Nombre de tentatives d'enregistrement du domaine %(domain)s dépassé pour " "utiliser le pilote SQL, le dernier domaine qui semble l'avoir eu est " "%(last_domain)s, abandon..." 
#, python-format msgid "Expected dict or list: %s" msgstr "Type dictionnaire ou liste attendu: %s" msgid "Failed to validate token" msgstr "Echec de validation du token" msgid "Federation token is expired" msgstr "La fédération du toke est expiré" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "La zone \"remaining_uses\" est définie sur %(value)s alors qu'elle ne doit " "pas être définie pour redéléguer une fiducie" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Le groupe %(group)s n'est pas pris en charge pour les configurations " "spécifiques au domaine" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Groupe %(group_id)s renvoyé par le mappage %(mapping_id)s introuvable dans " "le backend." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "L'attribut ID %(id_attr)s est introuvable dans l'objet LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Le fournisseur d'identité %(idp)s est désactivé" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "L'identificateur entrant du fournisseur d'identité ne fait pas partie des " "identificateurs acceptés." msgid "Invalid EC2 signature." msgstr "Signature EC2 non valide." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Option de certificat TLS LDAP non valide : %(option)s. Choisissez l'une des " "options : %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Mauvaise option LDAP TLS_AVAIL: %s. TLS n'est pas disponible" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Option déréférencée LDAP non valide : %(option)s. 
Choisir l'une des options " "suivantes : %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Portée LDAP invalide: %(scope)s. Choisissez parmi: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinaison TLS / LDAPS invalide" msgid "Invalid blob in credential" msgstr "Blob non valide dans les informations d'identification" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nom de domaine non valide : %(domain)s trouvé dans le nom du fichier de " "configuration : %(file)s - ce fichier sera ignoré." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Règle non valide : %(identity_value)s. Les mots clés 'groups' et 'domain' " "doivent être spécifiés." msgid "Invalid signature" msgstr "Signature non valide" msgid "Invalid user / password" msgstr "Login / Mot de passe non valide" msgid "Invalid username or TOTP passcode" msgstr "Nom d'utilisateur ou code d'authentification TOTP non valide" msgid "Invalid username or password" msgstr "Nom d'utilisateur ou mot de passe invalide" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Longueur de l'ID de ressource transformable > 64 (nombre maximal de " "caractères autorisé)" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La section locale dans le mappage %(mapping_id)s fait référence à une " "correspondance à distance qui n'existe pas (par ex. {0} dans une section " "locale)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Un caractère est mal formé dans URL (%(endpoint)s), regarder le log d'erreur " "pour plus de détails." #, python-format msgid "Max hierarchy depth reached for %s branch." 
msgstr "La profondeur maximale de hiérarchie est atteinte pour la branche %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Le membre %(member)s est déjà membre du groupe %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Impossible d'appeler la méthode %s" msgid "Missing entity ID from environment" msgstr "IP d'entité manquant de l'environnement" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modification de \"redelegation_count\" lors de la redélégation est " "interdite. Il est conseillé d'omettre ce paramètre." msgid "Multiple domains are not supported" msgstr "Les multiples domaines ne sont pas supporté" msgid "Must specify either domain or project" msgstr "Indiquer obligatoirement un domaine ou un projet" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "Aucun ID ou nom de domaine de projet n'a été fourni." msgid "No authenticated user" msgstr "Aucun utilisateur authentifié" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Aucune clé de chiffrement trouvée ; exécutez keystone-manage fernet_setup " "pour en amorcer une." msgid "No options specified" msgstr "Aucune option spécifiée" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Aucune règle n'est associée au point final %(endpoint_id)s." 
msgid "No token in the request" msgstr "Aucun jeton dans la demande" msgid "One of the trust agents is disabled or deleted" msgstr "L'un des agents de confiance est désactivé ou supprimé" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Option %(option)s trouvée avec aucun groupe spécifié lors de la vérification " "de la demande de configuration du domaine" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "L'option %(option)s dans le groupe %(group)s n'est pas prise en charge pour " "les configurations spécifiques au domaine" msgid "Project field is required and cannot be empty." msgstr "La zone Projet est requise et ne doit pas être vide." #, python-format msgid "Project is disabled: %s" msgstr "Projet désactivé : %s" msgid "Project name cannot contain reserved characters." msgstr "Le nom du projet ne peut pas contenir des caractères réservés." 
#, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "La lecture de la valeur par défaut pour l'option %(option)s dans le groupe " "%(group)s n'est pas prise en charge" msgid "Redelegation allowed for delegated by trust only" msgstr "Redélégation autorisée pour une délégation par fiducie uniquement" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Profondeur de redélégation restante %(redelegation_depth)d par rapport à la " "plage admise [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "La demande doit avoir un paramètre de requête d'origine" msgid "Request token is expired" msgstr "La requete du token est expiré" msgid "Request token not found" msgstr "Token de requete non trouvé" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Le délai d'expiration demandé dépasse celui que la fiducie redéléguée peut " "fournir" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profondeur de redélégation demandée %(requested_count)d est supérieure à " "la limite autorisée %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "La configuration du domaine et du projet n'est pas autorisée" msgid "Scoping to both domain and trust is not allowed" msgstr "" "La configuration du domaine et du certificat de confiance n'est pas autorisée" msgid "Scoping to both project and trust is not allowed" msgstr "" "La configuration du projet et du certificat de confiance n'est pas autorisée" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Le fournisseur de services %(sp)s est désactivé" msgid "Some of requested roles are not in redelegated trust" msgstr "Certains rôles demandés ne font pas partie de la fiducie redéléguée" msgid "Specify a domain or 
project, not both" msgstr "Spécifier un domaine ou un projet, pas les deux" msgid "Specify a user or group, not both" msgstr "Spécifier un utilisateur ou groupe, pas les deux" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "La valeur 'expires_at' ne doit pas être située dans le passé. Le serveur n'a " "pas pu exécuter la demande vu qu'elle est mal formée ou incorrecte. Le " "client est considéré comme étant à l'état d'erreur." msgid "The --all option cannot be used with the --domain-name option" msgstr "L'option --all ne peut pas être utilisée avec l'option --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Le fichier de configuration Keystone %(config_file)s ne peut pas être trouvé." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configuration spécifique au domaine keystone a spécifié plusieurs pilotes " "SQL (un seul est autorisé) : %(source)s." msgid "The action you have requested has not been implemented." msgstr "L'action que vous avez demandée n'a pas été implémentée." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "La longueur du mot de passe doit être inférieure ou égale à %(size)i. n'est " "pas conforme à la demande car le mot de passe est incorrect." msgid "The request you have made requires authentication." msgstr "La demande que vous avez fait requiert une authentification." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." 
msgstr "" "L'appel de révocation ne doit pas contenir à la fois domain_id et " "project_id. Il s'agit d'un bogue dans le serveur Keystone. La demande en " "cours est abandonnée." msgid "The service you have requested is no longer available on this server." msgstr "Le service que vous avez demandé n'est plus disponible sur ce serveur." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La région parent spécifiée %(parent_region_id)s risque de créer une " "hiérarchie de région circulaire." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "La valeur du groupe %(group)s spécifié dans la configuration doit être un " "dictionnaire d'options" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Il ne s'agit pas d'une version de contenu Fernet reconnue : %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Horodatage n'est pas au format attendu. Le serveur n'a pas pu se conformer à " "la demande car elle est incorrectement formée ou incorrecte. Le client est " "considéré comme étant à l'état d'erreur." msgid "Token version is unrecognizable or unsupported." msgstr "Version de jeton non reconnue ou non prise en charge." msgid "Trustee has no delegated roles." msgstr "Le fiduciaire n'a aucun rôle délégué." msgid "Trustor is disabled." msgstr "Trustor est désactivé. 
" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentative de mise à jour du groupe %(group)s, de sorte que le groupe soit " "spécifié dans la configuration uniquement" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentative de mise à jour de l'option %(option)s dans le groupe %(group)s, " "mais la configuration fournie contient l'option %(option_other)s à la place" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentative de mise à jour de l'option %(option)s dans le groupe %(group)s, de " "sorte que l'option soit spécifiée dans la configuration uniquement" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Impossible d'accéder à la base de données keystone, vérifiez qu'elle est " "configurée correctement." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Impossible de supprimer la région %(region_id)s car la région ou ses régions " "enfant ont des noeuds finals associés." 
#, python-format msgid "Unable to locate domain config directory: %s" msgstr "Impossible de localiser le répertoire de configuration domaine: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Impossible de rechercher l'utilisateur %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Impossible de rapprocher l'attribut d'identité %(attribute)s car il possède " "des valeurs en conflit : %(new)s et %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Type inattendu d'affectation, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Statut inattendu demandé pour la réponse JSON Home, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Domaine inconnu '%(name)s' spécifié par --domain-name" msgid "Update of `domain_id` is not allowed." msgstr "La mise à jour de `domain_id` n'est pas autorisée." msgid "Update of `is_domain` is not allowed." msgstr "La mise à jour de `is_domain` n'est pas autorisée." msgid "Update of `parent_id` is not allowed." msgstr "La mise à jour de `parent_id` est interdite." #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "L'utilisateur %(user_id)s n'a pas accès au domaine %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "L'utilisateur %(user_id)s n'a pas accès au projet %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "L'utilisateur %(user_id)s est déjà membre du groupe %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Utilisateur '%(user_id)s' non trouvé dans le groupe '%(group_id)s'" msgid "User IDs do not match" msgstr "Les ID utilisateur ne correspondent pas." 
msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "L'authentification utilisateur ne peut pas être créée en raison de l'absence " "d'un ID, utilisateur, d'un nom d'utilisateur avec ID de domaine ou d'un nom " "utilisateur avec nom de domaine." #, python-format msgid "User is disabled: %s" msgstr "Utilisateur désactivé : %s" msgid "User is not a trustee." msgstr "L'utilisateur n'est pas administrateur." #, python-format msgid "User type %s not supported" msgstr "Type d'utilisateur %s non pris en charge" msgid "You are not authorized to perform the requested action." msgstr "Vous n'êtes pas autorisé à effectuer l'action demandée" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Vous avez essayé de créer une ressource à l'aide du jeton admin. Comme ce " "jeton ne figure pas dans un domaine, vous devez inclure explicitement un " "domaine auquel cette ressource doit appartenir." msgid "any options" msgstr "toute option" msgid "auth_type is not Negotiate" msgstr "auth_type n'est pas négocié" msgid "authorizing user does not have role required" msgstr "un rôle est facultatif pour l'utilisateur d'autorisation" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "Impossible de créer un projet dans une branche qui contient un projet " "désactivé : %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "impossible de supprimer un projet activé faisant office de domaine. Veuillez " "d'abord désactiver le projet %s." 
#, python-format msgid "group %(group)s" msgstr "groupe %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "il n'est pas autorisé d'avoir deux projets faisant office de domaines avec " "le même nom : %s" msgid "only root projects are allowed to act as domains." msgstr "seuls les projets racine sont autorisés à faire office de domaines." #, python-format msgid "option %(option)s in group %(group)s" msgstr "option %(option)s dans le groupe %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses doit être un entier positif ou nul." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses ne doit pas être défini si la redélégation est autorisée" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "demande de mise à jour du groupe %(group)s, mais la configuration fournie " "contient le groupe %(group_other)s à la place" msgid "rescope a scoped token" msgstr "Redéfinir la portée d'un jeton" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id doit être spécifié si include_subtree est également spécifié" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s introuvable ou n'est pas un répertoire" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s introuvable ou n'est pas un fichier" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/it/0000775000175000017500000000000000000000000017177 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 
keystone-26.0.0/keystone/locale/it/LC_MESSAGES/0000775000175000017500000000000000000000000020764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/it/LC_MESSAGES/keystone.po0000664000175000017500000007277500000000000023207 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:26+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Italian\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "Il nome %(entity)s non può contenere caratteri riservati: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s non è un evento di notifica valido, deve essere uno tra: " "%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s non è un host di dashboard attendibile" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s non fornisce le migrazioni del database. 
Il percorso del " "repository di migrazione in %(path)s non esiste o non è una directory." #, python-format msgid "%s field is required and cannot be empty" msgstr "Il campo %s è obbligatorio e non può essere vuoto" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "" "(Disabilitare la modalità insecure_debug per eliminare questi dettagli)." msgid "--all option cannot be mixed with other options" msgstr "--l'opzione all non può essere combinata con altre opzioni" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "È necessario un token in ambito progetto per produrre un catalogo del " "servizio." msgid "Access token is expired" msgstr "Il token di accesso è scaduto" msgid "Access token not found" msgstr "Token di accesso non trovato" msgid "Additional authentications steps required." msgstr "Sono richiesti ulteriori passi per le autenticazioni." msgid "An unexpected error occurred when retrieving domain configs" msgstr "" "Si è verificato un errore non previsto durante il richiamo delle " "configurazioni del dominio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Si è verificato un errore quando si tenta di archiviare %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "" "Si è verificato un errore non previsto che ha impedito al server di " "soddisfare la richiesta." msgid "At least one option must be provided" msgstr "È necessario fornire almeno un'opzione" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "È necessario fornire almeno un'opzione, utilizzare --all o --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Tentativo di autenticazione con un metodo non supportato." msgid "Authentication plugin error." msgstr "errore di autenticazione plugin." msgid "Cannot authorize a request token with a token issued via delegation." 
msgstr "" "Impossibile autorizzare un token di richiesta con un token emesso mediante " "delega." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Impossibile modificare %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Impossibile modificare l'ID dominio" msgid "Cannot change user ID" msgstr "Impossibile modificare l'ID utente" msgid "Cannot change user name" msgstr "Impossibile modificare il nome utente" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Impossibile eliminare un dominio abilitato; è necessario prima disabilitarlo." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossibile eliminare il progetto %(project_id)s perché la relativa " "struttura ad albero secondaria contiene progetti abilitati." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Impossibile eliminare il progetto %s perché non è una foglia nella " "gerarchia. Se si desidera eliminare un'intera struttura ad albero secondaria " "utilizza l'opzione a catena." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Impossibile disabilitare il progetto %(project_id)s perché la relativa " "struttura ad albero secondaria contiene progetti abilitati." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Impossibile abilitare il progetto %s perché dispone di elementi parent " "disabilitati" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Impossibile elencare le assegnazione originate da gruppi e filtrate da ID " "utente." msgid "Cannot list request tokens with a token issued via delegation." 
msgstr "" "Impossibile elencare i token della richiesta con un token emesso mediante " "delega." #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Impossibile rimuovere un ruolo che non è stato concesso, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Impossibile troncare una chiamata al driver senza hints list come primo " "parametro dopo self " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Impossibile utilizzare i parametri della query parents_as_list e " "parents_as_ids contemporaneamente." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Impossibile utilizzare i parametri della query subtree_as_list e " "subtree_as_ids contemporaneamente." msgid "Cascade update is only allowed for enabled attribute." msgstr "L'aggiornamento a catena è consentito solo per un attributo abilitato." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "La combinazione del filtro operativo e di gruppo avrà sempre come risultato " "un elenco vuoto." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "La combinazione di filtri operativi, di dominio ed ereditati avrà sempre " "come risultato un elenco vuoto." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entità API config in /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "Sono stati specificati ID regione in conflitto: \"%(url_id)s\" != " "\"%(ref_id)s\"" msgid "Consumer not found" msgstr "Consumer non trovato" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Impossibile determinare l'ID del provider di identità. 
L'opzione di " "configurazione %(issuer_attribute)s non è stata trovata nell'ambiente di " "richiesta. " msgid "Could not find Identity Provider identifier in environment" msgstr "" "Impossibile trovare l'identificativo del provider identità nell'ambiente" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Impossibile associare le proprietà dell'utente federato per identificare i " "valori. Controllare i log di debug o l'associazione utilizzata per ulteriori " "dettagli." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Impossibile associare l'utente durante l'impostazione dell'identità utente " "temporanea. Le regole di associazione devono specificare nome/id utente o la " "variabile di ambiente REMOTE_USER deve essereimpostata." msgid "Could not validate the access token" msgstr "Impossibile convalidare il token di accesso" msgid "Credential signature mismatch" msgstr "Mancata corrispondenza della firma delle credenziali" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Disabilitazione di un'entità in cui l'attributo 'enable' è ignorato dalla " "configurazione." #, python-format msgid "Domain cannot be named %s" msgstr "Il dominio non può essere denominato %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Il dominio non può avere l'ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "Il dominio è disabilitato: %s" msgid "Domain name cannot contain reserved characters." msgstr "Il nome dominio non può contenere caratteri riservati." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Il dominio: %(domain)s dispone già di una configurazione definita - si sta " "ignorando il file: %(file)s." 
#, python-format msgid "Duplicate ID, %s." msgstr "ID duplicato, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Voce duplicata: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nome duplicato, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicato: %s" msgid "EC2 access key not found." msgstr "Chiave di accesso EC2 non trovata." msgid "EC2 signature not supplied." msgstr "Firma EC2 non fornita." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpoint %(endpoint_id)s non trovato nel progetto %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Associazione al progetto del gruppo di endpoint non trovata" msgid "Ensure configuration option idp_entity_id is set." msgstr "" "Accertarsi che l'opzione di configurazione idp_entity_id sia impostata." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "Accertarsi che l'opzione di configurazione idp_sso_endpoint sia impostata." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Errore durante l'analisi del file di configurazione per il dominio: " "%(domain)s, file: %(file)s." 
#, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Errore durante l'apertura del file %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Errore durante l'analisi delle regole %(path)s: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Superato il numero di tentativi per registrare il dominio %(domain)s al fine " "di utilizzare il driver SQL, l'ultimo dominio che sembra avere avuto quel " "driver è %(last_domain)s, operazione terminata" #, python-format msgid "Expected dict or list: %s" msgstr "Previsto dict o list: %s" msgid "Failed to validate token" msgstr "Impossibile convalidare il token" msgid "Federation token is expired" msgstr "Il token comune è scaduto" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Il campo \"remaining_uses\" è impostato su %(value)s mentre non deve essere " "impostato per assegnare una nuova delega ad un trust" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Il gruppo %(group)s non è supportato per le configurazioni specifiche del " "dominio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Il gruppo %(group_id)s restituito dall'associazione %(mapping_id)s non è " "stato trovato nel backend." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Attributo ID %(id_attr)s non trovato nell'oggetto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Il provider identità %(idp)s è disabilitato" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." 
msgstr "" "L'identificativo del provider identità in entrata non è incluso tra gli " "identificativi accettati." msgid "Invalid EC2 signature." msgstr "Firma EC2 non valida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Opzione certificazioni (certs) LDAP TLS non valida: %(option)s. Scegliere " "una delle seguenti: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opzione LDAP TLS_AVAIL non valida: %s. TLS non disponibile" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Opzione deref LDAP non valida: %(option)s. Scegliere una tra: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Ambito LDAP non valido: %(scope)s. Scegliere uno dei seguenti: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinazione TLS / LDAPS non valida" msgid "Invalid blob in credential" msgstr "Blob non valido nella credenziale" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nome dominio non valido: %(domain)s trovato nel nome file di configurazione: " "%(file)s - si sta ignorando questo file." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Regola non valida: %(identity_value)s. Entrambi le parole chiave 'groups' e " "'domain' devono essere specificate." 
msgid "Invalid signature" msgstr "Firma non valida" msgid "Invalid user / password" msgstr "Utente/password non validi" msgid "Invalid username or TOTP passcode" msgstr "username o passcode TOTP non validi" msgid "Invalid username or password" msgstr "username o password non validi" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "La lunghezza dell'id risorsa trasformabile è > 64, che rappresenta il numero " "massimo di caratteri consentiti" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "La sezione locale nell'associazione %(mapping_id)s si riferisce ad una " "corrispondenza remota che non esiste (ad esempio {0} in una sezione locale)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Url dell'endpoint non corretto (%(endpoint)s), consultare il log ERROR per " "ulteriori dettagli." #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Profondità massima della gerarchia raggiunta per il ramo %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Il membro %(member)s è già un membro del gruppo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Metodo non richiamabile: %s" msgid "Missing entity ID from environment" msgstr "ID entità mancante dall'ambiente" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "La modifica di \"redelegation_count\" dopo la riassegnazione della delega " "non è consentita. Si consiglia di omettere questo parametro." msgid "Multiple domains are not supported" msgstr "Non sono supportati più domini" msgid "Must specify either domain or project" msgstr "È necessario specificare il dominio o il progetto" msgid "Neither Project Domain ID nor Project Domain Name was provided." 
msgstr "" "Non è stato fornito l'ID dominio progetto né il nome dominio progetto. " msgid "No authenticated user" msgstr "Nessun utente autenticato" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Nessuna chiave di codifica trovata; eseguire keystone-manage fernet_setup " "per eseguire un avvio." msgid "No options specified" msgstr "Nessuna opzione specificata" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Nessuna politica associata all'endpoint %(endpoint_id)s." msgid "No token in the request" msgstr "Nessun token nella richiesta" msgid "One of the trust agents is disabled or deleted" msgstr "Uno degli agent trust è disabilitato o eliminato" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "L'opzione %(option)s è stato trovato senza alcun gruppo specificato durante " "il controllo della richiesta di configurazione del dominio" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "L'opzione %(option)s nel gruppo %(group)s non è supportata per le " "configurazioni specifiche del dominio" msgid "Project field is required and cannot be empty." msgstr "Il campo progetto è obbligatorio e non può essere vuoto." #, python-format msgid "Project is disabled: %s" msgstr "Il progetto è disabilitato: %s" msgid "Project name cannot contain reserved characters." msgstr "Il nome progetto non può contenere caratteri riservati." 
#, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "La lettura dell'impostazione predefinita per l'opzione %(option)s nel gruppo " "%(group)s non è supportata" msgid "Redelegation allowed for delegated by trust only" msgstr "" "Assegnazione di una nuova delega consentita solo per i delegati dal trust" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "profondità di riassegnazione della delega rimanente %(redelegation_depth)d " "non compresa nell'intervallo consentito [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "La richiesta deve avere un parametro della query di origine" msgid "Request token is expired" msgstr "Il token della richiesta è scaduto" msgid "Request token not found" msgstr "token della richiesta non trovata" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Il tempo di scadenza richiesto è maggiore di quello che può essere fornito " "dal trust con delega riassegnata" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "La profondità di riassegnazione della delega richiesta %(requested_count)d è " "maggiore del valore consentito %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "Il controllo sia del dominio che del progetto non è consentito" msgid "Scoping to both domain and trust is not allowed" msgstr "Il controllo sia del dominio che di trust non è consentito" msgid "Scoping to both project and trust is not allowed" msgstr "Il controllo sia delprogetto che di trust non è consentito" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Il Provider del servizio %(sp)s è disabilitato" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Alcuni dei ruoli richiesti non sono presenti nel trust 
con delega riassegnata" msgid "Specify a domain or project, not both" msgstr "Specificare un dominio o un progetto, non entrambi" msgid "Specify a user or group, not both" msgstr "Specificare un utente o un gruppo, non entrambi" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' non deve essere prima ora. Il server non è riuscito a " "rispettare larichiesta perché è in formato errato o non corretta. Il client " "viene considerato in errore." msgid "The --all option cannot be used with the --domain-name option" msgstr "L'opzione --all non può essere utilizzata con l'opzione --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "Impossibile trovare il file di configurazione Keystone %(config_file)s." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "La configurazione specifica del dominio keystone ha specificato più di un " "driver SQL (solo uno è consentito): %(source)s." msgid "The action you have requested has not been implemented." msgstr "L'azione richiesta non è stata implementata." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "La lunghezza della password deve essere minore o uguale a %(size)i. Il " "server non è in grado di soddisfare la richiesta perché la password non è " "valida." msgid "The request you have made requires authentication." msgstr "La richiesta che è stata fatta richiede l'autenticazione." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." 
msgstr "" "La chiamata di revoca non deve avere entrambi domain_id e project_id. Questo " "è un bug nel server Keystone. La richiesta corrente è stata interrotta." msgid "The service you have requested is no longer available on this server." msgstr "Il servizio richiesto non è più disponibile su questo server." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "La regione parent specificata %(parent_region_id)s crea una gerarchia di " "regione circolare." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Il valore del gruppo %(group)s specificato nella configurazione deve essere " "un dizionario di opzioni" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Questa non è una versione di payload Fernet riconosciuta: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "Data/ora non nel formato previsto. Il server non è riuscito a rispettare la " "richiesta perché è in formato errato o non corretta. Il client viene " "considerato in errore." msgid "Token version is unrecognizable or unsupported." msgstr "La versione token non è riconoscibile o non supportata. " msgid "Trustee has no delegated roles." msgstr "Trustee non ha ruoli delegati." msgid "Trustor is disabled." msgstr "Trustor è disabilitato." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentativo di aggiornare il gruppo %(group)s, pertanto, solo quel gruppo deve " "essere specificato nella configurazione" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentativo di aggiornare l'opzione %(option)s nel gruppo %(group)s, ma la " "configurazione fornita contiene l'opzione %(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentativo di aggiornare l'opzione %(option)s nel gruppo %(group)s, pertanto, " "solo quell'opzione deve essere specificata nella configurazione" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Impossibile accedere al database del keystone, controllare se è configurato " "correttamente." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Impossibile eliminare la regione %(region_id)s perché la regione o le " "relative regioni child hanno degli endpoint associati." 
#, python-format msgid "Unable to locate domain config directory: %s" msgstr "Impossibile individuare la directory config del dominio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Impossibile eseguire la ricerca dell'utente %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Impossibile riconciliare l'attributo identity %(attribute)s poiché ha " "valori in conflitto tra i %(new)s e i %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "È stato rilevato un tipo di assegnazione non previsto, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Stato non previsto richiesto per la risposta JSON Home, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Dominio sconosciuto '%(name)s' specificato da --domain-name" msgid "Update of `domain_id` is not allowed." msgstr "Aggiornamento di `domain_id` non consentito." msgid "Update of `is_domain` is not allowed." msgstr "Aggiornamento di `is_domain` non consentito." msgid "Update of `parent_id` is not allowed." msgstr "Aggiornamento di `parent_id` non consentito." 
#, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "L'utente %(user_id)s non ha accesso al dominio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "L'utente %(user_id)s non ha accesso al progetto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "L'utente %(user_id)s è già membro del gruppo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "L'utente '%(user_id)s' non è stato trovato nel gruppo '%(group_id)s'" msgid "User IDs do not match" msgstr "Gli ID utente non corrispondono" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "L'autorizzazione utente non può essere creata perché manca l'id utente o il " "nome utente con l'id dominio o il nome utente con il nome dominio. " #, python-format msgid "User is disabled: %s" msgstr "L'utente è disabilitato: %s" msgid "User is not a trustee." msgstr "L'utente non è un amministratore." #, python-format msgid "User type %s not supported" msgstr "Tipo utente %s non supportato" msgid "You are not authorized to perform the requested action." msgstr "Non si possiede l'autorizzazione per eseguire l'operazione richiesta." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Si è cercato di creare una risorsa utilizzando il token admin. Poiché questo " "token non si trova all'interno di un dominio, è necessario includere " "esplicitamente un dominio per fare in modo che questa risorsa vi appartenga." 
msgid "any options" msgstr "qualsiasi opzione" msgid "auth_type is not Negotiate" msgstr "auth_type non è Negotiate" msgid "authorizing user does not have role required" msgstr "l'utente per l'autorizzazione non dispone del ruolo richiesto" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "impossibile creare un progetto in un ramo che contiene un progetto " "disabilitato: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "impossibile eliminare un progetto abilitato che agisce come un dominio. " "Disabilitare prima il progetto %s." #, python-format msgid "group %(group)s" msgstr "gruppo %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "non è consentito avere due progetti che agiscono con lo stesso nome: %s" msgid "only root projects are allowed to act as domains." msgstr "Solo ai progetti root è consentito agire come domini." #, python-format msgid "option %(option)s in group %(group)s" msgstr "opzione %(option)s nel gruppo %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses deve essere un numero intero positivo o nullo." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses non deve essere impostato se è consentita la riassegnazione " "della delega" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "Richiesta di aggiornamento del gruppo %(group)s, ma la configurazione " "fornita contiene il gruppo %(group_other)s" msgid "rescope a scoped token" msgstr "riassegna ambito a token con ambito" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id deve essere specificato se è specificato anche " "include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "Impossibile trovare tls_cacertdir %s o non è una directory" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "Impossibile trovare tls_cacertfile %s o non è un file" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/ja/0000775000175000017500000000000000000000000017155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000020742 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/ja/LC_MESSAGES/keystone.po0000664000175000017500000010334000000000000023144 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Tomoyuki KATO , 2012-2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名に以下の予約済み文字を含めることはできません: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s は有効な通知イベントではありません。%(actions)s のいずれかでなけれ" "ばなりません。" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s は信頼されたダッシュボードホストではありません" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s はデータベースマイグレーションを提供していません。%(path)s のマ" "イグレーションリポジトリーのパスが存在しないか、ディレクトリーではないかのい" "ずれかです。" #, python-format msgid "%s field is required and cannot be empty" msgstr "フィールド %s は必須フィールドであるため、空にできません" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(これらの詳細を抑制するには、insecure_debug モードを無効にします。)" msgid "--all option cannot be mixed with other options" msgstr "--all オプションを他のオプションと組み合わせて使用することはできません" msgid "A project-scoped token is required to produce a service catalog." msgstr "" "サービスカタログを生成するには、プロジェクトにスコープが設定されたトークンが" "必要です。" msgid "Access token is expired" msgstr "アクセストークンの有効期限が切れています" msgid "Access token not found" msgstr "アクセストークンが見つかりません" msgid "Additional authentications steps required." 
msgstr "追加認証手順が必要です。" msgid "An unexpected error occurred when retrieving domain configs" msgstr "ドメイン設定の取得中に予期しないエラーが発生しました" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "%s の保存中に予期しないエラーが発生しました" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "予期しないエラーが発生したため、サーバーが要求を完了できませんでした。" msgid "At least one option must be provided" msgstr "少なくとも 1 つはオプションを指定する必要があります" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "少なくとも 1 つのオプションを指定する必要があります。--all または --domain-" "name を使用してください" msgid "Attempted to authenticate with an unsupported method." msgstr "サポートされていないメソッドを使用して認証を行おうとしました。" msgid "Authentication plugin error." msgstr "認証プラグインエラー。" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "委任によって発行されたトークンを使用して要求トークンを許可することはできませ" "ん。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s を変更できません" msgid "Cannot change Domain ID" msgstr "ドメイン ID を変更できません" msgid "Cannot change user ID" msgstr "ユーザー ID を変更できません" msgid "Cannot change user name" msgstr "ユーザー名を変更できません" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "有効になっているドメインは削除できません。最初にそのドメインを無効にしてくだ" "さい。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "プロジェクト %(project_id)s はそのサブツリーに有効になっているプロジェクトが" "含まれているため削除できません。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "プロジェクト %s は階層内の末端ではないため、削除できません。サブツリー全体を" "削除する場合、カスケードオプションを使用してください。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." 
msgstr "" "プロジェクト %(project_id)s はそのサブツリーに有効になっているプロジェクトが" "含まれているため、無効にできません。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "親が無効になっているプロジェクト %s は有効にできません" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "グループから取得し、ユーザー ID でフィルター処理した割り当てをリストできませ" "ん。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "委任によって発行されたトークンを使用して要求トークンをリストすることはできま" "せん。" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "許可されていないロールを削除できません、%s" #, fuzzy msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "セルフの後に最初のパラメーターとしてヒントリストなしでドライバー呼び出しを切" "り捨てることはできません" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "問い合わせパラメーター parents_as_list と parents_as_ids を同時に使用すること" "はできません。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "問い合わせパラメーター subtree_as_list と subtree_as_ids を同時に使用すること" "はできません。" msgid "Cascade update is only allowed for enabled attribute." msgstr "カスケード更新は有効になっている属性にのみ許可されます。" #, fuzzy msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "有効フィルターとグループフィルターの組み合わせは常に空のリストになります。" #, fuzzy msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "有効フィルター、ドメインフィルター、および継承フィルターの組み合わせは常に空" "のリストになります。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "/domains/%s/config の Config API エンティティー" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "矛盾するリージョン ID が指定されました: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "コンシューマーが見つかりません" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." 
msgstr "" "認証プロバイダー ID を判別できませんでした。設定オプション " "%(issuer_attribute)s が要求環境内で見つかりませんでした。" msgid "Could not find Identity Provider identifier in environment" msgstr "Identity Provider ID が環境情報内に見つかりませんでした" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "フェデレーションしたユーザープロパティーのいずれも ID 値にマップすることがで" "きませんでした。デバッグログまたは追加の詳細に使用したマッピングを確認してく" "ださい。" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "一時的なユーザー ID の設定中にユーザーをマップすることができませんでした。" "マッピング規則によってユーザー ID/ユーザー名を指定するか、REMOTE_USER 環境変" "数を設定するか、いずれかを行う必要があります。" msgid "Could not validate the access token" msgstr "アクセストークンを検証できませんでした" msgid "Credential signature mismatch" msgstr "クレデンシャルのシグニチャーが一致しません" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "「enable」属性が設定によって無視されているエンティティーを無効化中です。" #, python-format msgid "Domain cannot be named %s" msgstr "ドメインに %s という名前を付けることはできません" #, python-format msgid "Domain cannot have ID %s" msgstr "ドメインに %s という ID を付けることはできません" #, python-format msgid "Domain is disabled: %s" msgstr "ドメイン %s が無効になっています" msgid "Domain name cannot contain reserved characters." msgstr "ドメイン名に予約済み文字が含まれていてはなりません。" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "ドメイン %(domain)s には既に定義された設定があります。ファイル %(file)s は無" "視されます。" #, python-format msgid "Duplicate ID, %s." msgstr "重複した ID、%s。" #, python-format msgid "Duplicate entry: %s" msgstr "重複する項目: %s" #, python-format msgid "Duplicate name, %s." msgstr "重複した名前、%s。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重複するリモート ID: %s" msgid "EC2 access key not found." msgstr "EC2 アクセスキーが見つかりません。" msgid "EC2 signature not supplied." 
msgstr "EC2 の署名が提供されていません。" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "" "エンドポイント %(endpoint_id)s がプロジェクト %(project_id)s に見つかりません" msgid "Endpoint Group Project Association not found" msgstr "エンドポイントグループとプロジェクトの関連付けが見つかりません" msgid "Ensure configuration option idp_entity_id is set." msgstr "設定オプション idp_entity_id が設定されていることを確認してください。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "" "設定オプション idp_sso_endpoint が設定されていることを確認してください。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "ドメイン: %(domain)s、ファイル: %(file)s の設定ファイルの構文解析エラー。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "ファイル %(path)s のオープン中にエラーが発生しました: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "ルール %(path)s の解析中にエラーが発生しました: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "SQL ドライバーを使用するためのドメイン %(domain)s の登録の試行回数が制限を超" "過しました。最後に登録されたと思われるドメインは %(last_domain)s です。中断し" "ます" #, python-format msgid "Expected dict or list: %s" msgstr "期待される辞書またはリスト: %s" msgid "Failed to validate token" msgstr "トークンの検証に失敗しました" msgid "Federation token is expired" msgstr "統合トークンの有効期限が切れています" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "フィールド \"remaining_uses\" は %(value)s になっていますが、トラストを再委任" "するにはこのフィールドが設定されていてはなりません" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "ドメイン固有の設定ではグループ %(group)s はサポートされません" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." 
msgstr "" "マッピング %(mapping_id)s が返したグループ %(group_id)s がバックエンドにあり" "ませんでした。" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID 属性 %(id_attr)s が LDAP オブジェクト %(dn)s に見つかりません" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "ID プロバイダー %(idp)s は無効になっています" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "受諾した ID の中に着信 ID プロバイダーの ID が含まれません。" msgid "Invalid EC2 signature." msgstr "無効な EC2 の署名。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "無効な LDAP TLS 証明書オプション %(option)s です。 %(options)s のいずれかを選" "択してください" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "無効な LDAP TLS_AVAIL オプション %s です。TLS が利用できません。" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "無効な LDAP deref オプション %(option)s です。%(options)s のいずれかを選択し" "てください" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "無効な LDAP スコープ %(scope)s です。 %(options)s のいずれかを選んでくださ" "い: " msgid "Invalid TLS / LDAPS combination" msgstr "無効な TLS / LDAPS の組み合わせです" msgid "Invalid blob in credential" msgstr "クレデンシャル内の blob が無効です" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "無効なドメイン名 %(domain)s が設定ファイル名 %(file)s に見つかりました。この" "ファイルは無視されます。" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." 
msgstr "" "無効なルール: %(identity_value)s。「グループ」と「ドメイン」の両方のキーワー" "ドを指定する必要があります。" msgid "Invalid signature" msgstr "シグニチャーが無効です" msgid "Invalid user / password" msgstr "ユーザー/パスワードが無効です" msgid "Invalid username or TOTP passcode" msgstr "無効なユーザー名または TOTP パスコード" msgid "Invalid username or password" msgstr "無効なユーザー名かパスワード" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "変換可能なリソース ID の長さは最大許容文字数である、64 文字より少なくなりま" "す。" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "マッピング %(mapping_id)s にあるローカルセクションは、存在しないリモートの一" "致 (例えばローカルセクションの {0}) を参照します。" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "エンドポイント URL (%(endpoint)s) の形式が正しくありません。詳しくはエラーロ" "グを参照してください。" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "%s ブランチに到達する最大の階層の深さ。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "メンバー %(member)s は既にグループ %(group)s のメンバーです" #, python-format msgid "Method not callable: %s" msgstr "メソッドが呼び出し可能ではありません: %s" msgid "Missing entity ID from environment" msgstr "環境情報にエンティティー ID が見つかりません" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "再委任時の「redelegation_count」の変更は禁止されています。このパラメーターは" "指定しないでください。" msgid "Multiple domains are not supported" msgstr "複数のドメインはサポートされていません" msgid "Must specify either domain or project" msgstr "ドメインまたはプロジェクトのいずれかを指定する必要があります" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "" "プロジェクトドメイン ID および プロジェクトドメイン名のいずれも指定されません" "でした。" msgid "No authenticated user" msgstr "認証されていないユーザー" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." 
msgstr "" "暗号鍵が見つかりません。keystone-manage fernet_setup を実行して暗号鍵を初期設" "定します。" msgid "No options specified" msgstr "オプションが指定されていません" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "" "エンドポイント %(endpoint_id)s に関連付けられているポリシーはありません。" msgid "No token in the request" msgstr "要求にトークンがありません" msgid "One of the trust agents is disabled or deleted" msgstr "トラストエージェントの 1 つが無効になっているか削除されています" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "ドメイン設定要求の検査中に、グループが指定されていないオプション %(option)s " "が見つかりました" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "ドメイン固有の設定ではグループ %(group)s のオプション %(option)s はサポートさ" "れていません" msgid "Project field is required and cannot be empty." msgstr "プロジェクトフィールドは必須であり、空にできません。" #, python-format msgid "Project is disabled: %s" msgstr "プロジェクト %s が無効になっています" msgid "Project name cannot contain reserved characters." msgstr "プロジェクト名に予約済み文字が含まれていてはなりません。" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "グループ %(group)s のオプション %(option)s のデフォルトの読み取りはサポートさ" "れません" msgid "Redelegation allowed for delegated by trust only" msgstr "再委任はトラストによる委任にのみ許可されます" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "%(redelegation_depth)d の残りの再委任の深さが、許可された範囲 [0.." 
"%(max_count)d] を超えています" #, fuzzy msgid "Request must have an origin query parameter" msgstr "要求には起点照会パラメーターが必要です" msgid "Request token is expired" msgstr "要求トークンの有効期限が切れています" msgid "Request token not found" msgstr "要求されたトークンが見つかりません" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "要求された有効期限は再委任されたトラストが提供可能な期間を超えています" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "要求された再委任の深さ %(requested_count)d が、許可された上限 %(max_count)d " "を超えています" msgid "Scoping to both domain and project is not allowed" msgstr "ドメインとプロジェクトの両方にスコープを設定することはできません" msgid "Scoping to both domain and trust is not allowed" msgstr "ドメインとトラストの両方にスコープを設定することはできません" msgid "Scoping to both project and trust is not allowed" msgstr "プロジェクトとトラストの両方にスコープを設定することはできません" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "サービスプロバイダー %(sp)s は無効になっています" msgid "Some of requested roles are not in redelegated trust" msgstr "要求されたロールの一部が再委任されたトラスト内にありません" msgid "Specify a domain or project, not both" msgstr "ドメインかプロジェクトを指定してください。両方は指定しないでください" msgid "Specify a user or group, not both" msgstr "ユーザーかグループを指定してください。両方は指定しないでください" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' は現時点以前であってはなりません。要求の形式が誤っているか、要求" "が正しくないために、サーバーはこの要求に応じることが出来ませんでした。クライ" "アントでエラーが発生していると考えられます。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all オプションを --domain-name オプションと併用することはできません" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "Keystone 設定ファイル %(config_file)s が見つかりませんでした。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." 
msgstr "" "keystone ドメイン固有設定で複数の SQL ドライバーが指定されています (1 つしか" "指定できません): %(source)s。" msgid "The action you have requested has not been implemented." msgstr "要求したアクションは実装されていません。" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "パスワードの長さは %(size)i 以下でなければなりません。パスワードが無効である" "ため、サーバーは要求に応じることができませんでした。" msgid "The request you have made requires authentication." msgstr "実行された要求には認証が必要です。" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "取り消し呼び出しに domain_id と project_id の両方を使用することはできません。" "これは、Keystone サーバーにおけるバグです。現在の要求は打ち切られます。" msgid "The service you have requested is no longer available on this server." msgstr "要求したサービスは現在このサーバーでは使用できません。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "指定された親リージョン %(parent_region_id)s では、リージョン階層構造でループ" "が発生してしまいます。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "設定で指定されたグループ %(group)s の値はオプションの辞書にする必要があります" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "これは認識可能な Fernet ペイロードバージョンではありません: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "タイムスタンプが想定された形式になっていません。要求の形式が不正もしくは正し" "くないため、サーバーは要求に応じることができませんでした。クライアントでエ" "ラーが発生していると考えられます。" msgid "Token version is unrecognizable or unsupported." msgstr "トークンバージョンが認識できないかサポートされません。" #, fuzzy msgid "Trustee has no delegated roles." msgstr "受託者に委任された役割がありません。" #, fuzzy msgid "Trustor is disabled." 
msgstr "委託者は無効です。" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "グループ %(group)s を更新しようとしていますが、その場合は設定でグループのみを" "指定する必要があります" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "グループ %(group)s のオプション %(option)s を更新しようとしましたが、指定され" "た設定には代わりにオプション %(option_other)s が含まれています" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "グループ %(group)s のオプション %(option)s を更新しようとしていますが、その場" "合は設定でオプションのみを指定する必要があります" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "keystone データベースにアクセスできません。このデータベースが正しく設定されて" "いるかどうかを確認してください。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "リージョン %(region_id)s またはその子リージョンがエンドポイントに関連付けられ" "ているため、このリージョンを削除できません。" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "ドメイン設定ディレクトリーが見つかりません: %s" #, python-format msgid "Unable to lookup user %s" msgstr "ユーザー %s を検索できません" #, fuzzy, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "ID 属性 %(attribute)s に競合する値 %(new)s と %(old)s が含まれているため、調" "整できません" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "予期しない割り当てタイプが検出されました。%s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "JSON Home 応答に対して予期しない状況が要求されました。%s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "不明なドメイン '%(name)s' が --domain-name で指定されました" msgid "Update of `domain_id` is not allowed." msgstr "`domain_id` の更新は許可されていません。" msgid "Update of `is_domain` is not allowed." 
msgstr "`is_domain` の更新は許可されません。" msgid "Update of `parent_id` is not allowed." msgstr "\"parent_id\" の更新は許可されていません。" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "" "ユーザー %(user_id)s はドメイン %(domain_id)s へのアクセス権限がありません" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "" "ユーザー %(user_id)s はプロジェクト %(project_id)s へのアクセス権限がありませ" "ん" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "ユーザー %(user_id)s はすでにグループ %(group_id)s のメンバーです" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "ユーザー '%(user_id)s' がグループ '%(group_id)s' で見つかりません" msgid "User IDs do not match" msgstr "ユーザー ID が一致しません" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "ユーザー ID、ドメイン ID が指定されたユーザー名、ドメイン名が指定されたユー" "ザー名のいずれかが欠落しているため、ユーザー認証を作成できません。" #, python-format msgid "User is disabled: %s" msgstr "ユーザーが無効になっています: %s" #, fuzzy msgid "User is not a trustee." msgstr "ユーザーは受託者ではありません。" #, fuzzy, python-format msgid "User type %s not supported" msgstr "ユーザータイプ %s はサポートされていません" msgid "You are not authorized to perform the requested action." msgstr "要求されたアクションを実行する許可がありません。" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "管理トークンを使用してリソースを作成しようとしています。このトークンはドメイ" "ン内にないため、このリソースが属するドメインを明示的に含める必要があります。" msgid "any options" msgstr "任意のオプション" msgid "auth_type is not Negotiate" msgstr "auth_type はネゴシエートではありません" msgid "authorizing user does not have role required" msgstr "ユーザーを認可するのに必要なロールがありません" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "無効になっているプロジェクトを含むブランチにプロジェクトを作成することはでき" "ません: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. 
Please disable the " "project %s first." msgstr "" "ドメインとして動作する有効になっているプロジェクトを削除できません。最初にプ" "ロジェクト %s を無効にしてください。" #, python-format msgid "group %(group)s" msgstr "グループ %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "ドメインとして動作する同じ名前の 2 つのプロジェクトが存在することは許可されま" "せん: %s" msgid "only root projects are allowed to act as domains." msgstr "ドメインとして動作することが許可されるのは root プロジェクトのみです。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "グループ %(group)s のオプション %(option)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses は正整数またはヌルでなければなりません。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "再委任が許可されている場合は remaining_uses を設定してはなりません" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "グループ %(group)s の更新を要求しましたが、指定された設定には代わりにグルー" "プ %(group_other)s が含まれています" msgid "rescope a scoped token" msgstr "スコープが設定されたトークンのスコープを設定し直します" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "include_subtree も指定される場合、scope.project.id を指定する必要があります。" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "" "tls_cacertdir %s が見つからない、もしくは、ディレクトリではありません。" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s が見つからない、もしくは、ファイルではありません。" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/ko_KR/0000775000175000017500000000000000000000000017570 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000021355 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/ko_KR/LC_MESSAGES/keystone.po0000664000175000017500000007657700000000000023604 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Sungjin Kang , 2013 # Sungjin Kang , 2013 # Andreas Jaeger , 2016. #zanata # Ian Y. Choi , 2018. #zanata # Kuemjong Jeong , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2023-08-25 16:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-08-23 11:00+0000\n" "Last-Translator: Kuemjong Jeong \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 이름에는 다음과 같은 예약 문자가 포함될 수 없음: %(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s은(는) 올바른 알림 이벤트가 아니며 %(actions)s 중 하나여야 합니다." #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s이(가) 신뢰 대시보드 호스트가 아님" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s은(는) 데이터베이스 마이그레이션을 제공하지 않습니다. 마이그레이" "션 저장소 경로가 %(path)s에 존재하지 않거나 디렉토리가 아닙니다." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "%s 필드가 필요하며 비어 있을 수 없음" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "" "(이러한 세부사항을 억제하려면 insecure_debug 모드를 사용 안함으로 설정하십시" "오.)" msgid "--all option cannot be mixed with other options" msgstr "--all 옵션은 다른 옵션과 함께 사용할 수 없음" msgid "A project-scoped token is required to produce a service catalog." msgstr "서비스 카탈로그를 생성하려면 프로젝트 범위 토큰이 필요합니다." msgid "Access token is expired" msgstr "액세스 토큰이 만료됨" msgid "Access token not found" msgstr "액세스 토큰을 찾을 수 없음" msgid "Additional authentications steps required." msgstr "추가 인증 단계가 필요합니다." msgid "An unexpected error occurred when retrieving domain configs" msgstr "도메인 구성 검색 중 예상치 못한 오류 발생" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "%s을(를) 저장하려 할 때 예기치 않은 오류가 발생했음" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "예상치 않은 오류가 발생하여 서버가 사용자 요청을 이행하지 못함." msgid "At least one option must be provided" msgstr "하나 이상의 옵션을 제공해야 함" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "하나 이상의 옵션을 제공해야 합니다. --all 또는 --domain-name을 사용하십시오. " msgid "Attempted to authenticate with an unsupported method." msgstr "지원되지 않는 방법으로 인증을 시도했습니다." msgid "Authentication plugin error." msgstr "인증 플러그인 오류." msgid "Cannot authorize a request token with a token issued via delegation." msgstr "위임을 통해 발행된 토큰으로 요청 토큰에 권한을 부여할 수 없습니다." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "%(option_name)s %(attr)s을(를) 변경할 수 없음" msgid "Cannot change Domain ID" msgstr "도메인 ID를 변경할 수 없음" msgid "Cannot change user ID" msgstr "사용자 ID를 변경할 수 없음" msgid "Cannot change user name" msgstr "사용자 이름을 변경할 수 없음" #, python-format msgid "Cannot create an endpoint with an invalid URL: %(url)s." msgstr "올바르지 않은 URL을 사용하여 엔드포인트를 작성할 수 없음: %(url)s." msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "사용으로 설정된 도메인을 삭제할 수 없습니다. 
먼저 해당 도메인을 사용 안함으" "로 설정하십시오." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "서브트리에 사용 설정된 프로젝트가 있으므로 프로젝트 %(project_id)s을(를) 삭제" "할 수 없습니다." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "계층 구조의 리프가 아니므로 프로젝트 %s을(를) 삭제할 수 없습니다. 전체 하위 " "트리를 삭제하려면 계단식 옵션을 사용하십시오." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "서브트리에 사용 설정된 프로젝트가 있으므로 프로젝트 %(project_id)s을(를) 사" "용 안함으로 설정할 수 없습니다." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "프로젝트 %s에 사용 안함으로 설정된 상위가 있어서 이를 사용할 수 없음" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "그룹에서 소스가 공급되고 사용자 ID별로 필터링된 할당을 나열할 수 없습니다." msgid "Cannot list request tokens with a token issued via delegation." msgstr "위임을 통해 발행된 토큰으로 요청 토큰을 나열할 수 없습니다." #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "권한이 부여되지 않은 역할을 제거할 수 없음: %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "자신 뒤의 첫 번째 매개변수와 같은 힌트 목록 없이 드라이버 호출을 자를 수 없음" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "parents_as_list 및 parents_as_ids 조회 매개변수를 동시에 사용할 수 없습니다." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "subtree_as_list 및 subtree_as_ids 조회 매개변수를 동시에 사용할 수 없습니다." msgid "Cascade update is only allowed for enabled attribute." msgstr "사용된 속성에만 계단식 업데이트가 허용됩니다." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "결합에 효율적인 그룹 필터는 항상 빈 목록을 생성합니다." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "결합에 효율적인 도메인과 상속 필터는 항상 빈 목록을 생성합니다." 
#, python-format msgid "Config API entity at /domains/%s/config" msgstr "/domains/%s/config의 구성 API 엔티티" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "지정된 리젼 ID가 충돌함: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "이용자를 찾을 수 없음" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "ID 제공자 ID를 판별할 수 없습니다. 구성 옵션 %(issuer_attribute)s이(가) 요청 " "환경에 없습니다. " msgid "Could not find Identity Provider identifier in environment" msgstr "환경에서 ID 제공자의 ID를 찾을 수 없음" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "연합 사용자 특성을 ID 값에 맵핑할 수 없습니다. 추가 세부 사항은 사용된 맵핑 " "또는 디버그 로그를 확인하십시오." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "임시 사용자 ID를 설정하는 중에 사용자를 맵핑할 수 없습니다. 맵핑 규칙이 사용" "자 ID/이름을 지정해야 하거나 REMOTE_USER 환경 변수를 설정해야 합니다. " msgid "Could not validate the access token" msgstr "액세스 토큰을 유효성 검증할 수 없음" msgid "Credential signature mismatch" msgstr "자격 증명 서명 불일치" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "구성에서 'enable' 속성이 있는 엔티티의 사용 안함 설정을 무시합니다." #, python-format msgid "Domain cannot be named %s" msgstr "도메인 이름은 %s일 수 없음" #, python-format msgid "Domain cannot have ID %s" msgstr "도메인 ID가 %s일 수 없음" #, python-format msgid "Domain is disabled: %s" msgstr "도메인을 사용 안함: %s" msgid "Domain name cannot contain reserved characters." msgstr "도메인 이름에는 예약된 문자가 포함될 수 없습니다." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "%(domain)s 도메인에 이미 정의된 구성이 있음 - 다음 파일을 무시하십시오. " "%(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "중복 ID, %s." 
#, python-format msgid "Duplicate entry: %s" msgstr "중복된 항목: %s" #, python-format msgid "Duplicate name, %s." msgstr "중복 이름, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "중복된 원격 ID: %s" msgid "EC2 access key not found." msgstr "EC2 액세스 키를 찾을 수 없습니다." msgid "EC2 signature not supplied." msgstr "EC2 서명이 제공되지 않았습니다." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "%(endpoint_id)s 엔드포인트가 %(project_id)s 프로젝트에 없음 " msgid "Endpoint Group Project Association not found" msgstr "엔드포인트 그룹 프로젝트 연관을 찾을 수 없음" msgid "Ensure configuration option idp_entity_id is set." msgstr "구성 옵션 idp_entity_id가 설정되어 있는지 확인하십시오." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "구성 옵션 idp_sso_endpoint가 설정되어 있는지 확인하십시오." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "%(domain)s 도메인에 대한 구성 파일을 구문 분석하는 중 오류 발생. 파일: " "%(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "파일 %(path)s 여는 중 오류 발생: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "규칙 %(path)s 구문 분석 중 오류 발생: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "SQL 드라이버를 사용하기 위해 도메인 %(domain)s을(를) 등록하는 시도가 초과되었" "습니다. 드라이버를 보유한 것으로 보이는 마지막 도메인은 %(last_domain)s입니" "다. 포기하는 중" #, python-format msgid "Expected dict or list: %s" msgstr "예상된 사전 또는 목록: %s" #, python-format msgid "" "Expecting to find %(attribute)s in %(target)s. The server could not comply " "with the request since it is either malformed or otherwise incorrect. The " "client is assumed to be in error." msgstr "" "%(target)s에 %(attribute)s이(가) 있어야 합니다- 서버의 형식이나 다른 항목이 " "올바르지 않기 때문에 서버가 요청을 준수할 수 없습니다. 클라이언트가 오류 상태" "로 간주됩니다." 
msgid "Failed to validate token" msgstr "토큰을 유효성 검증하지 못했음" msgid "Federation token is expired" msgstr "연합 토큰이 만료됨" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "필드 \"remaining_uses\"가 %(value)s(으)로 설정되었으나 신뢰를 재위임하려면 설" "정하지 않아야 함" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "도메인 특정 구성에 대해 %(group)s 그룹이 지원되지 않음" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "맵핑 %(mapping_id)s별로 리턴된 그룹 %(group_id)s을(를) 백엔드에서 찾지 못했습" "니다." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "ID 속성 %(id_attr)s을(를) LDAP 오브젝트 %(dn)s에서 찾을 수 없음" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "ID 제공자 %(idp)s이(가) 사용 안함으로 설정됨" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "승인 ID에 수신 ID 제공자가 포함되지 않습니다." msgid "Invalid EC2 signature." msgstr "올바르지 않은 EC2 서명입니다." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "올바르지 않은 LDAP TLS 인증 옵션: %(option)s. 다음 중 하나 선택: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "올바르지 않은 LDAP TLS_AVAIL 옵션: %s. TLS를 사용할 수 없음" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "올바르지 않은 LDAP deref 옵션: %(option)s. 다음 중 하나 선택: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "올바르지 않은 LDAP 범위: %(scope)s. 다음 중 하나를 선택: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "잘못된 TLS / LDAPS 결합." msgid "Invalid blob in credential" msgstr "신임 정보에 올바르지 blob가 있음" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." 
msgstr "" "구성 파일 이름에 올바르지 않은 도메인 이름 %(domain)s이(가) 있음: %(file)s - " "이 파일을 무시하십시오." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "올바르지 않은 규칙: %(identity_value)s. 'groups' 및 'domain' 키워드가 둘 다 " "지정되어야 합니다." msgid "Invalid signature" msgstr "올바르지 않은 서명" msgid "Invalid user / password" msgstr "올바르지 않은 사용자 / 비밀번호" msgid "Invalid username or TOTP passcode" msgstr "올바르지 않은 사용자 이름 또는 TOTP 비밀번호" msgid "Invalid username or password" msgstr "올바르지 않은 사용자 이름 또는 비밀번호" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "변환 가능한 자원 id의 길이가 최대 허용 문자인 64보다 큼" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "맵핑 %(mapping_id)s의 로컬 섹션에서 존재하지 않는 원격 일치를 참조합니다(예: " "로컬 섹션의 {0})." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "잘못된 형식의 엔드포인트 URL(%(endpoint)s). 세부사항은 오류 로그를 참조하십시" "오." #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "%s 분기에 대한 최대 계층 깊이에 도달했습니다." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "%(member)s 구성원은 이미 %(group)s 그룹의 구성원임" #, python-format msgid "Method not callable: %s" msgstr "메소드를 호출할 수 없음: %s" msgid "Missing entity ID from environment" msgstr "환경에서 엔티티 ID가 누락됨" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "재위임 시 \"redelegation_count\"를 수정할 수 없습니다. 이 매개변수는 생략하" "는 것이 좋습니다." msgid "Multiple domains are not supported" msgstr "여러 도메인이 지원되지 않음" msgid "Must specify either domain or project" msgstr "도메인 프로젝트 중 하나를 지정해야 함" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "프로젝트 도메인 ID와 프로젝트 도메인 이름이 제공되지 않았습니다. 
" msgid "No authenticated user" msgstr "인증된 사용자가 없음" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "암호화 키를 찾을 수 없음: keystone-manage fernet_setup을 부트스트랩 1로 실행" "하십시오." msgid "No options specified" msgstr "지정된 옵션 없음" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "엔드포인트 %(endpoint_id)s과(와) 연관된 정책이 없습니다." msgid "No token in the request" msgstr "요청에 토큰이 없음" msgid "One of the trust agents is disabled or deleted" msgstr "신뢰 에이전트 중 하나가 사용 안함으로 설정되었거나 삭제됨" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "%(option)s 옵션은 도메인 구성 요청 확인 중에 지정된 그룹이 없음을 발견함" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "도메인 특정 구성에 대해 %(group)s 그룹의 %(option)s 옵션이 지원되지않음" #, python-format msgid "Password validation error: %(detail)s." msgstr "확인 오류 발생: %(detail)s." msgid "Project field is required and cannot be empty." msgstr "프로젝트 필드는 필수이므로 비어 있어서는 안 됩니다. " #, python-format msgid "Project is disabled: %s" msgstr "프로젝트를 사용 안함: %s" msgid "Project name cannot contain reserved characters." msgstr "프로젝트 이름에 예약된 문자가 포함될 수 없습니다." 
#, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "그룹 %(group)s에서 옵션 %(option)s의 기본값 읽기는 지원되지 않음" msgid "Redelegation allowed for delegated by trust only" msgstr "신뢰에서 위임한 경우에만 재위임 허용" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "%(redelegation_depth)d의 나머지 재위임 깊이가 허용 범위 [0..%(max_count)d]을" "(를) 벗어남" msgid "Request must have an origin query parameter" msgstr "요청에는 원본 조회 매개변수가 있어야 함" msgid "Request token is expired" msgstr "요청 토큰이 만료됨" msgid "Request token not found" msgstr "요청 토큰을 찾을 수 없음" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "요청된 만기 시간이 재위임된 신뢰에서 제공할 수 있는 시간보다 큼" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "%(requested_count)d의 요청된 재위임 깊이가 허용되는 %(max_count)d보다 깊음" msgid "Scoping to both domain and project is not allowed" msgstr "도메인과 프로젝트에 대한 범위 지정이 허용되지 않음" msgid "Scoping to both domain and trust is not allowed" msgstr "도메인과 신뢰에 대한 범위 지정이 허용되지 않음" msgid "Scoping to both project and trust is not allowed" msgstr "프로젝트와 신뢰에 대한 범위 지정이 허용되지 않음" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "서비스 제공자 %(sp)s이(가) 사용 안함으로 설정됨" msgid "Some of requested roles are not in redelegated trust" msgstr "요청된 일부 역할이 재위임된 신뢰에 없음" msgid "Specify a domain or project, not both" msgstr "도메인 또는 프로젝트 중 하나 지정" msgid "Specify a user or group, not both" msgstr "사용자 또는 그룹 중 하나 지정" #, python-format msgid "" "String length exceeded. The length of string '%(string)s' exceeds the limit " "of column %(type)s(CHAR(%(length)d))." msgstr "" "문자열 길이 제한을 초과하였습니다. '%(string)s' 문자열 길이가 열의 한도 " "%(type)s(CHAR(%(length)d))을(를) 초과하였습니다." msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. 
The client " "is assumed to be in error." msgstr "" "'expires_at'은 지금보다 이전이어서는 안 됩니다. 형식이 잘못되었거나 올바르지 " "않기 때문에 서버가 요청을 준수할 수 없습니다. 클라이언트는 오류 상태로 간주됩" "니다." msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 옵션은 --domain-name 옵션과 함께 사용할 수 없습니다." #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "키스톤 구성 파일 %(config_file)s을(를) 찾을 수 없습니다." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "키스톤 도메인 특정 구성에 하나 이상의 SQL 드라이버가 지정됨(하나만 허용됨): " "%(source)s." msgid "The action you have requested has not been implemented." msgstr "요청한 조치가 구현되지 않았습니다." #, python-format msgid "The password does not match the requirements: %(detail)s." msgstr "비밀번호가 요구 조건에 부합하지 않습니다. : %(detail)s" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "비밀번호 길이는 %(size)i 이하여야 합니다. 비밀번호가 올바르지 않아 서버가 요" "청을 준수할 수 없습니다." msgid "The request you have made requires authentication." msgstr "요청에 인증이 필요합니다." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "취소 호출은 domain_id와 project_id가 둘 다 있으면 안됩니다.키스톤 서버에서 이" "는 버그입니다. 현재 요청이 중단됩니다." msgid "The service you have requested is no longer available on this server." msgstr "요청한 서비스를 더 이상 이 서버에서 사용할 수 없습니다." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "지정된 상위 리젼 %(parent_region_id)s에서 순환 리젼 계층을 작성합니다." 
#, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "구성에 지정된 %(group)s 그룹의 값은 옵션의 사전이어야 함" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "인식되는 Fernet 페이로드 버전이 아님: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "시간소인이 예상된 형식이 아닙니다. 잘못 구성되었거나 올바르지 않으므로 서버" "가 요청을 준수할 수 없습니다. 클라이언트가 오류 상태로 간주됩니다." msgid "Token version is unrecognizable or unsupported." msgstr "토큰 버전이 인식되지 않거나 지원되지 않습니다. " msgid "Trustee has no delegated roles." msgstr "Trustee에 위임된 역할이 없습니다. " msgid "Trustor is disabled." msgstr "Trustor를 사용하지 않습니다. " #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "구성에서 그룹만 지정되도록 %(group)s 그룹을 업데이트하려고 합니다. " #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "%(group)s 그룹에서 %(option)s 옵션을 업데이트하려고 했지만 제공된 구성에 " "%(option_other)s 옵션이 대신 포함되어 있습니다." #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "구성에서 옵션만 지정되도록 %(group)s 그룹에서 %(option)s 옵션을 업데이트하려" "고 합니다." msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "키스톤 데이터베이스를 액세스할 수 없습니다. 데이터베이스가 제대로 구성되어 있" "는지 확인하십시오. " #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "리젼 %(region_id)s 또는 하위 리젼에 연관된 엔드포인트가 있어 삭제할 수 없습니" "다." #, python-format msgid "Unable to locate domain config directory: %s" msgstr "%s: 도메인 설정 디렉토리를 찾을 수 없습니다." 
#, python-format msgid "Unable to lookup user %s" msgstr "%s 사용자를 검색할 수 없음" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "ID 속성 %(attribute)s에 서로 충돌하는 %(new)s 및 %(old)s 값이 있으므로 이 ID " "속성을 조정할 수 없음" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "예상치 못한 지정 유형 발생, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "JSON 홈 응답에 대해 예상치 못한 상태가 요청됨. %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "--domain-name으로 알 수 없는 도메인 '%(name)s'을(를) 지정했음" msgid "Update of `domain_id` is not allowed." msgstr "`domain_id` 업데이트는 허용되지 않습니다." msgid "Update of `is_domain` is not allowed." msgstr "`is_domain`의 업데이트는 허용되지 않습니다. " msgid "Update of `parent_id` is not allowed." msgstr "`parent_id` 업데이트가 허용되지 않습니다." #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "" "%(user_id)s 사용자는 %(domain_id)s 도메인에 대한 액세스 권한이 없습니다. " #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "" "%(user_id)s 사용자는 %(project_id)s 프로젝트에 대한 액세스 권한이 없습니다. " #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "%(user_id)s 사용자는 이미 %(group_id)s 그룹의 구성원임" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "'%(group_id)s' 그룹에 '%(user_id)s' 사용자가 없음" msgid "User IDs do not match" msgstr "사용자 ID가 일치하지 않음" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "사용자 ID, 도메인 ID가 포함된 사용자 이름 또는 도메인 이름이 포함된 사용자 이" "름이 누락되어 사용자 인증을 빌드할 수 없습니다. " #, python-format msgid "User is disabled: %s" msgstr "사용자를 사용 안함: %s" msgid "User is not a trustee." msgstr "사용자는 trustee가 아닙니다." 
#, python-format msgid "User type %s not supported" msgstr "사용자 유형 %s이(가) 지원되지 않음" msgid "You are not authorized to perform the requested action." msgstr "요청한 조치를 수행할 권한이 없습니다." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "관리자 토큰을 사용하여 자원을 생성하려 했습니다. 이 토큰이 도메인에 없으므" "로, 이 자원이 속할 도메인을 명시적으로 포함시켜야 합니다." msgid "any options" msgstr "옵션" msgid "auth_type is not Negotiate" msgstr "auth_type이 Negotiate가 아님" msgid "authorizing user does not have role required" msgstr "인증하는 사용자에게 필요한 역할이 없음" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "사용 안함으로 설정된 프로젝트가 포함된 분기에 프로젝트를 작성할 수 없습니다. " "%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "도메인 역할을 하는 사용 설정된 프로젝트를 삭제할 수 없습니다. 프로젝트 %s을" "(를) 먼저 사용하지 않게 설정하십시오." #, python-format msgid "group %(group)s" msgstr "%(group)s 그룹" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "이름이 같은 두 프로젝트가 도메인 역할을 수행할 수 없음: %s" msgid "only root projects are allowed to act as domains." msgstr "루트 프로젝트만 도메인 역할을 수행할 수 있습니다." #, python-format msgid "option %(option)s in group %(group)s" msgstr "%(group)s 그룹의 %(option)s 옵션" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses는 양의 정수 또는 널이어야 합니다." msgid "remaining_uses must not be set if redelegation is allowed" msgstr "재위임을 허용하는 경우 remaining_uses를 설정하지 않아야 함" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "%(group)s 그룹을 업데이트하도록 요청했지만 제공된 구성에 %(group_other)s 그룹" "이 대신 포함되어 있습니다." 
msgid "rescope a scoped token" msgstr "범위 지정된 토큰의 범위 재지정" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "include_subtree도 지정된 경우 scope.project.id를 지정해야 함" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s를 찾을 수 없으며, 이 디렉토리에 존재하지 않습니다." #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s를 찾을 수 없스며, 그런 파일이 없습니다." ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4221153 keystone-26.0.0/keystone/locale/pt_BR/0000775000175000017500000000000000000000000017571 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5261137 keystone-26.0.0/keystone/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000000000000000021356 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/pt_BR/LC_MESSAGES/keystone.po0000664000175000017500000007263100000000000023570 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Gabriel Wainer, 2013 # Gabriel Wainer, 2013 # Lucas Ribeiro , 2014 # Volmar Oliveira Junior , 2013 # Volmar Oliveira Junior , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "O nome %(entity)s não pode conter os caracteres reservados a seguir: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s não é um evento de notificação válido, deve ser um de: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s não é um host do painel confiável" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s não fornece migrações de banco de dados. O caminho do " "repositório de migração %(path)s não existe ou não é um diretório." #, python-format msgid "%s field is required and cannot be empty" msgstr "campo %s é obrigatório e não pode estar vazio" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Desative o modo insecure_debug para suprimir esses detalhes)." msgid "--all option cannot be mixed with other options" msgstr "A opção --all não pode ser combinada com outras opções" msgid "A project-scoped token is required to produce a service catalog." 
msgstr "" "Um token de projeto com escopo é necessário para produzir um catálogo de " "serviços." msgid "Access token is expired" msgstr "Token de acesso expirou" msgid "Access token not found" msgstr "Token de acesso não encontrado" msgid "Additional authentications steps required." msgstr "Passos de autenticação adicionais requeridos." msgid "An unexpected error occurred when retrieving domain configs" msgstr "Ocorreu um erro inesperado ao recuperar as configurações de domínio" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "Ocorreu um erro inesperado ao tentar armazenar %s" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Um erro inesperado evitou que o servidor cumprisse sua solicitação." msgid "At least one option must be provided" msgstr "Pelo menos uma opção deve ser fornecida" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "Pelo menos uma opção deve ser fornecida, use --all ou --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Tentativa de autenticação com um método não suportado." msgid "Authentication plugin error." msgstr "Erro do plugin de autenticação." msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Não é possível autorizar um token de solicitação com um token emitido por " "meio de delegação." #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Não é possível alterar %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Não é possível alterar o ID do Domínio" msgid "Cannot change user ID" msgstr "Não é possível alterar o ID do usuário" msgid "Cannot change user name" msgstr "Não é possível alterar o nome de usuário" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "" "Não é possível excluir um domínio que esteja ativado, desative-o primeiro." 
#, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Não é possível excluir o projeto%(project_id)s porque sua subárvore contém " "projetos ativados." #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Não é possível excluir o projeto %s porque ele não é uma folha na " "hierarquia. Use a opção em cascata se desejar excluir uma subárvore inteira." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Não é possível desativar o projeto%(project_id)s porque sua subárvore " "contém projetos ativados." #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "Não é possível ativar o projeto %s porque ele possui pais desativados" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Não é possível listar designações originadas a partir de grupos e filtradas " "pelo ID do usuário." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Não é possível listar os tokens de solicitação com um token emitido por meio " "de delegação." #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Não é possível remover role que não foi concedido, %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Não é possível truncar uma chamada de driver sem lista de sugestões como " "primeiro parâmetro após self " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Não é possível usar parâmetros de consulta parents_as_list e parents_as_ids " "ao mesmo tempo." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." 
msgstr "" "Não é possível usar parâmetros de consulta subtree_as_list e subtree_as_ids " "ao mesmo tempo." msgid "Cascade update is only allowed for enabled attribute." msgstr "A atualização em cascata é permitida somente para atributo ativado." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Combinar efetivo e filtro de grupo sempre resultará em uma lista vazia." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Combinar efetivo, domínio e filtros herdados sempre resultará em uma lista " "vazia." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Entidade de API de configuração em /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "" "IDs de região de conflito especificados: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Consumidor não encontrado" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Não foi possível determinar o ID do Provedor de Identidade. A opção de " "configuração %(issuer_attribute)s não foi encontrada no ambiente da " "solicitação." msgid "Could not find Identity Provider identifier in environment" msgstr "" "Não foi possível localizar o identificador do Provedor de Identidade no " "ambiente" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Não foi possível mapear nenhuma propriedade do usuário federado para valores " "de identidade. Verifique os logs de depuração ou o mapeamento usado para " "obter detalhes adicionais" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." 
msgstr "" "Não foi possível mapear o usuário ao configurar a identidade do usuário " "efêmera. Regras de mapeamento devem especificar o ID/nome do usuário ou a " "variável de ambiente REMOTE_USER deve ser configurada." msgid "Could not validate the access token" msgstr "Não foi possível validar o token de acesso" msgid "Credential signature mismatch" msgstr "Incompatibilidade de assinatura de credencial" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "A desativação de uma entidade em que o atributo ‘enable' é ignorado pelo " "configuração." #, python-format msgid "Domain cannot be named %s" msgstr "O domínio não pode ser chamado %s" #, python-format msgid "Domain cannot have ID %s" msgstr "O domínio não pode ter o ID de %s" #, python-format msgid "Domain is disabled: %s" msgstr "O domínio está desativado: %s" msgid "Domain name cannot contain reserved characters." msgstr "O nome do domínio não pode conter caracteres reservados." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "Domínio: %(domain)s já possui uma configuração definida - ignorando arquivo: " "%(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "ID duplicado, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Entrada duplicada: %s" #, python-format msgid "Duplicate name, %s." msgstr "Nome duplicado, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "ID remoto duplicado: %s" msgid "EC2 access key not found." msgstr "Chave de acesso EC2 não encontrada." msgid "EC2 signature not supplied." msgstr "assinatura EC2 não fornecida." 
#, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Endpoint %(endpoint_id)s não encontrado no projeto %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Associação de Projeto do Grupo do Terminal não localizada" msgid "Ensure configuration option idp_entity_id is set." msgstr "Assegure que a opção de configuração idp_entity_id esteja definida." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "Assegure que a opção de configuração idp_sso_endpoint esteja definida." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Erro ao analisar o arquivo de configuração para o domínio: %(domain)s, " "arquivo: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Erro ao abrir arquivo %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Erro ao analisar regras %(path)s: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Tentativas de registrar o domínio %(domain)s para usar SQL driver excederam, " "o ultimo domínio que parece ter tido foi %(last_domain)s, desistindo" #, python-format msgid "Expected dict or list: %s" msgstr "Esperado dict ou list: %s" msgid "Failed to validate token" msgstr "Falha ao validar token" msgid "Federation token is expired" msgstr "O token de federação está expirado" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "O campo \"remaining_uses\" está configurado como %(value)s enquanto ele não " "deve ser configurado para delegar novamente uma confiança" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "O grupo %(group)s não é suportado para 
configurações específicas do domínio" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Grupo %(group_id)s retornou mapeando %(mapping_id)s não foi localizado no " "backend." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Atributo do ID %(id_attr)s não localizado no objeto LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "O Provedor de Identidade %(idp)s está desativado" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "O identificador do provedor de identidade recebido não está incluído entre " "os identificadores aceitos." msgid "Invalid EC2 signature." msgstr "Assinatura EC2 inválida." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Opção de certificado LADP TLS inválida: %(option)s. Escolha uma de: " "%(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Opção LDAP TLS_AVAIL inválida: %s. TLS não dsponível" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "Opção deref LDAP inválida: %(option)s. Escolha uma destas: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "Escopo LDAP inválido: %(scope)s. Escolha um de: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Combinação TLS / LADPS inválida" msgid "Invalid blob in credential" msgstr "BLOB inválido na credencial" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Nome de domínio inválido: %(domain)s localizado no nome do arquivo de " "configuração: %(file)s - ignorando este arquivo." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." 
msgstr "" "Regra inválida: %(identity_value)s. As palavras-chave 'groups' e 'domain' " "devem ser especificadas." msgid "Invalid signature" msgstr "Assinatura inválida" msgid "Invalid user / password" msgstr "Usuário / senha inválido" msgid "Invalid username or TOTP passcode" msgstr "Nome de usuário ou passcode TOTP inválido" msgid "Invalid username or password" msgstr "Nome de usuário ou senha inválidos" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "O comprimento do recurso transformável id > 64, que é o máximo de caracteres " "permitidos" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "A seção local no mapeamento %(mapping_id)s refere-se a uma correspondência " "remota que não existe (por exemplo, {0} em uma seção local)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "URL de endpoint mal-formada (%(endpoint)s), veja o log de ERROS para " "detalhes." #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "Profundidade máx. de hierarquia atingida para a ramificação %s." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "O membro %(member)s já é membro do grupo %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Método não pode ser chamado: %s" msgid "Missing entity ID from environment" msgstr "ID da entidade ausente a partir do ambiente" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "A modificação de \"redelegation_count\" é proibida. É recomendado omitir " "este parâmetro." 
msgid "Multiple domains are not supported" msgstr "Múltiplos domínios não são suportados" msgid "Must specify either domain or project" msgstr "Deve especificar o domínio ou projeto" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "" "Nem o ID do Domínio do Projeto nem o Nome do Domíno do Projeto foram " "fornecidos." msgid "No authenticated user" msgstr "Nenhum usuário autenticado" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Nenhuma chave de criptografia foi localizada; execute keystone-manage " "fernet_setup para autoinicialização um." msgid "No options specified" msgstr "Nenhuma opção especificada" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "Nenhuma política associada ao terminal %(endpoint_id)s." msgid "No token in the request" msgstr "Não existe token na solicitação." msgid "One of the trust agents is disabled or deleted" msgstr "Um dos agentes de confiança está desativado ou excluído" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "A opção %(option)s localizada sem grupo especificado durante a verificação " "de domínio solicitação de configuração" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "A opção %(option)s no grupo %(group)s não é suportada para configurações " "específicas de domínio" msgid "Project field is required and cannot be empty." msgstr "Campo projeto é requerido e não pode ser vazio." #, python-format msgid "Project is disabled: %s" msgstr "O projeto está desativado: %s" msgid "Project name cannot contain reserved characters." msgstr "O nome do projeto não pode conter caracteres reservados." 
#, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Não é suportado ler o padrão para a opção %(option)s no grupo %(group)s" msgid "Redelegation allowed for delegated by trust only" msgstr "Nova delegação permitida para delegado pela confiança somente" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Profundidade da redelegação restante do %(redelegation_depth)d fora do " "intervalo permitido de [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "A solicitação deve ter um parâmetro de consulta de origem" msgid "Request token is expired" msgstr "Token de requisição expirou" msgid "Request token not found" msgstr "Token de requisição não encontrado" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Prazo de expiração solicitado é maior do que a confiança delegada novamente " "pode fornecer" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Profundidade da nova delegação solicitada de %(requested_count)d é maior que " "a %(max_count)d permitida" msgid "Scoping to both domain and project is not allowed" msgstr "A definição de escopo para o domínio e o projeto não é permitida" msgid "Scoping to both domain and trust is not allowed" msgstr "A definição de escopo para o domínio e a trust não é permitida" msgid "Scoping to both project and trust is not allowed" msgstr "A definição de escopo para o projeto e a trust não é permitida" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "O Provedor de Serviços %(sp)s está desativado" msgid "Some of requested roles are not in redelegated trust" msgstr "Algumas funções de confiança não estão na confiança da nova delegação" msgid "Specify a domain or project, not both" msgstr "Especifique um domínio ou projeto, não 
ambos" msgid "Specify a user or group, not both" msgstr "Epecifique um usuário ou grupo, não ambos" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "O 'expires_at' não deve ser anterior a agora. O servidor não pôde obedecer à " "solicitação porque ela está malformada ou de alguma maneira incorreta. O " "cliente é assumido como tendo erro." msgid "The --all option cannot be used with the --domain-name option" msgstr "A opção --all não pode ser usada com a opção --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "" "O arquivo de configuração do Keystone %(config_file)s não pôde ser " "localizado." #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "A configuração específica de domínio Keystone especificou mais de um driver " "SQL (somente um é permitido): %(source)s." msgid "The action you have requested has not been implemented." msgstr "A ação que você solicitou não foi implementada." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "O comprimento da senha deve ser menor ou igual a %(size)i. O servidor não " "pôde obedecer à solicitação porque a senha é inválida." msgid "The request you have made requires authentication." msgstr "A requisição que você fez requer autenticação." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "A chamada de revogação não deve ter ambos domain_id e project_id. Esse é um " "erro no servidor do Keystone. A solicitação atual foi interrompida." 
msgid "The service you have requested is no longer available on this server." msgstr "O serviço que você solicitou não está mais disponível neste servidor." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "A região pai especificada %(parent_region_id)s criaria uma hierarquia de " "região circular." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "O valor do grupo %(group)s especificado na configuração deverá ser um " "dicionário de opções" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Esta não é uma versão de carga útil do Fernet reconhecida: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "A data não está no formato especificado. O servidor não pôde realizar a " "requisição pois ela está mal formada ou incorreta. Assume-se que o cliente " "está com erro." msgid "Token version is unrecognizable or unsupported." msgstr "A versão de Token é irreconhecida ou não suportada" msgid "Trustee has no delegated roles." msgstr "Fiador não possui roles delegados." msgid "Trustor is disabled." msgstr "O fiador está desativado." 
#, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Tentando atualizar o grupo %(group)s de modo que, e apenas que, o grupo deve " "ser especificado na configuração" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Tentando atualizar a opção %(option)s no grupo %(group)s, mas a configuração " "fornecida contém %(option_other)s ao invés" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Tentando atualizar a opção %(option)s no grupo %(group)s, de modo que, e " "apenas que, a opção deve ser especificada na configuração" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Não é possível acessar o banco de dados keystone, verifique se ele está " "configurado corretamente." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Não foi possível excluir a região %(region_id)s, uma vez que ela ou suas " "regiões filhas possuem terminais associados." 
#, python-format msgid "Unable to locate domain config directory: %s" msgstr "Não é possível localizar diretório de configuração de domínio: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Não é possível consultar o usuário %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Não é possível reconciliar o atributo de identidade %(attribute)s, pois ele " "possui valores conflitantes %(new)s e %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Tipo de designação inesperada encontrada, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Status inesperado solicitado para resposta JSON Home, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "Domínio desconhecido '%(name)s' especificado pelo --domain-name" msgid "Update of `domain_id` is not allowed." msgstr "Atualização de `domain_id` não é permitida." msgid "Update of `is_domain` is not allowed." msgstr "Atualização de `is_domain` não é permitida." msgid "Update of `parent_id` is not allowed." msgstr "Atualização de ‘parent_id’ não é permitida." 
#, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "O usuário %(user_id)s não tem acesso ao domínio %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "O usuário %(user_id)s não tem acesso ao projeto %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Usuário %(user_id)s já é membro do grupo %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Usuário '%(user_id)s' não localizado no grupo '%(group_id)s'" msgid "User IDs do not match" msgstr "ID de usuário não confere" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "A autenticação do usuário não pode ser construída porque está faltando o ID " "ou o nome do usuário com o ID do domínio ou o nome do usuário com o nome do " "domínio." #, python-format msgid "User is disabled: %s" msgstr "O usuário está desativado: %s" msgid "User is not a trustee." msgstr "Usuário não é confiável." #, python-format msgid "User type %s not supported" msgstr "Tipo de usuário %s não suportado" msgid "You are not authorized to perform the requested action." msgstr "Você não está autorizado à realizar a ação solicitada." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Você tentou criar um recurso usando o token de administração. Como esse " "token não está dentro de um domínio, deve-se incluir explicitamente um " "domínio ao qual esse recurso possa pertencer." 
msgid "any options" msgstr "quaisquer opções" msgid "auth_type is not Negotiate" msgstr "auth_type não é Negotiate" msgid "authorizing user does not have role required" msgstr "Usuário autorizado não possui o role necessário" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "" "não é possível criar um projeto em uma ramificação que contém um projeto " "desativado: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Não é possível excluir um projeto ativado que age como um domínio. Desative " "o projeto %s primeiro." #, python-format msgid "group %(group)s" msgstr "grupo %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Não é permitido ter dois projetos agindo como domínios com o mesmo nome: %s" msgid "only root projects are allowed to act as domains." msgstr "Somente projetos raízes são permitidos para agirem como domínios. " #, python-format msgid "option %(option)s in group %(group)s" msgstr "opção %(option)s no grupo %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses deve ser um número inteiro positivo ou nulo." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "remaining_uses não deverá ser definido se a nova delegação for permitida" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "solicite atualizar o grupo %(group)s, mas a configuração fornecida contém o " "grupo %(group_other)s ao invés" msgid "rescope a scoped token" msgstr "Defina novamente um escopo de um token com escopo" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "" "scope.project.id deverá ser especificado se include_subtree também for " "especificado" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s não encontrado ou não é um diretório" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s não encontrada ou não é um arquivo" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4261153 keystone-26.0.0/keystone/locale/ru/0000775000175000017500000000000000000000000017211 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/locale/ru/LC_MESSAGES/0000775000175000017500000000000000000000000020776 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/ru/LC_MESSAGES/keystone.po0000664000175000017500000011233700000000000023206 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # kogamatranslator49 , 2015 # sher , 2013 # sher , 2013 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:27+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Russian\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "" "Имя %(entity)s не может содержать следующие зарезервированные символы: " "%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "" "%(event)s не является допустимым событием уведомления, требуется одно из " "значений: %(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s не является надежным хостом сводных панелей" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s не обеспечивает перенос баз данных. Путь к хранилищу миграции " "%(path)s не существует или не является каталогом." 
#, python-format msgid "%s field is required and cannot be empty" msgstr "Поле %s является обязательным и не может быть пустым" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(Выключите режим insecure_debug, чтобы не показывать эти подробности.)" msgid "--all option cannot be mixed with other options" msgstr "опцию --all нельзя указывать вместе с другими опциями" msgid "A project-scoped token is required to produce a service catalog." msgstr "Для создания каталога службы необходим маркер уровня проекта." msgid "Access token is expired" msgstr "Срок действия ключа доступа истек" msgid "Access token not found" msgstr "Ключ доступа не найден" msgid "Additional authentications steps required." msgstr "Требуются дополнительные действия для идентификации." msgid "An unexpected error occurred when retrieving domain configs" msgstr "Возникла непредвиденная ошибка при получении конфигураций доменов" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "При попытке сохранить %s произошла непредвиденная ошибка" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "Из-за непредвиденной ошибки ваш запрос не был выполнен сервером." msgid "At least one option must be provided" msgstr "Необходимо указать хотя бы одну опцию" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "" "Должен быть указан хотя бы один параметр. Укажите --all или --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "Попытка идентификации с использованием неподдерживаемого метода." msgid "Authentication plugin error." msgstr "Ошибка модуля идентификации." msgid "Cannot authorize a request token with a token issued via delegation." msgstr "" "Предоставить права доступа маркеру запроса с маркером, выданным посредством " "делегирования, невозможно." 
#, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "Невозможно изменить %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "Невозможно изменить ИД домена" msgid "Cannot change user ID" msgstr "Невозможно изменить ИД пользователя" msgid "Cannot change user name" msgstr "Невозможно изменить имя пользователя" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "Невозможно удалить включенный домен, сначала выключите его." #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Невозможно удалить проект %(project_id)s, так как его поддерево содержит " "включенные проекты" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "Невозможно удалить проект %s, так как он не является конечным объектом в " "структуре. Используйте каскадную опцию для удаления всего поддерева." #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "" "Нельзя отключить проект %(project_id)s, так как его поддерево содержит " "включенные проекты" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "" "Не удается включить проект %s, так как у него отключены родительские объекты" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "" "Не удается показать список присвоений, полученных из групп и отфильтрованных " "по ИД пользователя." msgid "Cannot list request tokens with a token issued via delegation." msgstr "" "Показать список маркеров запросов с маркером, выданным посредством " "делегирования, невозможно." 
#, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "Удалить роль, которая не была предоставлена, нельзя: %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "Невозможно отсечь вызов драйвера без списка подсказок в качестве первого " "параметра после самого себя " msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "" "Нельзя использовать параметры запроса parents_as_list и parents_as_ids " "одновременно." msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "" "Нельзя использовать параметры запроса subtree_as_list и subtree_as_ids " "одновременно." msgid "Cascade update is only allowed for enabled attribute." msgstr "Каскадное обновление разрешено только для включенных атрибутов." msgid "" "Combining effective and group filter will always result in an empty list." msgstr "" "Сочетание действующего фильтра и фильтра группы всегда дает пустой список." msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "" "Сочетание действующего фильтра, фильтра домена и унаследованного фильтра " "всегда дает пустой список." #, python-format msgid "Config API entity at /domains/%s/config" msgstr "Настроить элемент API в /domains/%s/config" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "Указаны конфликтующие ИД регионов: \"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "Приемник не найден" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "Не удалось определить ИД поставщика идентификации. Опция конфигурации " "%(issuer_attribute)s не найдена в среде запроса." 
msgid "Could not find Identity Provider identifier in environment" msgstr "Не удалось найти идентификатор поставщика идентификаторов в среде" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "Не удается связать объединенные свойства пользователя с идентификаторами. " "Дополнительные сведения о связывании приведены в протоколе отладки." msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "Не удалось привязать пользователя во время настройки временного " "идентификатора пользователя. Правила привязка должны указывать имя/ИД " "пользователя, либо должна быть задана переменная среды REMOTE_USER." msgid "Could not validate the access token" msgstr "Не удалось проверить ключ доступа" msgid "Credential signature mismatch" msgstr "Несовпадение подписи идентификационных данных" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "" "Отключение сущности, при котором атрибут 'enable' в конфигурации " "игнорируется." #, python-format msgid "Domain cannot be named %s" msgstr "Домену нельзя присвоить имя %s" #, python-format msgid "Domain cannot have ID %s" msgstr "Домен не может иметь идентификатор %s" #, python-format msgid "Domain is disabled: %s" msgstr "Домен отключен: %s" msgid "Domain name cannot contain reserved characters." msgstr "Имя домена не может содержать зарезервированные символы." #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "" "У домена %(domain)s уже определена конфигурация - файл пропущен: %(file)s." #, python-format msgid "Duplicate ID, %s." msgstr "Повторяющийся идентификатор, %s." #, python-format msgid "Duplicate entry: %s" msgstr "Повторяющаяся запись: %s" #, python-format msgid "Duplicate name, %s." 
msgstr "Повторяющееся имя, %s." #, python-format msgid "Duplicate remote ID: %s" msgstr "Повторяющийся удаленный ИД: %s" msgid "EC2 access key not found." msgstr "Ключ доступа EC2 не найден." msgid "EC2 signature not supplied." msgstr "Не указана подпись EC2." #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "Конечная точка %(endpoint_id)s не найдена в проекте %(project_id)s" msgid "Endpoint Group Project Association not found" msgstr "Не найдена связь проекта группы конечных точек" msgid "Ensure configuration option idp_entity_id is set." msgstr "Убедитесь, что указан параметр конфигурации idp_entity_id." msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "Убедитесь, что указан параметр конфигурации idp_sso_endpoint." #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "" "Ошибка анализа файла конфигурации для домена %(domain)s, файл: %(file)s." #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "Ошибка при открытии файла %(path)s: %(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "Ошибка при анализе правил %(path)s: %(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "Превышено число попыток регистрации домена %(domain)s для использования " "драйвера SQL. Последний домен, для которого это было сделано - " "%(last_domain)s. 
Больше попыток не будет" #, python-format msgid "Expected dict or list: %s" msgstr "Ожидается dict или list: %s" msgid "Failed to validate token" msgstr "Проверить маркер не удалось" msgid "Federation token is expired" msgstr "Срок действия ключа объединения истек" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "Полю \"remaining_uses\" присвоено значение %(value)s, хотя поле не может " "быть задано для изменения делегирования группы доверия" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "" "Группа %(group)s не поддерживается для определенных конфигураций домена" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "" "Группа %(group_id)s, возвращенная преобразованием %(mapping_id)s, не найдена " "в на базовом сервере." #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "Атрибут ИД %(id_attr)s не найден в объекте LDAP %(dn)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "Поставщик идентификаторов %(idp)s отключен" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "" "Входящий идентификатор поставщика идентификаторов не включен в принятые " "идентификаторы." msgid "Invalid EC2 signature." msgstr "Недопустимая подпись EC2." #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "" "Недопустимая опция certs TLS LDAP: %(option)s. Выберите одно из следующих " "значений: %(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "Недопустимая опция TLS_AVAIL LDAP: %s. TLS недоступен" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "" "Недопустимая опция deref LDAP: %(option)s. 
Выберите одно из следующих " "значений: %(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "" "Недопустимая область LDAP: %(scope)s. Выберите одно из следующих значений: " "%(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "Недопустимое сочетание TLS/LDAPS" msgid "Invalid blob in credential" msgstr "Недопустимый большой двоичный объект в разрешении" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "Обнаружено недопустимое имя домена %(domain)s в файле конфигурации %(file)s " "- файл пропущен." #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "" "Недопустимое правило: %(identity_value)s. Ключевые слова 'groups' и 'domain' " "должны быть указаны." msgid "Invalid signature" msgstr "Недопустимая подпись" msgid "Invalid user / password" msgstr "Недопустимый пользователь / пароль" msgid "Invalid username or TOTP passcode" msgstr "Недопустимое имя пользователя или пароль TOTP" msgid "Invalid username or password" msgstr "Недопустимое имя пользователя или пароль" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "" "Длина ИД преобразуемого ресурса > 64 символов, то есть превышает максимально " "допустимую" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "Локальный раздел в преобразовании %(mapping_id)s указывает на удаленное " "совпадение, которое не существует (например, {0} в локальном разделе)." #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "" "Неверный формат URL конечной точки (%(endpoint)s), подробную информацию см. " "в протоколе ОШИБОК." #, python-format msgid "Max hierarchy depth reached for %s branch." 
msgstr "Для ветви %s достигнута максимальная глубина иерархии." #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "Элемент %(member)s уже является участником группы %(group)s" #, python-format msgid "Method not callable: %s" msgstr "Вызов метода невозможен: %s" msgid "Missing entity ID from environment" msgstr "В среде отсутствует ИД сущности" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "" "Изменение параметра \"redelegation_count\" во время изменения делегирования " "запрещено. Возможен пропуск этого параметра." msgid "Multiple domains are not supported" msgstr "Множественные домены не поддерживаются" msgid "Must specify either domain or project" msgstr "Необходимо указать домен или проект" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "Не указаны ни ИД домена проекта, ни имя домена проекта." msgid "No authenticated user" msgstr "Нет идентифицированного пользователя" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "Не найдены ключи шифрования. Выполните команду keystone-manage fernet_setup, " "чтобы создать ключ." msgid "No options specified" msgstr "Параметры не указаны" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "С конечной точкой %(endpoint_id)s не связано ни одной стратегии." 
msgid "No token in the request" msgstr "В запросе отсутствует маркер" msgid "One of the trust agents is disabled or deleted" msgstr "Один из доверенных агентов отключен или удален" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "" "Обнаружен параметр %(option)s без указанной группы во время проверки запроса " "на настройку домена" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "" "Параметр %(option)s в группе %(group)s не поддерживается для определенных " "конфигураций домена" msgid "Project field is required and cannot be empty." msgstr "Поле проекта является обязательным и не может быть пустым." #, python-format msgid "Project is disabled: %s" msgstr "Проект отключен: %s" msgid "Project name cannot contain reserved characters." msgstr "Имя проекта не может содержать зарезервированные символы." #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "" "Чтение значения по умолчанию для параметра %(option)s в группе %(group)s не " "поддерживается" msgid "Redelegation allowed for delegated by trust only" msgstr "Изменение делегирования разрешено только для доверенного пользователя" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "Оставшаяся глубина изменения делегирования %(redelegation_depth)d выходит за " "пределы разрешенного диапазона [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "Запрос должен содержать параметр origin" msgid "Request token is expired" msgstr "Срок действия маркера запроса истек" msgid "Request token not found" msgstr "Маркер запроса не найден" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "" "Запрошенное время истечения срока действия превышает значение, которое может 
" "указать доверенный пользователь" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "" "Запрошенная глубина изменения делегирования %(requested_count)d превышает " "разрешенную %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "Назначать и домен, и проект в качестве области нельзя" msgid "Scoping to both domain and trust is not allowed" msgstr "Назначать и домен, и группу доверия в качестве области нельзя" msgid "Scoping to both project and trust is not allowed" msgstr "Назначать и проект, и группу доверия в качестве области нельзя" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "Поставщик службы %(sp)s отключен" msgid "Some of requested roles are not in redelegated trust" msgstr "" "Некоторые из запрошенных ролей не относятся к доверенному пользователю с " "измененными полномочиями" msgid "Specify a domain or project, not both" msgstr "Укажите домен или проект, но не то и другое" msgid "Specify a user or group, not both" msgstr "Укажите пользователя или группу, но не то и другое" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "Значение параметра 'expires_at' не должно быть меньше настоящего времени. " "Серверу не удалось исполнить запрос, так как он поврежден или неправильно " "сформирован. Предположительно, клиент находится в состоянии ошибки." msgid "The --all option cannot be used with the --domain-name option" msgstr "Параметр --all нельзя указывать вместе с параметром --domain-name" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." msgstr "Не удалось найти файл конфигурации Keystone %(config_file)s." 
#, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "В конфигурации для домена Keystone указано несколько драйверов SQL (допустим " "только один): %(source)s." msgid "The action you have requested has not been implemented." msgstr "Запрошенное действие не реализовано." #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "" "Длина пароля не должна превышать %(size)i. Сервер не может выполнить запрос, " "поскольку пароль недопустим." msgid "The request you have made requires authentication." msgstr "Выданный запрос требует идентификации." msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "В вызове revoke должны быть указаны domain_id и project_id. Это ошибка в " "коде сервера Keystone. Текущий запрос прерван." msgid "The service you have requested is no longer available on this server." msgstr "Запрошенная служба более не доступна на данном сервере." #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "" "Заданная родительская область %(parent_region_id)s создаст круговую " "структуру области." #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "" "Значение группы %(group)s, указанное в конфигурации, должно быть словарем " "параметров" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "Это не распознанная версия полезной нагрузки Fernet: %s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." 
msgstr "" "Метка в неожиданном формате. Сервер не может выполнить запрос, поскольку он " "либо искажен или неправилен. Клиент, как предполагается, является ошибочным." msgid "Token version is unrecognizable or unsupported." msgstr "Версия маркера не распознана либо не поддерживается." msgid "Trustee has no delegated roles." msgstr "У доверенного лица нет делегированных ролей." msgid "Trustor is disabled." msgstr "Доверитель отключен." #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "Изменение группы %(group)s, чтобы группа должна была указываться только в " "конфигурации" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "Изменение параметра %(option)s в группе %(group)s, однако переданная " "конфигурация содержит параметр %(option_other)s вместо него" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "Изменение параметра %(option)s в группе %(group)s, чтобы параметр должен был " "указываться только в конфигурации" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "" "Нет доступа к базе данных Keystone. Убедитесь, что она настроена правильно." #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "" "Не удалось удалить регион %(region_id)s: регион или его дочерние регионы " "имеют связанные конечные точки." 
#, python-format msgid "Unable to locate domain config directory: %s" msgstr "Не удалось найти каталог конфигурации домена: %s" #, python-format msgid "Unable to lookup user %s" msgstr "Найти пользователя %s невозможно" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "Согласовать атрибут идентификатора, %(attribute)s, невозможно, поскольку он " "содержит конфликтующие значения %(new)s и %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "Обнаружен непредвиденный тип назначения, %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "Запрошено неожиданное состояние для ответа JSON Home, %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "В опции --domain-name указано неизвестное имя домена '%(name)s'" msgid "Update of `domain_id` is not allowed." msgstr "Обновление `domain_id` не разрешено." msgid "Update of `is_domain` is not allowed." msgstr "Обновление `is_domain` не разрешено." msgid "Update of `parent_id` is not allowed." msgstr "Обновление `parent_id` не разрешено." 
#, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "У пользователя %(user_id)s нет доступа к домену %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "У пользователя %(user_id)s нет доступа к проекту %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "Пользователь %(user_id)s уже является участником группы %(group_id)s" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "Пользователь '%(user_id)s' не найден в группе '%(group_id)s'" msgid "User IDs do not match" msgstr "ИД пользователей не совпадают" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "Не удалось скомпоновать идентификацию пользователя, так как отсутствует ИД " "пользователя, имя пользователя с ИД домена либо имя пользователя с именем " "домена." #, python-format msgid "User is disabled: %s" msgstr "Пользователь отключен: %s" msgid "User is not a trustee." msgstr "Пользователь не является доверенным лицом." #, python-format msgid "User type %s not supported" msgstr "Тип пользователя %s не поддерживается" msgid "You are not authorized to perform the requested action." msgstr "У вас нет прав на выполнение запрашиваемого действия." msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "Попытка создания ресурса с помощью административного маркера. Так как этот " "маркер не принадлежит домену, необходимо явно указать домен, которому будет " "принадлежать ресурс." 
msgid "any options" msgstr "любые параметры" msgid "auth_type is not Negotiate" msgstr "auth_type отличен от Negotiate" msgid "authorizing user does not have role required" msgstr "" "пользователю, предоставляющему права доступа, не присвоена требуемая роль" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "Нельзя создать проект в ветви, содержащей отключенный проект: %s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "" "Невозможно удалить включенный проект, работающий как домен. Сначала " "выключите проект %s." #, python-format msgid "group %(group)s" msgstr "группа %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "" "Не разрешено использовать два проекта в качестве доменов с одинаковым " "именем: %s" msgid "only root projects are allowed to act as domains." msgstr "Только корневые проекты могут работать в качестве доменов." #, python-format msgid "option %(option)s in group %(group)s" msgstr "параметр %(option)s в группе %(group)s" msgid "remaining_uses must be a positive integer or null." msgstr "" "Значение remaining_uses должно быть положительным целым числом или равным " "нулю." 
msgid "remaining_uses must not be set if redelegation is allowed" msgstr "" "Если включено изменение делегирования, параметр remaining_uses не должен " "быть задан" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "" "запрос на изменение группы %(group)s, однако переданная конфигурация " "содержит группу %(group_other)s вместо нее" msgid "rescope a scoped token" msgstr "Изменить область помещенного в область ключа" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "scope.project.id необходимо указать, если указан include_subtree" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s не найден или не является каталогом" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s не найден или не является файлом" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4261153 keystone-26.0.0/keystone/locale/zh_CN/0000775000175000017500000000000000000000000017564 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000000000000000021351 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/zh_CN/LC_MESSAGES/keystone.po0000664000175000017500000006703300000000000023563 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Zhong Chaoliang , 2013 # Dongliang Yu , 2013 # Lee Yao , 2013 # Lee Yao , 2013 # Zhong Chaoliang , 2013 # 颜海峰 , 2014 # Andreas Jaeger , 2016. 
#zanata # Eric Lei <1165970798@qq.com>, 2016. #zanata # Research and Development Center UnitedStack , 2022. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-07-01 18:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2022-06-14 12:29+0000\n" "Last-Translator: Research and Development Center UnitedStack " "\n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (China)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名称不能包含以下保留字符:%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "%(event)s 不是有效通知事件,必须是下列其中一项:%(actions)s" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s 不是可信的仪表板主机" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." msgstr "" "%(mod_name)s 未提供数据库迁移。%(path)s 处的迁移存储库路径不存在或者不是目" "录。" #, python-format msgid "%s field is required and cannot be empty" msgstr "%s 字段是必填字段,不能为空" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(禁用 insecure_debug 方式以避免显示这些详细信息。)" msgid "--all option cannot be mixed with other options" msgstr "--all 选项不能与其他选项一起使用" msgid "A project-scoped token is required to produce a service catalog." msgstr "产生服务目录时需要项目范围的令牌。" msgid "Access token is expired" msgstr "访问令牌已过期" msgid "Access token not found" msgstr "找不到访问令牌" msgid "Additional authentications steps required." 
msgstr "需要额外身份验证" msgid "An unexpected error occurred when retrieving domain configs" msgstr "检索域配置时发生意外错误" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "尝试存储 %s 时发生意外错误" msgid "" "An unexpected error prevented the server from accessing encrypted " "credentials." msgstr "一个意外错误阻止服务器获得加密证书" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "意外错误阻止了服务器完成您的请求。" msgid "At least one option must be provided" msgstr "必须至少提供一个选项" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "必须至少提供一个选项,请使用 --all 或 --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "尝试使用未支持的方法进行验证" msgid "Authentication plugin error." msgstr "认证插件错误" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "无法对带有通过代理发出的令牌的请求令牌授权。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "无法更改 %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "无法更改域标识" msgid "Cannot change user ID" msgstr "无法更改用户标识" msgid "Cannot change user name" msgstr "无法更改用户名" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "无法删除已启用的域,请先禁用该域。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "无法删除项目 %(project_id)s,因为其子树包含已启用的项目。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. Use " "the cascade option if you want to delete a whole subtree." msgstr "" "无法删除项目 %s,因为它不是该层次结构中的支叶。如果要删除整个子树,请使用级联" "选项。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "无法禁用项目 %(project_id)s,因为它的子树包含已启用的项目。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "无法启用项目 %s,因为它具有已禁用的父代" msgid "Cannot list assignments sourced from groups and filtered by user ID." 
msgstr "无法列示源自若干组并按用户标识过滤的分配。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "无法列示带有通过代理发出的令牌的请求令牌。" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "无法除去尚未授予的角色 %s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "" "在没有将 hints list 用作 self 后面的第一个参数的情况下,无法截断驱动程序调用" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "无法同时使用 parents_as_list 和 parents_as_ids 查询参数。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "无法同时使用 subtree_as_list 和 subtree_as_ids 查询参数。" msgid "Cascade update is only allowed for enabled attribute." msgstr "只允许对已启用的属性执行级联更新。" msgid "" "Combining effective and group filter will always result in an empty list." msgstr "将有效过滤器与组过滤器进行组合将始终产生空列表。" msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "将有效过滤器、域过滤器和继承的过滤器进行组合将始终产生空列表。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "在 /domains/%s/config 配置 API 实体" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "指定的区域标识有冲突:“%(url_id)s”不等于“%(ref_id)s”" msgid "Consumer not found" msgstr "找不到使用者" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." msgstr "" "未能确定身份提供者标识。在请求环境中找不到配置选项 %(issuer_attribute)s。" msgid "Could not find Identity Provider identifier in environment" msgstr "在环境中,找不到“身份提供者”标识" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "无法将任何联合用户属性映射至身份值。请检查调试日志或所使用的映射以获取其他信" "息。" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." 
msgstr "" "设置临时用户身份时未能映射用户。映射规则必须指定用户标识/用户名,或者必须设" "置 REMOTE_USER 环境变量。" msgid "Could not validate the access token" msgstr "未能验证访问令牌" msgid "Credential signature mismatch" msgstr "凭据签名不匹配" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "正在禁用实体,在此情况下,“enable”属性已由配置忽略。" #, python-format msgid "Domain cannot be named %s" msgstr "无法将域命名为 %s" #, python-format msgid "Domain cannot have ID %s" msgstr "域不能具有标识 %s" #, python-format msgid "Domain is disabled: %s" msgstr "域已禁用:%s" msgid "Domain name cannot contain reserved characters." msgstr "域名不能包含保留字符。" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "域 %(domain)s 已定义配置 - 正在忽略以下文件:%(file)s。" #, python-format msgid "Duplicate ID, %s." msgstr "标识 %s 重复。" #, python-format msgid "Duplicate entry: %s" msgstr "重复条目:%s" #, python-format msgid "Duplicate name, %s." msgstr "名称 %s 重复。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重复远程标识:%s" msgid "EC2 access key not found." msgstr "找不到 EC2 访问密钥。" msgid "EC2 signature not supplied." msgstr "未提供 EC2 签名。" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "在项目 %(project_id)s 中找不到端点 %(endpoint_id)s" msgid "Endpoint Group Project Association not found" msgstr "找不到端点组项目关联" msgid "Ensure configuration option idp_entity_id is set." msgstr "请确保设置了配置选项 idp_entity_id。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "请确保设置了配置选项 idp_sso_endpoint。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." 
msgstr "解析域 %(domain)s 的配置文件时出错,文件为 %(file)s。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "打开文件 %(path)s 时出错:%(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "解析规则 %(path)s 时出错:%(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "注册域 %(domain)s 以使用 SQL 驱动程序的尝试次数已超出限制,显示为进行此尝试的" "最后一个域为 %(last_domain)s,正在放弃" #, python-format msgid "Expected dict or list: %s" msgstr "期望字典或者列表: %s" msgid "Failed to validate token" msgstr "token验证失败" msgid "Federation token is expired" msgstr "联合令牌已到期" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "字段“remaining_uses”已设置为 %(value)s,尽管为了重新委派信任,不能设置该字段" msgid "Global role cannot imply a domain-specific role" msgstr "全局角色不能隐含一个具体域角色。" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "特定于域的配置不支持组 %(group)s" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "在后端中,找不到由映射 %(mapping_id)s 返回的组 %(group_id)s。" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "在 LDAP 对象 %(dn)s 中,找不到标识属性 %(id_attr)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "身份提供者 %(idp)s 已禁用" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "新的“身份提供者”标识未包含在已接受的标识中。" msgid "Invalid EC2 signature." msgstr "无效 EC2 签名。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. Choose one of: %(options)s" msgstr "LDAP TLS 证书选项 %(option)s 无效。请选择下列其中一项:%(options)s" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. 
TLS not available" msgstr "无效的LDAP TLS_AVAIL 选项: %s.TLS无效" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "LDAP deref 选项 %(option)s 无效。请选择下列其中一项:%(options)s" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "无效的 LDAP作用域: %(scope)s. 选择以下选项之一: %(options)s" msgid "Invalid TLS / LDAPS combination" msgstr "无效的 TLS / LDAPS 组合" msgid "Invalid blob in credential" msgstr "凭证中的 BLOB 无效" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "在配置文件名 %(file)s 中找到的域名 %(domain)s 无效 - 正在忽略此文件。" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "规则 %(identity_value)s 无效。必须同时指定关键字“groups”和“domain”。" msgid "Invalid signature" msgstr "签名无效" msgid "Invalid user / password" msgstr "用户/密码无效" msgid "Invalid username or TOTP passcode" msgstr "无效用户名或 TOTP 密码" msgid "Invalid username or password" msgstr "无效用户名或密码" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "可变换资源标识的长度超过 64 个字符(允许的最大字符数)。" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "映射 %(mapping_id)s 中的本地节引用不存在的远程匹配(例如,本地节中的 " "'{0}')。" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "不正确的端点URL(%(endpoint)s), 查看错误日志获取详情" #, python-format msgid "Max hierarchy depth reached for %s branch." msgstr "已达到 %s 分支的最大层深度。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "成员 %(member)s 已属于组 %(group)s" #, python-format msgid "Method not callable: %s" msgstr "方法不可调用:%s" msgid "Missing entity ID from environment" msgstr "环境中缺少实体标识" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." 
msgstr "正在修改“redelegation_count”(当禁止重新委派时)。建议省略此参数。" msgid "Multiple domains are not supported" msgstr "多个域不受支持" msgid "Must specify either domain or project" msgstr "必须指定 domain 或 project" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "既未提供项目域标识,也未提供项目域名。" msgid "No authenticated user" msgstr "不存在任何已认证的用户" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "" "找不到任何加密密钥;请针对引导程序 1 运行 keystone-manage fernet_setup。" msgid "No options specified" msgstr "无选项指定" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "没有任何策略与端点 %(endpoint_id)s 关联。" msgid "No token in the request" msgstr "请求中没有令牌。" msgid "One of the trust agents is disabled or deleted" msgstr "其中一个信任代理已禁用或删除" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "在检查域配置请求时,找到选项 %(option)s,但未指定任何组" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "特定于域的配置不支持组 %(group)s 中的选项 %(option)s" msgid "Project field is required and cannot be empty." msgstr "项目字段是必填字段,不得为空。" #, python-format msgid "Project is disabled: %s" msgstr "项目已禁用:%s" msgid "Project name cannot contain reserved characters." 
msgstr "项目名称不能包含保留字符。" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "系统不支持读取组 %(group)s 中的选项 %(option)s 的缺省值。" msgid "Redelegation allowed for delegated by trust only" msgstr "仅允许对“委派者”信任进行重新委派" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "其余重新委派深度 %(redelegation_depth)d 超出允许的范围 [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "请求必须具有源查询参数" msgid "Request token is expired" msgstr "请求令牌已过期" msgid "Request token not found" msgstr "找不到请求令牌" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "请求的到期时间超过重新委派的信任可提供的到期时间" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "请求的重新委派深度 %(requested_count)d 超过允许的 %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "不允许同时将作用域限定到域和项目" msgid "Scoping to both domain and trust is not allowed" msgstr "不允许同时将作用域限定到域和信任" msgid "Scoping to both project and trust is not allowed" msgstr "不允许同时将作用域限定到项目和信任" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "服务提供程序 %(sp)s 已禁用" msgid "Some of requested roles are not in redelegated trust" msgstr "某些所请求角色未在重新委派的信任中" msgid "Specify a domain or project, not both" msgstr "请指定域或项目,但不是同时指定这两者" msgid "Specify a user or group, not both" msgstr "请指定用户或组,但不是同时指定这两者" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "“expires_at”不得早于现在。服务器未能遵从请求,因为它的格式不正确,或者其他方" "面不正确。客户机被认为发生错误。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 选项不能与 --domain-name 选项配合使用" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." 
msgstr "找不到 Keystone 配置文件 %(config_file)s。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "特定于 Keystone 域的配置已指定多个 SQL 驱动程序(仅允许指定一个):" "%(source)s。" msgid "The action you have requested has not been implemented." msgstr "您请求的操作暂未被执行" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "密码长度必须小于或等于 %(size)i。服务器未能遵照请求,因为密码无效。" msgid "The request you have made requires authentication." msgstr "你的请求需要先授权" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "撤销调用不能同时具有 domain_id 和 project_id。这是 Keystone 服务器中的错误。" "当前请求已异常中止。" msgid "The service you have requested is no longer available on this server." msgstr "在此服务器上,已请求的服务不再可用。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "指定的父区域 %(parent_region_id)s 将创建循环区域层次结构。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "在配置中指定的组 %(group)s 的值应该是选项的字典" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "这不是可识别的 Fernet 有效内容版本:%s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "时间戳记未采用所需格式。服务器未能遵照请求,因为它的格式或者其他方面不正确。" "客户机被认为发生错误。" msgid "Token version is unrecognizable or unsupported." msgstr "令牌版本不可识别或者不受支持。" msgid "Trustee has no delegated roles." msgstr "托管人没有任何已委派的角色。" msgid "Trustor is disabled." 
msgstr "Trustor被禁用" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "正在尝试更新组 %(group)s,因此仅存在以下要求:必须在配置中指定组" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "正在尝试更新组 %(group)s 中的选项 %(option)s,但所提供配置反而包含选项 " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "正在尝试更新组 %(group)s 中的选项 %(option)s,因此仅存在以下要求:必须在配置" "中指定选项" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "无法访问 keystone 数据库,请检查它是否正确配置。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "无法删除区域 %(region_id)s,因为它或它的子区域具有关联的端点。" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "找不到指定的域配置目录:%s" #, python-format msgid "Unable to lookup user %s" msgstr "无法查找用户 %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "无法协调身份属性 %(attribute)s,因为它具有冲突值%(new)s 和 %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "遇到意外的指派类型 %s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "请求 JSON 主页响应时处于意外状态,%s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "--domain-name 指定的“%(name)s”是未知域" msgid "Update of `domain_id` is not allowed." msgstr "不允许更新“domain_id”。" msgid "Update of `is_domain` is not allowed." msgstr "不允许更新“is_domain”。" msgid "Update of `parent_id` is not allowed." 
msgstr "不允许更新“parent_id”。" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "用户%(user_id)s对域%(domain_id)s没有任何访问权限" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "用户%(user_id)s 没有访问项目 %(project_id)s的权限" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "用户%(user_id)s 已是组 %(group_id)s 的成员" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "在组“%(group_id)s”中找不到用户“%(user_id)s”" msgid "User IDs do not match" msgstr "用户ID不匹配" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "由于缺少用户标识、具有域标识的用户名或者具有域名的用户名,因此无法构建用户认" "证。" #, python-format msgid "User is disabled: %s" msgstr "用户已禁用:%s" msgid "User is not a trustee." msgstr "用户不是受托人。" #, python-format msgid "User type %s not supported" msgstr "用户类型 %s 不受支持" msgid "You are not authorized to perform the requested action." msgstr "您没有授权完成所请求的操作。" msgid "" "You cannot change your password at this time due to password policy " "disallowing password changes. Please contact your administrator to reset " "your password." msgstr "" "因为密码策略被设置为禁止修改密码,目前您不能更改密码。请联系管理员重置您的密" "码。" #, python-format msgid "" "You cannot change your password at this time due to the minimum password " "age. Once you change your password, it must be used for %(min_age_days)d " "day(s) before it can be changed. Please try again in %(days_left)d day(s) or " "contact your administrator to reset your password." msgstr "" "没有达到密码最小使用时长,目前您不能更改密码。一旦您修改了密码,在下次可被修" "改前该密码必须使用%(min_age_days)d天。请在%(days_left)d天后重试,或者联系管理" "员重置您的密码。" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." 
msgstr "" "您已尝试使用管理员令牌创建资源。因为此令牌不在域中,所以您必须显式添加域以使" "此资源成为其成员。" msgid "any options" msgstr "任何选项" msgid "auth_type is not Negotiate" msgstr "auth_type 不是“Negotiate”" msgid "authorizing user does not have role required" msgstr "授权用户没有必需的角色" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "无法在包含已禁用项目的分支中创建项目:%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." msgstr "无法删除充当域的已启用项目。请先禁用项目 %s。" #, python-format msgid "group %(group)s" msgstr "组 %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "不允许两个同名项目充当域:%s" msgid "only root projects are allowed to act as domains." msgstr "只允许根项目充当域。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "组 %(group)s 中的选项 %(option)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses 必须为正整数或 Null。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "如果允许重新委派,那么不能设置 remaining_uses" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "请求更新组 %(group)s,但所提供配置反而包含组 %(group_other)s" msgid "rescope a scoped token" msgstr "请重新确定带范围的令牌的范围" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "如果还指定了 include_subtree,那么必须指定 scope.project.id" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s 未找到或者不是一个目录" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s 未找到或者不是一个文件" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4261153 keystone-26.0.0/keystone/locale/zh_TW/0000775000175000017500000000000000000000000017616 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 
xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/locale/zh_TW/LC_MESSAGES/0000775000175000017500000000000000000000000021403 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/locale/zh_TW/LC_MESSAGES/keystone.po0000664000175000017500000006431000000000000023610 0ustar00zuulzuul00000000000000# Translations template for keystone. # Copyright (C) 2015 OpenStack Foundation # This file is distributed under the same license as the keystone project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: keystone VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2022-05-20 04:41+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-07 06:28+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" #, python-format msgid "%(detail)s" msgstr "%(detail)s" #, python-format msgid "" "%(entity)s name cannot contain the following reserved characters: %(chars)s" msgstr "%(entity)s 名稱不能包含下列保留字元:%(chars)s" #, python-format msgid "" "%(event)s is not a valid notification event, must be one of: %(actions)s" msgstr "%(event)s 不是有效的通知事件,必須是 %(actions)s 的其中之一" #, python-format msgid "%(host)s is not a trusted dashboard host" msgstr "%(host)s 不是授信儀表板主機" #, python-format msgid "%(message)s %(amendment)s" msgstr "%(message)s %(amendment)s" #, python-format msgid "" "%(mod_name)s doesn't provide database migrations. The migration repository " "path at %(path)s doesn't exist or isn't a directory." 
msgstr "" "%(mod_name)s 未提供資料庫移轉。%(path)s 處的移轉儲存庫路徑不存在或者不是目" "錄。" #, python-format msgid "%s field is required and cannot be empty" msgstr "%s 欄位是必要欄位,因此不能是空的" msgid "(Disable insecure_debug mode to suppress these details.)" msgstr "(停用 insecure_debug 模式,以暫停這些詳細資料。)" msgid "--all option cannot be mixed with other options" msgstr "--all 選項不能與其他選項混合" msgid "A project-scoped token is required to produce a service catalog." msgstr "需要專案範圍的記號來產生服務型錄。" msgid "Access token is expired" msgstr "存取記號過期" msgid "Access token not found" msgstr "找不到存取記號" msgid "Additional authentications steps required." msgstr "需要其他鑑別步驟。" msgid "An unexpected error occurred when retrieving domain configs" msgstr "擷取網域配置時發生非預期的錯誤" #, python-format msgid "An unexpected error occurred when trying to store %s" msgstr "嘗試儲存 %s 時發生非預期的錯誤" msgid "An unexpected error prevented the server from fulfilling your request." msgstr "發生非預期的錯誤,造成伺服器無法履行要求。" msgid "At least one option must be provided" msgstr "必須提供至少一個選項" msgid "At least one option must be provided, use either --all or --domain-name" msgstr "必須提供至少一個選項,請使用 --all 或 --domain-name" msgid "Attempted to authenticate with an unsupported method." msgstr "已嘗試使用不支援的方法進行鑑別。" msgid "Authentication plugin error." msgstr "鑑別外掛程式錯誤。" msgid "Cannot authorize a request token with a token issued via delegation." msgstr "無法使用透過委派發出之記號授權要求記號。" #, python-format msgid "Cannot change %(option_name)s %(attr)s" msgstr "無法變更 %(option_name)s %(attr)s" msgid "Cannot change Domain ID" msgstr "無法變更網域 ID" msgid "Cannot change user ID" msgstr "無法變更使用者 ID" msgid "Cannot change user name" msgstr "無法變更使用者名稱" msgid "Cannot delete a domain that is enabled, please disable it first." msgstr "無法刪除已啟用的網域,請先停用該網域。" #, python-format msgid "" "Cannot delete project %(project_id)s since its subtree contains enabled " "projects." msgstr "無法刪除專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。" #, python-format msgid "" "Cannot delete the project %s since it is not a leaf in the hierarchy. 
Use " "the cascade option if you want to delete a whole subtree." msgstr "" "無法刪除專案 %s,因為它不是階層中的葉節點。如果要刪除整個子樹狀結構,請使用重" "疊顯示選項。" #, python-format msgid "" "Cannot disable project %(project_id)s since its subtree contains enabled " "projects." msgstr "無法停用專案 %(project_id)s,因為它的子樹狀結構包含已啟用的專案。" #, python-format msgid "Cannot enable project %s since it has disabled parents" msgstr "無法啟用專案 %s,因為它具有已停用的母項" msgid "Cannot list assignments sourced from groups and filtered by user ID." msgstr "無法列出由群組提供且依使用者 ID 進行過濾的指派。" msgid "Cannot list request tokens with a token issued via delegation." msgstr "無法列出含有透過委派發出之記號的要求記號。" #, python-format msgid "Cannot remove role that has not been granted, %s" msgstr "無法移除尚未授權的角色,%s" msgid "" "Cannot truncate a driver call without hints list as first parameter after " "self " msgstr "屬性 limit 不在 hints 清單時,無法截斷驅動程式呼叫" msgid "" "Cannot use parents_as_list and parents_as_ids query params at the same time." msgstr "無法同時使用 parents_as_list 與 parents_as_ids查詢參數。" msgid "" "Cannot use subtree_as_list and subtree_as_ids query params at the same time." msgstr "無法同時使用 subtree_as_list 與 subtree_as_ids 查詢參數。" msgid "Cascade update is only allowed for enabled attribute." msgstr "只容許對已啟用的屬性進行重疊顯示更新。" msgid "" "Combining effective and group filter will always result in an empty list." msgstr "結合作用中的過濾器和群組過濾器將一律導致空清單。" msgid "" "Combining effective, domain and inherited filters will always result in an " "empty list." msgstr "結合作用中的過濾器、網域過濾器及繼承的過濾器將一律導致空清單。" #, python-format msgid "Config API entity at /domains/%s/config" msgstr "在 /domains/%s/config 處配置 API 實體" #, python-format msgid "Conflicting region IDs specified: \"%(url_id)s\" != \"%(ref_id)s\"" msgstr "指定了相衝突的區域 ID:\"%(url_id)s\" != \"%(ref_id)s\"" msgid "Consumer not found" msgstr "找不到消費者" #, python-format msgid "" "Could not determine Identity Provider ID. The configuration option " "%(issuer_attribute)s was not found in the request environment." 
msgstr "" "無法判定身分提供者 ID。在要求環境中,找不到配置選項%(issuer_attribute)s。" msgid "Could not find Identity Provider identifier in environment" msgstr "在環境中找不到身分提供者 ID" msgid "" "Could not map any federated user properties to identity values. Check debug " "logs or the mapping used for additional details." msgstr "" "無法將任何聯合使用者內容對映至身分值。如需其他詳細資料,請檢查除錯日誌或使用" "的對映。" msgid "" "Could not map user while setting ephemeral user identity. Either mapping " "rules must specify user id/name or REMOTE_USER environment variable must be " "set." msgstr "" "設定暫時使用者身分時,無法對映使用者。對映規則必須指定使用者 ID/名稱,或者必" "須設定 REMOTE_USER環境變數。" msgid "Could not validate the access token" msgstr "無法驗證存取記號" msgid "Credential signature mismatch" msgstr "認證簽章不符" msgid "" "Disabling an entity where the 'enable' attribute is ignored by configuration." msgstr "正在停用配置已忽略其 'enable' 屬性的實體。" #, python-format msgid "Domain cannot be named %s" msgstr "網域不能命名為 %s" #, python-format msgid "Domain cannot have ID %s" msgstr "網域不能具有 ID %s" #, python-format msgid "Domain is disabled: %s" msgstr "已停用網域:%s" msgid "Domain name cannot contain reserved characters." msgstr "網域名稱不能包含保留字元。" #, python-format msgid "" "Domain: %(domain)s already has a configuration defined - ignoring file: " "%(file)s." msgstr "網域 %(domain)s 已定義配置 - 正在忽略檔案 %(file)s。" #, python-format msgid "Duplicate ID, %s." msgstr "重複的 ID,%s。" #, python-format msgid "Duplicate entry: %s" msgstr "重複項目:%s" #, python-format msgid "Duplicate name, %s." msgstr "重複的名稱,%s。" #, python-format msgid "Duplicate remote ID: %s" msgstr "重複的遠端 ID:%s" msgid "EC2 access key not found." msgstr "找不到 EC2 存取金鑰。" msgid "EC2 signature not supplied." msgstr "未提供 EC2 簽章。" #, python-format msgid "Endpoint %(endpoint_id)s not found in project %(project_id)s" msgstr "在專案 %(project_id)s 中找不到端點 %(endpoint_id)s" msgid "Endpoint Group Project Association not found" msgstr "找不到端點群組專案關聯" msgid "Ensure configuration option idp_entity_id is set." 
msgstr "請確保已設定配置選項 idp_entity_id。" msgid "Ensure configuration option idp_sso_endpoint is set." msgstr "請確保已設定配置選項 idp_sso_endpoint。" #, python-format msgid "" "Error parsing configuration file for domain: %(domain)s, file: %(file)s." msgstr "剖析網域 %(domain)s 的配置檔時發生錯誤,檔案:%(file)s。" #, python-format msgid "Error while opening file %(path)s: %(err)s" msgstr "開啟檔案 %(path)s 時發生錯誤:%(err)s" #, python-format msgid "Error while parsing rules %(path)s: %(err)s" msgstr "剖析規則 %(path)s 時發生錯誤:%(err)s" #, python-format msgid "" "Exceeded attempts to register domain %(domain)s to use the SQL driver, the " "last domain that appears to have had it is %(last_domain)s, giving up" msgstr "" "已超過嘗試登錄網域 %(domain)s 以使用 SQL 驅動程式的次數,似乎已經具有它的最後" "一個網域是 %(last_domain)s,將放棄" #, python-format msgid "Expected dict or list: %s" msgstr "預期字典或清單:%s" msgid "Failed to validate token" msgstr "無法驗證記號" msgid "Federation token is expired" msgstr "聯合記號過期" #, python-format msgid "" "Field \"remaining_uses\" is set to %(value)s while it must not be set in " "order to redelegate a trust" msgstr "" "欄位 \"remaining_uses\" 設定為 %(value)s,但為了重新委派信任,不能設定該欄位" #, python-format msgid "Group %(group)s is not supported for domain specific configurations" msgstr "網域特定配置不支援群組 %(group)s" #, python-format msgid "" "Group %(group_id)s returned by mapping %(mapping_id)s was not found in the " "backend." msgstr "在後端找不到對映 %(mapping_id)s 所傳回的群組 %(group_id)s。" #, python-format msgid "ID attribute %(id_attr)s not found in LDAP object %(dn)s" msgstr "在 LDAP 物件 %(dn)s 中找不到 ID 屬性 %(id_attr)s" #, python-format msgid "Identity Provider %(idp)s is disabled" msgstr "已停用身分提供者 %(idp)s" msgid "" "Incoming identity provider identifier not included among the accepted " "identifiers." msgstr "送入的身分提供者 ID 未包括在接受的 ID 中。" msgid "Invalid EC2 signature." msgstr "無效的 EC2 簽章。" #, python-format msgid "Invalid LDAP TLS certs option: %(option)s. 
Choose one of: %(options)s" msgstr "無效的 LDAP TLS 憑證選項:%(option)s。請選擇 %(options)s 的其中之一" #, python-format msgid "Invalid LDAP TLS_AVAIL option: %s. TLS not available" msgstr "無效的 LDAP TLS_AVAIL 選項:%s。TLS 無法使用" #, python-format msgid "Invalid LDAP deref option: %(option)s. Choose one of: %(options)s" msgstr "無效的 LDAP deref 選項:%(option)s。請選擇 %(options)s 的其中之一" #, python-format msgid "Invalid LDAP scope: %(scope)s. Choose one of: %(options)s" msgstr "無效的 LDAP 範圍:%(scope)s。請選擇 %(options)s 的其中之一" msgid "Invalid TLS / LDAPS combination" msgstr "無效的 TLS/LDAPS 組合" msgid "Invalid blob in credential" msgstr "認證中的二進位大型物件無效" #, python-format msgid "" "Invalid domain name: %(domain)s found in config file name: %(file)s - " "ignoring this file." msgstr "" "在配置檔名稱 %(file)s 中找到的網域名稱 %(domain)s 無效 - 正在忽略此檔案。" #, python-format msgid "" "Invalid rule: %(identity_value)s. Both 'groups' and 'domain' keywords must " "be specified." msgstr "規則 %(identity_value)s 無效。必須指定 'groups' 及 'domain' 關鍵字。" msgid "Invalid signature" msgstr "無效的簽章" msgid "Invalid user / password" msgstr "無效的使用者/密碼" msgid "Invalid username or TOTP passcode" msgstr "無效的使用者名稱或 TOTP 密碼" msgid "Invalid username or password" msgstr "無效的使用者名稱或密碼" msgid "" "Length of transformable resource id > 64, which is max allowed characters" msgstr "可轉換資源 ID 的長度大於 64(這是所容許的字元數目上限)" #, python-format msgid "" "Local section in mapping %(mapping_id)s refers to a remote match that " "doesn't exist (e.g. {0} in a local section)." msgstr "" "對映 %(mapping_id)s 中的本端區段參照了一個不存在的遠端相符項(例如,本端區段" "中的 '{0}')。" #, python-format msgid "Malformed endpoint URL (%(endpoint)s), see ERROR log for details." msgstr "端點 URL (%(endpoint)s) 的格式不正確,請參閱錯誤日誌以取得詳細資料。" #, python-format msgid "Max hierarchy depth reached for %s branch." 
msgstr "已達到 %s 分支的階層深度上限。" #, python-format msgid "Member %(member)s is already a member of group %(group)s" msgstr "成員 %(member)s 已是群組 %(group)s 的成員" #, python-format msgid "Method not callable: %s" msgstr "方法不可呼叫:%s" msgid "Missing entity ID from environment" msgstr "環境中遺漏實體 ID" msgid "" "Modifying \"redelegation_count\" upon redelegation is forbidden. Omitting " "this parameter is advised." msgstr "禁止在重新委派時修改 \"redelegation_count\"。建議省略此參數。" msgid "Multiple domains are not supported" msgstr "不支援多個網域" msgid "Must specify either domain or project" msgstr "必須指定 Domain 或 Project" msgid "Neither Project Domain ID nor Project Domain Name was provided." msgstr "既未提供「專案網域 ID」,也未提供「專案網域名稱」。" msgid "No authenticated user" msgstr "沒有已鑑別使用者" msgid "" "No encryption keys found; run keystone-manage fernet_setup to bootstrap one." msgstr "找不到加密金鑰;請執行 keystone-manage fernet_setup 以引導一個。" msgid "No options specified" msgstr "未指定選項" #, python-format msgid "No policy is associated with endpoint %(endpoint_id)s." msgstr "沒有原則與端點 %(endpoint_id)s 相關聯。" msgid "No token in the request" msgstr "要求中沒有記號" msgid "One of the trust agents is disabled or deleted" msgstr "已停用或刪除其中一個信任代理程式" #, python-format msgid "" "Option %(option)s found with no group specified while checking domain " "configuration request" msgstr "檢查網域配置要求時,發現選項 %(option)s 未指定任何群組" #, python-format msgid "" "Option %(option)s in group %(group)s is not supported for domain specific " "configurations" msgstr "網域特定配置不支援群組 %(group)s 中的選項 %(option)s" msgid "Project field is required and cannot be empty." msgstr "專案欄位是必要的,不能是空的。" #, python-format msgid "Project is disabled: %s" msgstr "已停用專案:%s" msgid "Project name cannot contain reserved characters." 
msgstr "專案名稱不能包含保留字元。" #, python-format msgid "" "Reading the default for option %(option)s in group %(group)s is not supported" msgstr "不支援讀取群組 %(group)s 中選項 %(option)s 的預設值" msgid "Redelegation allowed for delegated by trust only" msgstr "僅委派為信任時,才容許重新委派" #, python-format msgid "" "Remaining redelegation depth of %(redelegation_depth)d out of allowed range " "of [0..%(max_count)d]" msgstr "" "剩餘的重新委派深度 %(redelegation_depth)d 超出容許的範圍 [0..%(max_count)d]" msgid "Request must have an origin query parameter" msgstr "要求必須具有原始查詢參數" msgid "Request token is expired" msgstr "要求記號過期" msgid "Request token not found" msgstr "找不到要求記號" msgid "Requested expiration time is more than redelegated trust can provide" msgstr "所要求的有效期限超過重新委派之信任可提供的有效期限" #, python-format msgid "" "Requested redelegation depth of %(requested_count)d is greater than allowed " "%(max_count)d" msgstr "所要求的重新委派深度 %(requested_count)d 大於容許的 %(max_count)d" msgid "Scoping to both domain and project is not allowed" msgstr "不容許將範圍同時設定為網域及專案" msgid "Scoping to both domain and trust is not allowed" msgstr "不容許將範圍同時設定為網域及信任" msgid "Scoping to both project and trust is not allowed" msgstr "不容許將範圍同時設定為專案及信任" #, python-format msgid "Service Provider %(sp)s is disabled" msgstr "已停用服務提供者 %(sp)s" msgid "Some of requested roles are not in redelegated trust" msgstr "所要求的部分角色不在重新委派的信任中" msgid "Specify a domain or project, not both" msgstr "指定網域或專案,但不要同時指定兩者" msgid "Specify a user or group, not both" msgstr "指定使用者或群組,但不要同時指定兩者" msgid "" "The 'expires_at' must not be before now. The server could not comply with " "the request since it is either malformed or otherwise incorrect. The client " "is assumed to be in error." msgstr "" "'expires_at' 不得早於現在。伺服器無法遵守要求,因為它的格式不正確,或者在其他" "方面發生錯誤。系統會假定用戶端處於錯誤狀態。" msgid "The --all option cannot be used with the --domain-name option" msgstr "--all 選項不能與 --domain-name 選項搭配使用" #, python-format msgid "The Keystone configuration file %(config_file)s could not be found." 
msgstr "找不到 Keystone 配置檔 %(config_file)s。" #, python-format msgid "" "The Keystone domain-specific configuration has specified more than one SQL " "driver (only one is permitted): %(source)s." msgstr "" "Keystone 網域特定配置指定了多個SQL 驅動程式(僅允許一個):%(source)s。" msgid "The action you have requested has not been implemented." msgstr "尚未實作所要求的動作。" #, python-format msgid "" "The password length must be less than or equal to %(size)i. The server could " "not comply with the request because the password is invalid." msgstr "密碼長度必須小於或等於 %(size)i。伺服器無法遵守要求,因為密碼無效。" msgid "The request you have made requires authentication." msgstr "您發出的要求需要鑑別。" msgid "" "The revoke call must not have both domain_id and project_id. This is a bug " "in the Keystone server. The current request is aborted." msgstr "" "撤銷呼叫不得同時具有 domain_id 和 project_id。這是Keystone 伺服器中的錯誤。已" "中斷現行要求。" msgid "The service you have requested is no longer available on this server." msgstr "此伺服器上無法再使用所要求的服務。" #, python-format msgid "" "The specified parent region %(parent_region_id)s would create a circular " "region hierarchy." msgstr "指定的母項區域 %(parent_region_id)s 會建立循環區域階層。" #, python-format msgid "" "The value of group %(group)s specified in the config should be a dictionary " "of options" msgstr "在配置中指定的群組 %(group)s 的值應該為選項字典" #, python-format msgid "This is not a recognized Fernet payload version: %s" msgstr "這不是已辨識的 Fernet 內容版本:%s" msgid "" "Timestamp not in expected format. The server could not comply with the " "request since it is either malformed or otherwise incorrect. The client is " "assumed to be in error." msgstr "" "時間戳記的格式不符合預期。伺服器無法遵守要求,因為它的格式不正確。系統會假定" "用戶端處於錯誤狀態。" msgid "Token version is unrecognizable or unsupported." msgstr "無法辨識或不支援記號版本。" msgid "Trustee has no delegated roles." msgstr "受託人沒有委派的角色。" msgid "Trustor is disabled." 
msgstr "委託人已停用。" #, python-format msgid "" "Trying to update group %(group)s, so that, and only that, group must be " "specified in the config" msgstr "" "正在嘗試更新群組 %(group)s,因此必須在配置中指定該群組且必須僅指定該群組" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, but config provided " "contains option %(option_other)s instead" msgstr "" "正在嘗試更新群組 %(group)s 中的選項 %(option)s,但提供的配置卻包含選項 " "%(option_other)s" #, python-format msgid "" "Trying to update option %(option)s in group %(group)s, so that, and only " "that, option must be specified in the config" msgstr "" "正在嘗試更新群組 %(group)s 中的選項 %(option)s,因此必須在配置中指定該選項且" "必須僅指定該選項" msgid "" "Unable to access the keystone database, please check it is configured " "correctly." msgstr "無法存取 Keystone 資料庫,請檢查它是否已正確配置。" #, python-format msgid "" "Unable to delete region %(region_id)s because it or its child regions have " "associated endpoints." msgstr "無法刪除區域 %(region_id)s,因為此區域或其子區域具有相關聯的端點。" #, python-format msgid "Unable to locate domain config directory: %s" msgstr "找不到網域配置目錄:%s" #, python-format msgid "Unable to lookup user %s" msgstr "無法查閱使用者 %s" #, python-format msgid "" "Unable to reconcile identity attribute %(attribute)s as it has conflicting " "values %(new)s and %(old)s" msgstr "" "無法核對身分屬性 %(attribute)s,因為該屬性具有衝突的值 %(new)s 和 %(old)s" #, python-format msgid "Unexpected assignment type encountered, %s" msgstr "發現非預期的指派類型:%s" #, python-format msgid "Unexpected status requested for JSON Home response, %s" msgstr "針對「JSON 起始目錄」回應要求了非預期狀態 %s" #, python-format msgid "Unknown domain '%(name)s' specified by --domain-name" msgstr "由 --domain-name 指定的網域 '%(name)s' 不明" msgid "Update of `domain_id` is not allowed." msgstr "不容許更新 'domain_id'。" msgid "Update of `is_domain` is not allowed." msgstr "不容許更新 `is_domain`。" msgid "Update of `parent_id` is not allowed." 
msgstr "不容許更新 'parent_id'。" #, python-format msgid "User %(user_id)s has no access to domain %(domain_id)s" msgstr "使用者 %(user_id)s 無法存取網域 %(domain_id)s" #, python-format msgid "User %(user_id)s has no access to project %(project_id)s" msgstr "使用者 %(user_id)s 無法存取專案 %(project_id)s" #, python-format msgid "User %(user_id)s is already a member of group %(group_id)s" msgstr "使用者 %(user_id)s 已是群組 %(group_id)s 的成員" #, python-format msgid "User '%(user_id)s' not found in group '%(group_id)s'" msgstr "在群組 '%(group_id)s' 中找不到使用者 '%(user_id)s'" msgid "User IDs do not match" msgstr "使用者 ID 不符" msgid "" "User auth cannot be built due to missing either user id, or user name with " "domain id, or user name with domain name." msgstr "" "無法建置使用者鑑別,因為遺漏了使用者 ID、具有網域 ID 的使用者名稱或具有網域名" "稱的使用者名稱。" #, python-format msgid "User is disabled: %s" msgstr "已停用使用者:%s" msgid "User is not a trustee." msgstr "使用者不是受託人。" #, python-format msgid "User type %s not supported" msgstr "使用者類型 %s 不受支援" msgid "You are not authorized to perform the requested action." msgstr "您未獲授權來執行所要求的動作。" msgid "" "You have tried to create a resource using the admin token. As this token is " "not within a domain you must explicitly include a domain for this resource " "to belong to." msgstr "" "您已嘗試使用管理者記號建立資源。因為此記號不在網域內,所以您必須明確包含某個" "網域,以讓此資源屬於該網域。" msgid "any options" msgstr "任何選項" msgid "auth_type is not Negotiate" msgstr "auth_type 不是 Negotiate" msgid "authorizing user does not have role required" msgstr "授權使用者不具有必要的角色" #, python-format msgid "cannot create a project in a branch containing a disabled project: %s" msgstr "無法在包含已停用專案的分支中建立專案:%s" #, python-format msgid "" "cannot delete an enabled project acting as a domain. Please disable the " "project %s first." 
msgstr "無法刪除已啟用且正在充當網域的專案。請先停用專案 %s。" #, python-format msgid "group %(group)s" msgstr "群組 %(group)s" #, python-format msgid "" "it is not permitted to have two projects acting as domains with the same " "name: %s" msgstr "不允許包含兩個具有相同名稱且充當網域的專案:%s" msgid "only root projects are allowed to act as domains." msgstr "只容許根專案充當網域。" #, python-format msgid "option %(option)s in group %(group)s" msgstr "群組 %(group)s 中的選項 %(option)s" msgid "remaining_uses must be a positive integer or null." msgstr "remaining_uses 必須是正整數或空值。" msgid "remaining_uses must not be set if redelegation is allowed" msgstr "如果容許重新委派,則不得設定 remaining_uses" #, python-format msgid "" "request to update group %(group)s, but config provided contains group " "%(group_other)s instead" msgstr "要求更新群組 %(group)s,但提供的配置卻包含群組 %(group_other)s" msgid "rescope a scoped token" msgstr "重新劃定已限定範圍之記號的範圍" msgid "scope.project.id must be specified if include_subtree is also specified" msgstr "如果也指定了 include_subtree,則必須指定 scope.project.id" #, python-format msgid "tls_cacertdir %s not found or is not a directory" msgstr "tls_cacertdir %s 找不到,或者不是目錄" #, python-format msgid "tls_cacertfile %s not found or is not a file" msgstr "tls_cacertfile %s 找不到,或者不是檔案" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/models/0000775000175000017500000000000000000000000016607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/models/__init__.py0000664000175000017500000000000000000000000020706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/models/receipt_model.py0000664000175000017500000001141300000000000021774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this 
file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unified in-memory receipt model.""" from oslo_log import log from oslo_serialization import msgpackutils from oslo_utils import reflection from keystone.auth import core from keystone.common import cache from keystone.common import provider_api from keystone import exception from keystone.identity.backends import resource_options as ro LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs class ReceiptModel: """An object that represents a receipt emitted by keystone. This is a queryable object that other parts of keystone can use to reason about a user's receipt. 
""" def __init__(self): self.user_id = None self.__user = None self.__user_domain = None self.methods = None self.__required_methods = None self.__expires_at = None self.__issued_at = None def __repr__(self): """Return string representation of KeystoneReceipt.""" desc = '<%(type)s at %(loc)s>' self_cls_name = reflection.get_class_name(self, fully_qualified=False) return desc % {'type': self_cls_name, 'loc': hex(id(self))} @property def expires_at(self): return self.__expires_at @expires_at.setter def expires_at(self, value): if not isinstance(value, str): raise ValueError('expires_at must be a string.') self.__expires_at = value @property def issued_at(self): return self.__issued_at @issued_at.setter def issued_at(self, value): if not isinstance(value, str): raise ValueError('issued_at must be a string.') self.__issued_at = value @property def user(self): if not self.__user: if self.user_id: self.__user = PROVIDERS.identity_api.get_user(self.user_id) return self.__user @property def user_domain(self): if not self.__user_domain: if self.user: self.__user_domain = PROVIDERS.resource_api.get_domain( self.user['domain_id'] ) return self.__user_domain @property def required_methods(self): if not self.__required_methods: mfa_rules = self.user['options'].get( ro.MFA_RULES_OPT.option_name, [] ) rules = core.UserMFARulesValidator._parse_rule_structure( mfa_rules, self.user_id ) methods = set(self.methods) active_methods = set(core.AUTH_METHODS.keys()) required_auth_methods = [] for r in rules: r_set = set(r).intersection(active_methods) if r_set.intersection(methods): required_auth_methods.append(list(r_set)) self.__required_methods = required_auth_methods return self.__required_methods def mint(self, receipt_id, issued_at): """Set the ``id`` and ``issued_at`` attributes of a receipt. The process of building a Receipt requires setting attributes about the partial authentication context, like ``user_id`` and ``methods`` for example. 
Once a Receipt object accurately represents this information it should be "minted". Receipt are minted when they get an ``id`` attribute and their creation time is recorded. """ self.id = receipt_id self.issued_at = issued_at class _ReceiptModelHandler: identity = 125 handles = (ReceiptModel,) def __init__(self, registry): self._registry = registry def serialize(self, obj): serialized = msgpackutils.dumps(obj.__dict__, registry=self._registry) return serialized def deserialize(self, data): receipt_data = msgpackutils.loads(data, registry=self._registry) try: receipt_model = ReceiptModel() for k, v in iter(receipt_data.items()): setattr(receipt_model, k, v) except Exception: LOG.debug( "Failed to deserialize ReceiptModel. Data is %s", receipt_data ) raise exception.CacheDeserializationError( ReceiptModel.__name__, receipt_data ) return receipt_model cache.register_model_handler(_ReceiptModelHandler) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/models/revoke_model.py0000664000175000017500000002455200000000000021644 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from oslo_serialization import msgpackutils from oslo_utils import timeutils from keystone.common import cache from keystone.common import utils LOG = log.getLogger(__name__) # The set of attributes common between the RevokeEvent # and the dictionaries created from the token Data. 
_NAMES = [ 'trust_id', 'consumer_id', 'access_token_id', 'audit_id', 'audit_chain_id', 'expires_at', 'domain_id', 'project_id', 'user_id', 'role_id', ] # Additional arguments for creating a RevokeEvent _EVENT_ARGS = ['issued_before', 'revoked_at'] # Names of attributes in the RevocationEvent, including "virtual" attributes. # Virtual attributes are those added based on other values. _EVENT_NAMES = _NAMES + ['domain_scope_id'] # Values that will be in the token data but not in the event. # These will compared with event values that have different names. # For example: both trustor_id and trustee_id are compared against user_id _TOKEN_KEYS = [ 'identity_domain_id', 'assignment_domain_id', 'issued_at', 'trustor_id', 'trustee_id', ] # Alternative names to be checked in token for every field in # revoke tree. ALTERNATIVES = { 'user_id': ['user_id', 'trustor_id', 'trustee_id'], 'domain_id': ['identity_domain_id', 'assignment_domain_id'], # For a domain-scoped token, the domain is in assignment_domain_id. 'domain_scope_id': [ 'assignment_domain_id', ], } REVOKE_KEYS = _NAMES + _EVENT_ARGS def blank_token_data(issued_at): token_data = dict() for name in _NAMES: token_data[name] = None for name in _TOKEN_KEYS: token_data[name] = None # required field token_data['issued_at'] = issued_at return token_data class RevokeEvent: def __init__(self, **kwargs): for k in REVOKE_KEYS: v = kwargs.get(k) setattr(self, k, v) if self.domain_id and self.expires_at: # This is revoking a domain-scoped token. self.domain_scope_id = self.domain_id self.domain_id = None else: # This is revoking all tokens for a domain. self.domain_scope_id = None if self.expires_at is not None: # Trim off the expiration time because MySQL timestamps are only # accurate to the second. 
self.expires_at = self.expires_at.replace(microsecond=0) if self.revoked_at is None: self.revoked_at = timeutils.utcnow().replace(microsecond=0) if self.issued_before is None: self.issued_before = self.revoked_at def to_dict(self): keys = [ 'user_id', 'role_id', 'domain_id', 'domain_scope_id', 'project_id', 'audit_id', 'audit_chain_id', ] event = { key: self.__dict__[key] for key in keys if self.__dict__[key] is not None } if self.trust_id is not None: event['OS-TRUST:trust_id'] = self.trust_id if self.consumer_id is not None: event['OS-OAUTH1:consumer_id'] = self.consumer_id if self.access_token_id is not None: event['OS-OAUTH1:access_token_id'] = self.access_token_id if self.expires_at is not None: event['expires_at'] = utils.isotime(self.expires_at) if self.issued_before is not None: event['issued_before'] = utils.isotime( self.issued_before, subsecond=True ) if self.revoked_at is not None: event['revoked_at'] = utils.isotime( self.revoked_at, subsecond=True ) return event def is_revoked(events, token_data): """Check if a token matches a revocation event. Compare a token against every revocation event. If the token matches an event in the `events` list, the token is revoked. If the token is compared against every item in the list without a match, it is not considered revoked from the `revoke_api`. :param events: a list of RevokeEvent instances :param token_data: map based on a flattened view of the token. The required fields are `expires_at`,`user_id`, `project_id`, `identity_domain_id`, `assignment_domain_id`, `trust_id`, `trustor_id`, `trustee_id` `consumer_id` and `access_token_id` :returns: True if the token matches an existing revocation event, meaning the token is revoked. False is returned if the token does not match any revocation events, meaning the token is considered valid by the revocation API. """ return any([matches(e, token_data) for e in events]) def matches(event, token_values): """See if the token matches the revocation event. 
A brute force approach to checking. Compare each attribute from the event with the corresponding value from the token. If the event does not have a value for the attribute, a match is still possible. If the event has a value for the attribute, and it does not match the token, no match is possible, so skip the remaining checks. :param event: a RevokeEvent instance :param token_values: dictionary with set of values taken from the token :returns: True if the token matches the revocation event, indicating the token has been revoked """ # If any one check does not match, the whole token does # not match the event. The numerous return False indicate # that the token is still valid and short-circuits the # rest of the logic. # The token has two attributes that can match the domain_id. if event.domain_id is not None and event.domain_id not in ( token_values['identity_domain_id'], token_values['assignment_domain_id'], ): return False if event.domain_scope_id is not None and event.domain_scope_id not in ( token_values['assignment_domain_id'], ): return False # If an event specifies an attribute name, but it does not match, the token # is not revoked. if event.expires_at is not None and event.expires_at not in ( token_values['expires_at'], ): return False if event.trust_id is not None and event.trust_id not in ( token_values['trust_id'], ): return False if event.consumer_id is not None and event.consumer_id not in ( token_values['consumer_id'], ): return False if event.audit_chain_id is not None and event.audit_chain_id not in ( token_values['audit_chain_id'], ): return False if event.role_id is not None and event.role_id not in ( token_values['roles'] ): return False return True def build_token_values(token): token_expires_at = timeutils.parse_isotime(token.expires_at) # Trim off the microseconds because the revocation event only has # expirations accurate to the second. 
token_expires_at = token_expires_at.replace(microsecond=0) token_values = { 'expires_at': timeutils.normalize_time(token_expires_at), 'issued_at': timeutils.normalize_time( timeutils.parse_isotime(token.issued_at) ), 'audit_id': token.audit_id, 'audit_chain_id': token.parent_audit_id, } if token.user_id is not None: token_values['user_id'] = token.user_id # Federated users do not have a domain, be defensive and get the user # domain set to None in the federated user case. token_values['identity_domain_id'] = token.user_domain['id'] else: token_values['user_id'] = None token_values['identity_domain_id'] = None if token.project_id is not None: token_values['project_id'] = token.project_id # The domain_id of projects acting as domains is None token_values['assignment_domain_id'] = token.project_domain['id'] else: token_values['project_id'] = None if token.domain_id is not None: token_values['assignment_domain_id'] = token.domain_id else: token_values['assignment_domain_id'] = None role_list = [] token_roles = token.roles if token_roles is not None: for role in token_roles: role_list.append(role['id']) token_values['roles'] = role_list if token.trust_scoped: token_values['trust_id'] = token.trust['id'] token_values['trustor_id'] = token.trustor['id'] token_values['trustee_id'] = token.trustee['id'] else: token_values['trust_id'] = None token_values['trustor_id'] = None token_values['trustee_id'] = None if token.oauth_scoped: token_values['consumer_id'] = token.access_token['consumer_id'] token_values['access_token_id'] = token.access_token['id'] else: token_values['consumer_id'] = None token_values['access_token_id'] = None return token_values class _RevokeEventHandler: # NOTE(morganfainberg): There needs to be reserved "registry" entries set # in oslo_serialization for application-specific handlers. We picked 127 # here since it's waaaaaay far out before oslo_serialization will use it. 
identity = 127 handles = (RevokeEvent,) def __init__(self, registry): self._registry = registry def serialize(self, obj): return msgpackutils.dumps(obj.__dict__, registry=self._registry) def deserialize(self, data): revoke_event_data = msgpackutils.loads(data, registry=self._registry) try: revoke_event = RevokeEvent(**revoke_event_data) except Exception: LOG.debug( "Failed to deserialize RevokeEvent. Data is %s", revoke_event_data, ) raise return revoke_event cache.register_model_handler(_RevokeEventHandler) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/models/token_model.py0000664000175000017500000005344400000000000021473 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unified in-memory token model.""" from oslo_log import log from oslo_serialization import jsonutils from oslo_serialization import msgpackutils from oslo_utils import reflection from keystone.common import cache from keystone.common import provider_api from keystone import exception from keystone.i18n import _ LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs # supported token versions V3 = 'v3.0' VERSIONS = frozenset([V3]) # minimum access rules support ACCESS_RULES_MIN_VERSION = 1.0 class TokenModel: """An object that represents a token emitted by keystone. This is a queryable object that other parts of keystone can use to reason about a user's authentication or authorization. 
""" def __init__(self): self.user_id = None self.__user = None self.__user_domain = None self.methods = None self.audit_id = None self.parent_audit_id = None self.__expires_at = None self.__issued_at = None self.system = None self.domain_id = None self.__domain = None self.project_id = None self.__project = None self.__project_domain = None self.trust_id = None self.__trust = None self.__trustor = None self.__trustee = None self.__trust_project = None self.__trust_project_domain = None self.is_federated = False self.identity_provider_id = None self.protocol_id = None self.federated_groups = None self.access_token_id = None self.__access_token = None self.application_credential_id = None self.__application_credential = None self.oauth2_credential_id = None self.oauth2_thumbprint = None def __repr__(self): """Return string representation of TokenModel.""" desc = ( '<%(type)s (audit_id=%(audit_id)s, ' 'audit_chain_id=%(audit_ids)s) at %(loc)s>' ) self_cls_name = reflection.get_class_name(self, fully_qualified=False) return desc % { 'type': self_cls_name, 'audit_id': self.audit_id, 'audit_ids': self.audit_ids, 'loc': hex(id(self)), } @property def audit_ids(self): if self.parent_audit_id: return [self.audit_id, self.parent_audit_id] return [self.audit_id] @property def expires_at(self): return self.__expires_at @expires_at.setter def expires_at(self, value): if not isinstance(value, str): raise ValueError('expires_at must be a string.') self.__expires_at = value @property def issued_at(self): return self.__issued_at @issued_at.setter def issued_at(self, value): if not isinstance(value, str): raise ValueError('issued_at must be a string.') self.__issued_at = value @property def unscoped(self): return not any( [ self.system_scoped, self.domain_scoped, self.project_scoped, self.trust_scoped, ] ) @property def system_scoped(self): return self.system is not None @property def user(self): if not self.__user: if self.user_id: self.__user = 
PROVIDERS.identity_api.get_user(self.user_id) return self.__user @property def user_domain(self): if not self.__user_domain: if self.user: self.__user_domain = PROVIDERS.resource_api.get_domain( self.user['domain_id'] ) return self.__user_domain @property def domain(self): if not self.__domain: if self.domain_id: self.__domain = PROVIDERS.resource_api.get_domain( self.domain_id ) return self.__domain @property def domain_scoped(self): return self.domain_id is not None @property def project(self): if not self.__project: if self.project_id: self.__project = PROVIDERS.resource_api.get_project( self.project_id ) return self.__project @property def project_scoped(self): return self.project_id is not None @property def project_domain(self): if not self.__project_domain: if self.project and self.project.get('domain_id'): self.__project_domain = PROVIDERS.resource_api.get_domain( self.project['domain_id'] ) return self.__project_domain @property def application_credential(self): if not self.__application_credential: if self.application_credential_id: app_cred_api = PROVIDERS.application_credential_api self.__application_credential = ( app_cred_api.get_application_credential( self.application_credential_id ) ) return self.__application_credential @property def oauth_scoped(self): return self.access_token_id is not None @property def access_token(self): if not self.__access_token: if self.access_token_id: self.__access_token = PROVIDERS.oauth_api.get_access_token( self.access_token_id ) return self.__access_token @property def trust_scoped(self): return self.trust_id is not None @property def trust(self): if not self.__trust: if self.trust_id: self.__trust = PROVIDERS.trust_api.get_trust(self.trust_id) return self.__trust @property def trustor(self): if not self.__trustor: if self.trust: self.__trustor = PROVIDERS.identity_api.get_user( self.trust['trustor_user_id'] ) return self.__trustor @property def trustee(self): if not self.__trustee: if self.trust: self.__trustee = 
PROVIDERS.identity_api.get_user( self.trust['trustee_user_id'] ) return self.__trustee @property def trust_project(self): if not self.__trust_project: if self.trust: self.__trust_project = PROVIDERS.resource_api.get_project( self.trust['project_id'] ) return self.__trust_project @property def trust_project_domain(self): if not self.__trust_project_domain: if self.trust: self.__trust_project_domain = ( PROVIDERS.resource_api.get_domain( self.trust_project['domain_id'] ) ) return self.__trust_project_domain def _get_system_roles(self): roles = [] groups = PROVIDERS.identity_api.list_groups_for_user(self.user_id) all_group_roles = [] assignments = [] for group in groups: group_roles = ( PROVIDERS.assignment_api.list_system_grants_for_group( group['id'] ) ) for role in group_roles: all_group_roles.append(role) assignment = {'group_id': group['id'], 'role_id': role['id']} assignments.append(assignment) user_roles = PROVIDERS.assignment_api.list_system_grants_for_user( self.user_id ) for role in user_roles: assignment = {'user_id': self.user_id, 'role_id': role['id']} assignments.append(assignment) # NOTE(lbragstad): The whole reason we need to build out a list of # "assignments" as opposed to just using the nice list of roles we # already have is because the add_implied_roles() method operates on a # list of assignment dictionaries (containing role_id, # user_id/group_id, project_id, et cetera). That method could probably # be fixed to be more clear by operating on actual roles instead of # just assignments. assignments = PROVIDERS.assignment_api.add_implied_roles(assignments) for assignment in assignments: role = PROVIDERS.role_api.get_role(assignment['role_id']) roles.append({'id': role['id'], 'name': role['name']}) return roles def _get_trust_roles(self): roles = [] # If redelegated_trust_id is set, then we must traverse the trust_chain # in order to determine who the original trustor is. 
We need to do this # because the user ID of the original trustor helps us determine scope # in the redelegated context. if self.trust.get('redelegated_trust_id'): trust_chain = PROVIDERS.trust_api.get_trust_pedigree(self.trust_id) original_trustor_id = trust_chain[-1]['trustor_user_id'] else: original_trustor_id = self.trustor['id'] trust_roles = [{'role_id': role['id']} for role in self.trust['roles']] effective_trust_roles = PROVIDERS.assignment_api.add_implied_roles( trust_roles ) effective_trust_role_ids = { r['role_id'] for r in effective_trust_roles } current_effective_trustor_roles = ( PROVIDERS.assignment_api.get_roles_for_trustor_and_project( original_trustor_id, self.trust.get('project_id') ) ) for trust_role_id in effective_trust_role_ids: if trust_role_id in current_effective_trustor_roles: role = PROVIDERS.role_api.get_role(trust_role_id) if role['domain_id'] is None: roles.append(role) else: raise exception.Forbidden(_('Trustee has no delegated roles.')) return roles def _get_oauth_roles(self): roles = [] access_token_roles = self.access_token['role_ids'] access_token_roles = [ {'role_id': r} for r in jsonutils.loads(access_token_roles) ] effective_access_token_roles = ( PROVIDERS.assignment_api.add_implied_roles(access_token_roles) ) user_roles = [r['id'] for r in self._get_project_roles()] for role in effective_access_token_roles: if role['role_id'] in user_roles: role = PROVIDERS.role_api.get_role(role['role_id']) roles.append({'id': role['id'], 'name': role['name']}) return roles def _get_federated_roles(self): roles = [] group_ids = [group['id'] for group in self.federated_groups] federated_roles = PROVIDERS.assignment_api.get_roles_for_groups( group_ids, self.project_id, self.domain_id ) for group_id in group_ids: group_roles = ( PROVIDERS.assignment_api.list_system_grants_for_group(group_id) ) for role in group_roles: federated_roles.append(role) user_roles = PROVIDERS.assignment_api.list_system_grants_for_user( self.user_id ) for role in 
user_roles: federated_roles.append(role) if self.domain_id: domain_roles = ( PROVIDERS.assignment_api.get_roles_for_user_and_domain( self.user_id, self.domain_id ) ) for role in domain_roles: federated_roles.append(role) if self.project_id: project_roles = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( self.user_id, self.project_id ) ) for role in project_roles: federated_roles.append(role) # NOTE(lbragstad): Remove duplicate role references from a list of # roles. It is often suggested that this be done with: # # roles = [dict(t) for t in set([tuple(d.items()) for d in roles])] # # But that doesn't actually remove duplicates in all cases and # causes transient failures because dictionaries are unordered # objects. This means {'id': 1, 'foo': 'bar'} and {'foo': 'bar', # 'id': 1} won't actually resolve to a single entity in the above # logic since they are both considered unique. By using `in` we're # performing a containment check, which also does a deep comparison # of the objects, which is what we want. 
for role in federated_roles: if not isinstance(role, dict): role = PROVIDERS.role_api.get_role(role) if role not in roles: roles.append(role) return roles def _get_domain_roles(self): roles = [] domain_roles = PROVIDERS.assignment_api.get_roles_for_user_and_domain( self.user_id, self.domain_id ) for role_id in domain_roles: role = PROVIDERS.role_api.get_role(role_id) roles.append({'id': role['id'], 'name': role['name']}) return roles def _get_project_roles(self): roles = [] project_roles = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( self.user_id, self.project_id ) ) for role_id in project_roles: r = PROVIDERS.role_api.get_role(role_id) roles.append({'id': r['id'], 'name': r['name']}) return roles def _get_application_credential_roles(self): roles = [] app_cred_roles = self.application_credential['roles'] assignment_list = PROVIDERS.assignment_api.list_role_assignments( user_id=self.user_id, project_id=self.project_id, domain_id=self.domain_id, effective=True, ) user_roles = list({x['role_id'] for x in assignment_list}) for role in app_cred_roles: if role['id'] in user_roles: roles.append({'id': role['id'], 'name': role['name']}) return roles def _get_oauth2_credential_roles(self): return self._get_project_roles() @property def roles(self): if self.system_scoped: roles = self._get_system_roles() elif self.trust_scoped: roles = self._get_trust_roles() elif self.oauth_scoped: roles = self._get_oauth_roles() elif self.is_federated and not self.unscoped: roles = self._get_federated_roles() elif self.domain_scoped: roles = self._get_domain_roles() elif self.application_credential_id and self.project_id: roles = self._get_application_credential_roles() elif self.project_scoped: roles = self._get_project_roles() else: roles = [] return roles def _validate_token_resources(self): if self.project and not self.project.get('enabled'): msg = ( 'Unable to validate token because project %(id)s is ' 'disabled' ) % {'id': self.project_id} tr_msg = _( 'Unable to 
validate token because project %(id)s is ' 'disabled' ) % {'id': self.project_id} LOG.warning(msg) raise exception.ProjectNotFound(tr_msg) if self.project and not self.project_domain.get('enabled'): msg = ( 'Unable to validate token because domain %(id)s is disabled' ) % {'id': self.project_domain['id']} tr_msg = _( 'Unable to validate token because domain %(id)s is disabled' ) % {'id': self.project_domain['id']} LOG.warning(msg) raise exception.DomainNotFound(tr_msg) def _validate_token_user(self): if self.trust_scoped: if self.user_id != self.trustee['id']: raise exception.Forbidden(_('User is not a trustee.')) try: PROVIDERS.resource_api.assert_domain_enabled( self.trustor['domain_id'] ) except AssertionError: raise exception.TokenNotFound(_('Trustor domain is disabled.')) try: PROVIDERS.resource_api.assert_domain_enabled( self.trustee['domain_id'] ) except AssertionError: raise exception.TokenNotFound(_('Trustee domain is disabled.')) try: PROVIDERS.identity_api.assert_user_enabled(self.trustor['id']) except AssertionError: raise exception.Forbidden(_('Trustor is disabled.')) if not self.user_domain.get('enabled'): msg = ( 'Unable to validate token because domain %(id)s is disabled' ) % {'id': self.user_domain['id']} tr_msg = _( 'Unable to validate token because domain %(id)s is disabled' ) % {'id': self.user_domain['id']} LOG.warning(msg) raise exception.DomainNotFound(tr_msg) def _validate_system_scope(self): if self.system_scoped and not self.roles: msg = ('User %(user_id)s has no access to the system') % { 'user_id': self.user_id } tr_msg = _('User %(user_id)s has no access to the system') % { 'user_id': self.user_id } LOG.debug(msg) raise exception.Unauthorized(tr_msg) def _validate_domain_scope(self): if self.domain_scoped and not self.roles: msg = ( 'User %(user_id)s has no access to domain %(domain_id)s' ) % {'user_id': self.user_id, 'domain_id': self.domain_id} tr_msg = _( 'User %(user_id)s has no access to domain %(domain_id)s' ) % {'user_id': 
self.user_id, 'domain_id': self.domain_id} LOG.debug(msg) raise exception.Unauthorized(tr_msg) def _validate_project_scope(self): if self.project_scoped and not self.roles: msg = ( 'User %(user_id)s has no access to project %(project_id)s' ) % {'user_id': self.user_id, 'project_id': self.project_id} tr_msg = _( 'User %(user_id)s has no access to project %(project_id)s' ) % {'user_id': self.user_id, 'project_id': self.project_id} LOG.debug(msg) raise exception.Unauthorized(tr_msg) def _validate_trust_scope(self): trust_roles = [] if self.trust_id: refs = [{'role_id': role['id']} for role in self.trust['roles']] effective_trust_roles = PROVIDERS.assignment_api.add_implied_roles( refs ) effective_trust_role_ids = { r['role_id'] for r in effective_trust_roles } current_effective_trustor_roles = ( PROVIDERS.assignment_api.get_roles_for_trustor_and_project( self.trustor['id'], self.trust.get('project_id') ) ) # Go through each of the effective trust roles, making sure the # trustor still has them, if any have been removed, then we # will treat the trust as invalid for trust_role_id in effective_trust_role_ids: if trust_role_id in current_effective_trustor_roles: role = PROVIDERS.role_api.get_role(trust_role_id) if role['domain_id'] is None: trust_roles.append(role) else: raise exception.Forbidden( _('Trustee has no delegated roles.') ) def mint(self, token_id, issued_at): """Set the ``id`` and ``issued_at`` attributes of a token. The process of building a token requires setting attributes about the authentication and authorization context, like ``user_id`` and ``project_id`` for example. Once a Token object accurately represents this information it should be "minted". Tokens are minted when they get an ``id`` attribute and their creation time is recorded. 
""" self._validate_token_resources() self._validate_token_user() self._validate_system_scope() self._validate_domain_scope() self._validate_project_scope() self._validate_trust_scope() self.id = token_id self.issued_at = issued_at class _TokenModelHandler: identity = 126 handles = (TokenModel,) def __init__(self, registry): self._registry = registry def serialize(self, obj): serialized = msgpackutils.dumps(obj.__dict__, registry=self._registry) return serialized def deserialize(self, data): token_data = msgpackutils.loads(data, registry=self._registry) try: token_model = TokenModel() for k, v in iter(token_data.items()): setattr(token_model, k, v) except Exception: LOG.debug( "Failed to deserialize TokenModel. Data is %s", token_data ) raise exception.CacheDeserializationError( TokenModel.__name__, token_data ) return token_model cache.register_model_handler(_TokenModelHandler) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/notifications.py0000664000175000017500000010215200000000000020550 0ustar00zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Notifications module for OpenStack Identity Service resources.""" import collections import functools import inspect import socket import flask from oslo_log import log import oslo_messaging from oslo_utils import reflection import pycadf from pycadf import cadftaxonomy as taxonomy from pycadf import cadftype from pycadf import credential from pycadf import eventfactory from pycadf import host from pycadf import reason from pycadf import resource from keystone.common import context from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ _CATALOG_HELPER_OBJ = None LOG = log.getLogger(__name__) # NOTE(gyee): actions that can be notified. One must update this list whenever # a new action is supported. _ACTIONS = collections.namedtuple( '_ACTIONS', ['created', 'deleted', 'disabled', 'updated', 'internal'] ) ACTIONS = _ACTIONS( created='created', deleted='deleted', disabled='disabled', updated='updated', internal='internal', ) """The actions on resources.""" CADF_TYPE_MAP = { 'group': taxonomy.SECURITY_GROUP, 'project': taxonomy.SECURITY_PROJECT, 'role': taxonomy.SECURITY_ROLE, 'user': taxonomy.SECURITY_ACCOUNT_USER, 'domain': taxonomy.SECURITY_DOMAIN, 'region': taxonomy.SECURITY_REGION, 'endpoint': taxonomy.SECURITY_ENDPOINT, 'service': taxonomy.SECURITY_SERVICE, 'policy': taxonomy.SECURITY_POLICY, 'OS-TRUST:trust': taxonomy.SECURITY_TRUST, 'OS-OAUTH1:access_token': taxonomy.SECURITY_CREDENTIAL, 'OS-OAUTH1:request_token': taxonomy.SECURITY_CREDENTIAL, 'OS-OAUTH1:consumer': taxonomy.SECURITY_ACCOUNT, 'application_credential': taxonomy.SECURITY_CREDENTIAL, } SAML_AUDIT_TYPE = 'http://docs.oasis-open.org/security/saml/v2.0' # resource types that can be notified _SUBSCRIBERS: dict = {} _notifier = None SERVICE = 'identity' PROVIDERS = provider_api.ProviderAPIs ROOT_DOMAIN = '<>' CONF = keystone.conf.CONF # NOTE(morganfainberg): Special case notifications that are only used 
# internally for handling token persistence token deletions INVALIDATE_TOKEN_CACHE = 'invalidate_token_cache' # nosec PERSIST_REVOCATION_EVENT_FOR_USER = 'persist_revocation_event_for_user' REMOVE_APP_CREDS_FOR_USER = 'remove_application_credentials_for_user' DOMAIN_DELETED = 'domain_deleted' def build_audit_initiator(): """A pyCADF initiator describing the current authenticated context.""" pycadf_host = host.Host( address=flask.request.remote_addr, agent=str(flask.request.user_agent) ) initiator = resource.Resource( typeURI=taxonomy.ACCOUNT_USER, host=pycadf_host ) oslo_context = flask.request.environ.get(context.REQUEST_CONTEXT_ENV) if oslo_context.user_id: initiator.id = utils.resource_uuid(oslo_context.user_id) initiator.user_id = oslo_context.user_id if oslo_context.project_id: initiator.project_id = oslo_context.project_id if oslo_context.domain_id: initiator.domain_id = oslo_context.domain_id initiator.request_id = oslo_context.request_id if oslo_context.global_request_id: initiator.global_request_id = oslo_context.global_request_id return initiator class Audit: """Namespace for audit notification functions. This is a namespace object to contain all of the direct notification functions utilized for ``Manager`` methods. """ @classmethod def _emit( cls, operation, resource_type, resource_id, initiator, public, actor_dict=None, reason=None, ): """Directly send an event notification. :param operation: one of the values from ACTIONS :param resource_type: type of resource being affected :param resource_id: ID of the resource affected :param initiator: CADF representation of the user that created the request :param public: If True (default), the event will be sent to the notifier API. 
If False, the event will only be sent via notify_event_callbacks to in process listeners :param actor_dict: dictionary of actor information in the event of assignment notification :param reason: pycadf object containing the response code and message description """ # NOTE(stevemar): the _send_notification function is # overloaded, it's used to register callbacks and to actually # send the notification externally. Thus, we should check # the desired notification format in the function instead # of before it. _send_notification( operation, resource_type, resource_id, initiator=initiator, actor_dict=actor_dict, public=public, ) if CONF.notification_format == 'cadf' and public: outcome = taxonomy.OUTCOME_SUCCESS _create_cadf_payload( operation, resource_type, resource_id, outcome, initiator, reason, ) @classmethod def created( cls, resource_type, resource_id, initiator=None, public=True, reason=None, ): cls._emit( ACTIONS.created, resource_type, resource_id, initiator, public, reason=reason, ) @classmethod def updated( cls, resource_type, resource_id, initiator=None, public=True, reason=None, ): cls._emit( ACTIONS.updated, resource_type, resource_id, initiator, public, reason=reason, ) @classmethod def disabled( cls, resource_type, resource_id, initiator=None, public=True, reason=None, ): cls._emit( ACTIONS.disabled, resource_type, resource_id, initiator, public, reason=reason, ) @classmethod def deleted( cls, resource_type, resource_id, initiator=None, public=True, reason=None, ): cls._emit( ACTIONS.deleted, resource_type, resource_id, initiator, public, reason=reason, ) @classmethod def added_to( cls, target_type, target_id, actor_type, actor_id, initiator=None, public=True, reason=None, ): actor_dict = { 'id': actor_id, 'type': actor_type, 'actor_operation': 'added', } cls._emit( ACTIONS.updated, target_type, target_id, initiator, public, actor_dict=actor_dict, reason=reason, ) @classmethod def removed_from( cls, target_type, target_id, actor_type, actor_id, 
initiator=None, public=True, reason=None, ): actor_dict = { 'id': actor_id, 'type': actor_type, 'actor_operation': 'removed', } cls._emit( ACTIONS.updated, target_type, target_id, initiator, public, actor_dict=actor_dict, reason=reason, ) @classmethod def internal(cls, resource_type, resource_id, reason=None): # NOTE(lbragstad): Internal notifications are never public and have # never used the initiator variable, but the _emit() method expects # them. Let's set them here but not expose them through the method # signature - that way someone can not do something like send an # internal notification publicly. initiator = None public = False cls._emit( ACTIONS.internal, resource_type, resource_id, initiator, public, reason, ) def invalidate_token_cache_notification(reason): """A specific notification for invalidating the token cache. :param reason: The specific reason why the token cache is being invalidated. :type reason: string """ # Since keystone does a lot of work in the authentication and validation # process to make sure the authorization context for the user is # update-to-date, invalidating the token cache is a somewhat common # operation. It's done across various subsystems when role assignments # change, users are disabled, identity providers deleted or disabled, etc.. # This notification is meant to make the process of invalidating the token # cache DRY, instead of have each subsystem implement their own token cache # invalidation strategy or callbacks. LOG.debug(reason) resource_id = None initiator = None public = False Audit._emit( ACTIONS.internal, INVALIDATE_TOKEN_CACHE, resource_id, initiator, public, reason=reason, ) def _get_callback_info(callback): """Return list containing callback's module and name. If the callback is a bound instance method also return the class name. 
:param callback: Function to call :type callback: function :returns: List containing parent module, (optional class,) function name :rtype: list """ module_name = getattr(callback, '__module__', None) func_name = callback.__name__ if inspect.ismethod(callback): class_name = reflection.get_class_name( callback.__self__, fully_qualified=False ) return [module_name, class_name, func_name] else: return [module_name, func_name] def register_event_callback(event, resource_type, callbacks): """Register each callback with the event. :param event: Action being registered :type event: keystone.notifications.ACTIONS :param resource_type: Type of resource being operated on :type resource_type: str :param callbacks: Callback items to be registered with event :type callbacks: list :raises ValueError: If event is not a valid ACTION :raises TypeError: If callback is not callable """ if event not in ACTIONS: raise ValueError( _( '%(event)s is not a valid notification event, must ' 'be one of: %(actions)s' ) % {'event': event, 'actions': ', '.join(ACTIONS)} ) if not hasattr(callbacks, '__iter__'): callbacks = [callbacks] for callback in callbacks: if not callable(callback): msg = 'Method not callable: %s' % callback tr_msg = _('Method not callable: %s') % callback LOG.error(msg) raise TypeError(tr_msg) _SUBSCRIBERS.setdefault(event, {}).setdefault(resource_type, set()) _SUBSCRIBERS[event][resource_type].add(callback) if LOG.logger.getEffectiveLevel() <= log.DEBUG: # Do this only if its going to appear in the logs. msg = 'Callback: `%(callback)s` subscribed to event `%(event)s`.' callback_info = _get_callback_info(callback) callback_str = '.'.join(i for i in callback_info if i is not None) event_str = '.'.join(['identity', resource_type, event]) LOG.debug(msg, {'callback': callback_str, 'event': event_str}) def listener(cls): """A class decorator to declare a class to be a notification listener. 
A notification listener must specify the event(s) it is interested in by defining a ``event_callbacks`` attribute or property. ``event_callbacks`` is a dictionary where the key is the type of event and the value is a dictionary containing a mapping of resource types to callback(s). :data:`.ACTIONS` contains constants for the currently supported events. There is currently no single place to find constants for the resource types. Example:: @listener class Something(object): def __init__(self): self.event_callbacks = { notifications.ACTIONS.created: { 'user': self._user_created_callback, }, notifications.ACTIONS.deleted: { 'project': [ self._project_deleted_callback, self._do_cleanup, ] }, } """ def init_wrapper(init): @functools.wraps(init) def __new_init__(self, *args, **kwargs): init(self, *args, **kwargs) _register_event_callbacks(self) return __new_init__ def _register_event_callbacks(self): for event, resource_types in self.event_callbacks.items(): for resource_type, callbacks in resource_types.items(): register_event_callback(event, resource_type, callbacks) cls.__init__ = init_wrapper(cls.__init__) return cls def notify_event_callbacks(service, resource_type, operation, payload): """Send a notification to registered extensions.""" if operation in _SUBSCRIBERS: if resource_type in _SUBSCRIBERS[operation]: for cb in _SUBSCRIBERS[operation][resource_type]: subst_dict = { 'cb_name': cb.__name__, 'service': service, 'resource_type': resource_type, 'operation': operation, 'payload': payload, } LOG.debug( 'Invoking callback %(cb_name)s for event ' '%(service)s %(resource_type)s %(operation)s for ' '%(payload)s', subst_dict, ) cb(service, resource_type, operation, payload) def _get_notifier(): """Return a notifier object. If _notifier is None it means that a notifier object has not been set. If _notifier is False it means that a notifier has previously failed to construct. Otherwise it is a constructed Notifier object. 
""" global _notifier if _notifier is None: host = CONF.default_publisher_id or socket.gethostname() try: transport = oslo_messaging.get_notification_transport(CONF) _notifier = oslo_messaging.Notifier( transport, "identity.%s" % host ) except Exception: LOG.exception("Failed to construct notifier") _notifier = False return _notifier def clear_subscribers(): """Empty subscribers dictionary. This effectively stops notifications since there will be no subscribers to publish to. """ _SUBSCRIBERS.clear() def reset_notifier(): """Reset the notifications internal state. This is used only for testing purposes. """ global _notifier _notifier = None def _create_cadf_payload( operation, resource_type, resource_id, outcome, initiator, reason=None ): """Prepare data for CADF audit notifier. Transform the arguments into content to be consumed by the function that emits CADF events (_send_audit_notification). Specifically the ``resource_type`` (role, user, etc) must be transformed into a CADF keyword, such as: ``data/security/role``. The ``resource_id`` is added as a top level value for the ``resource_info`` key. Lastly, the ``operation`` is used to create the CADF ``action``, and the ``event_type`` name. As per the CADF specification, the ``action`` must start with create, update, delete, etc... i.e.: created.user or deleted.role However the ``event_type`` is an OpenStack-ism that is typically of the form project.resource.operation. 
i.e.: identity.project.updated :param operation: operation being performed (created, updated, or deleted) :param resource_type: type of resource being operated on (role, user, etc) :param resource_id: ID of resource being operated on :param outcome: outcomes of the operation (SUCCESS, FAILURE, etc) :param initiator: CADF representation of the user that created the request :param reason: pycadf object containing the response code and message description """ if resource_type not in CADF_TYPE_MAP: target_uri = taxonomy.UNKNOWN else: target_uri = CADF_TYPE_MAP.get(resource_type) # TODO(gagehugo): The root domain ID is typically hidden, there isn't a # reason to emit a notification for it. Once we expose the root domain # (and handle the CADF UUID), remove this. if resource_id == ROOT_DOMAIN: return target = resource.Resource(typeURI=target_uri, id=resource_id) audit_kwargs = {'resource_info': resource_id} cadf_action = f'{operation}.{resource_type}' event_type = f'{SERVICE}.{resource_type}.{operation}' _send_audit_notification( cadf_action, initiator, outcome, target, event_type, reason=reason, **audit_kwargs, ) def _send_notification( operation, resource_type, resource_id, initiator=None, actor_dict=None, public=True, ): """Send notification to inform observers about the affected resource. This method doesn't raise an exception when sending the notification fails. :param operation: operation being performed (created, updated, or deleted) :param resource_type: type of resource being operated on :param resource_id: ID of resource being operated on :param initiator: representation of the user that created the request :param actor_dict: a dictionary containing the actor's ID and type :param public: if True (default), the event will be sent to the notifier API. if False, the event will only be sent via notify_event_callbacks to in process listeners. 
""" payload = {'resource_info': resource_id} if actor_dict: payload['actor_id'] = actor_dict['id'] payload['actor_type'] = actor_dict['type'] payload['actor_operation'] = actor_dict['actor_operation'] if initiator: payload['request_id'] = initiator.request_id global_request_id = getattr(initiator, 'global_request_id', None) if global_request_id: payload['global_request_id'] = global_request_id notify_event_callbacks(SERVICE, resource_type, operation, payload) # Only send this notification if the 'basic' format is used, otherwise # let the CADF functions handle sending the notification. But we check # here so as to not disrupt the notify_event_callbacks function. if public and CONF.notification_format == 'basic': notifier = _get_notifier() if notifier: context = {} event_type = '{service}.{resource_type}.{operation}'.format( service=SERVICE, resource_type=resource_type, operation=operation, ) if _check_notification_opt_out(event_type, outcome=None): return try: notifier.info(context, event_type, payload) except Exception: LOG.exception( 'Failed to send %(res_id)s %(event_type)s notification', {'res_id': resource_id, 'event_type': event_type}, ) def _get_request_audit_info(context, user_id=None): """Collect audit information about the request used for CADF. 
:param context: Request context :param user_id: Optional user ID, alternatively collected from context :returns: Auditing data about the request :rtype: :class:`pycadf.Resource` """ remote_addr = None http_user_agent = None project_id = None domain_id = None if context and 'environment' in context and context['environment']: environment = context['environment'] remote_addr = environment.get('REMOTE_ADDR') http_user_agent = environment.get('HTTP_USER_AGENT') if not user_id: user_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get( 'user_id' ) project_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get( 'project_id' ) domain_id = environment.get('KEYSTONE_AUTH_CONTEXT', {}).get( 'domain_id' ) host = pycadf.host.Host(address=remote_addr, agent=http_user_agent) initiator = resource.Resource(typeURI=taxonomy.ACCOUNT_USER, host=host) if user_id: initiator.user_id = user_id initiator.id = utils.resource_uuid(user_id) initiator = _add_username_to_initiator(initiator) if project_id: initiator.project_id = project_id if domain_id: initiator.domain_id = domain_id return initiator class CadfNotificationWrapper: """Send CADF event notifications for various methods. This function is only used for Authentication events. Its ``action`` and ``event_type`` are dictated below. - action: ``authenticate`` - event_type: ``identity.authenticate`` Sends CADF notifications for events such as whether an authentication was successful or not. 
:param operation: The authentication related action being performed """ def __init__(self, operation): self.action = operation self.event_type = f'{SERVICE}.{operation}' def __call__(self, f): @functools.wraps(f) def wrapper(wrapped_self, user_id, *args, **kwargs): """Will always send a notification.""" target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) initiator = build_audit_initiator() initiator.user_id = user_id initiator = _add_username_to_initiator(initiator) initiator.id = utils.resource_uuid(user_id) try: result = f(wrapped_self, user_id, *args, **kwargs) except (exception.AccountLocked, exception.PasswordExpired) as ex: # Send a CADF event with a reason for PCI-DSS related # authentication failures audit_reason = reason.Reason(str(ex), str(ex.code)) _send_audit_notification( self.action, initiator, taxonomy.OUTCOME_FAILURE, target, self.event_type, reason=audit_reason, ) if isinstance(ex, exception.AccountLocked): raise exception.Unauthorized raise except Exception: # For authentication failure send a CADF event as well _send_audit_notification( self.action, initiator, taxonomy.OUTCOME_FAILURE, target, self.event_type, ) raise else: _send_audit_notification( self.action, initiator, taxonomy.OUTCOME_SUCCESS, target, self.event_type, ) return result return wrapper class CadfRoleAssignmentNotificationWrapper: """Send CADF notifications for ``role_assignment`` methods. This function is only used for role assignment events. Its ``action`` and ``event_type`` are dictated below. - action: ``created.role_assignment`` or ``deleted.role_assignment`` - event_type: ``identity.role_assignment.created`` or ``identity.role_assignment.deleted`` Sends a CADF notification if the wrapped method does not raise an :class:`Exception` (such as :class:`keystone.exception.NotFound`). 
:param operation: one of the values from ACTIONS (created or deleted) """ ROLE_ASSIGNMENT = 'role_assignment' def __init__(self, operation): self.action = f'{operation}.{self.ROLE_ASSIGNMENT}' self.event_type = '{}.{}.{}'.format( SERVICE, self.ROLE_ASSIGNMENT, operation, ) def __call__(self, f): @functools.wraps(f) def wrapper(wrapped_self, role_id, *args, **kwargs): """Send a notification if the wrapped callable is successful. NOTE(stevemar): The reason we go through checking kwargs and args for possible target and actor values is because the create_grant() (and delete_grant()) method are called differently in various tests. Using named arguments, i.e.:: create_grant(user_id=user['id'], domain_id=domain['id'], role_id=role['id']) Or, using positional arguments, i.e.:: create_grant(role_id['id'], user['id'], None, domain_id=domain['id'], None) Or, both, i.e.:: create_grant(role_id['id'], user_id=user['id'], domain_id=domain['id']) Checking the values for kwargs is easy enough, since it comes in as a dictionary The actual method signature is :: create_grant(role_id, user_id=None, group_id=None, domain_id=None, project_id=None, inherited_to_projects=False) So, if the values of actor or target are still None after checking kwargs, we can check the positional arguments, based on the method signature. 
""" call_args = inspect.getcallargs( f, wrapped_self, role_id, *args, **kwargs ) inherited = call_args['inherited_to_projects'] initiator = call_args.get('initiator', None) target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) audit_kwargs = {} if call_args['project_id']: audit_kwargs['project'] = call_args['project_id'] elif call_args['domain_id']: audit_kwargs['domain'] = call_args['domain_id'] if call_args['user_id']: audit_kwargs['user'] = call_args['user_id'] elif call_args['group_id']: audit_kwargs['group'] = call_args['group_id'] audit_kwargs['inherited_to_projects'] = inherited audit_kwargs['role'] = role_id try: result = f(wrapped_self, role_id, *args, **kwargs) except Exception: _send_audit_notification( self.action, initiator, taxonomy.OUTCOME_FAILURE, target, self.event_type, **audit_kwargs, ) raise else: _send_audit_notification( self.action, initiator, taxonomy.OUTCOME_SUCCESS, target, self.event_type, **audit_kwargs, ) return result return wrapper def send_saml_audit_notification( action, user_id, group_ids, identity_provider, protocol, token_id, outcome ): """Send notification to inform observers about SAML events. 
:param action: Action being audited :type action: str :param user_id: User ID from Keystone token :type user_id: str :param group_ids: List of Group IDs from Keystone token :type group_ids: list :param identity_provider: ID of the IdP from the Keystone token :type identity_provider: str or None :param protocol: Protocol ID for IdP from the Keystone token :type protocol: str :param token_id: audit_id from Keystone token :type token_id: str or None :param outcome: One of :class:`pycadf.cadftaxonomy` :type outcome: str """ initiator = build_audit_initiator() target = resource.Resource(typeURI=taxonomy.ACCOUNT_USER) audit_type = SAML_AUDIT_TYPE user_id = user_id or taxonomy.UNKNOWN token_id = token_id or taxonomy.UNKNOWN group_ids = group_ids or [] cred = credential.FederatedCredential( token=token_id, type=audit_type, identity_provider=identity_provider, user=user_id, groups=group_ids, ) initiator.credential = cred event_type = f'{SERVICE}.{action}' _send_audit_notification(action, initiator, outcome, target, event_type) class _CatalogHelperObj(provider_api.ProviderAPIMixin): """A helper object to allow lookups of identity service id.""" def _send_audit_notification( action, initiator, outcome, target, event_type, reason=None, **kwargs ): """Send CADF notification to inform observers about the affected resource. This method logs an exception when sending the notification fails. :param action: CADF action being audited (e.g., 'authenticate') :param initiator: CADF resource representing the initiator :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING, taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE) :param target: CADF resource representing the target :param event_type: An OpenStack-ism, typically this is the meter name that Ceilometer uses to poll events. :param kwargs: Any additional arguments passed in will be added as key-value pairs to the CADF event. 
:param reason: Reason for the notification which contains the response code and message description """ if _check_notification_opt_out(event_type, outcome): return global _CATALOG_HELPER_OBJ if _CATALOG_HELPER_OBJ is None: _CATALOG_HELPER_OBJ = _CatalogHelperObj() service_list = _CATALOG_HELPER_OBJ.catalog_api.list_services() service_id = None for i in service_list: if i['type'] == SERVICE: service_id = i['id'] break initiator = _add_username_to_initiator(initiator) event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, reason=reason, observer=resource.Resource(typeURI=taxonomy.SERVICE_SECURITY), ) if service_id is not None: event.observer.id = service_id for key, value in kwargs.items(): setattr(event, key, value) context = {} payload = event.as_dict() notifier = _get_notifier() if notifier: try: notifier.info(context, event_type, payload) except Exception: # diaper defense: any exception that occurs while emitting the # notification should not interfere with the API request LOG.exception( 'Failed to send %(action)s %(event_type)s notification', {'action': action, 'event_type': event_type}, ) def _check_notification_opt_out(event_type, outcome): """Check if a particular event_type has been opted-out of. This method checks to see if an event should be sent to the messaging service. Any event specified in the opt-out list will not be transmitted. :param event_type: This is the meter name that Ceilometer uses to poll events. For example: identity.user.created, or identity.authenticate.success, or identity.role_assignment.created :param outcome: The CADF outcome (taxonomy.OUTCOME_PENDING, taxonomy.OUTCOME_SUCCESS, taxonomy.OUTCOME_FAILURE) """ # NOTE(stevemar): Special handling for authenticate, we look at the outcome # as well when evaluating. 
For authN events, event_type is just # identity.authenticate, which isn't fine enough to provide any opt-out # value, so we attach the outcome to re-create the meter name used in # ceilometer. if 'authenticate' in event_type: event_type = event_type + "." + outcome if event_type in CONF.notification_opt_out: return True return False def _add_username_to_initiator(initiator): """Add the username to the initiator if missing.""" if hasattr(initiator, 'username'): return initiator try: user_ref = PROVIDERS.identity_api.get_user(initiator.user_id) initiator.username = user_ref['name'] except (exception.UserNotFound, AttributeError): # Either user not found or no user_id, move along pass return initiator emit_event = CadfNotificationWrapper role_assignment = CadfRoleAssignmentNotificationWrapper ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/oauth1/0000775000175000017500000000000000000000000016525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/__init__.py0000664000175000017500000000116500000000000020641 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.oauth1.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/oauth1/backends/0000775000175000017500000000000000000000000020277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/backends/__init__.py0000664000175000017500000000000000000000000022376 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/backends/base.py0000664000175000017500000001477000000000000021574 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import string from keystone import exception # The characters used to generate verifiers are limited to alphanumerical # values for ease of manual entry. Commonly confused characters are omitted. VERIFIER_CHARS = string.ascii_letters + string.digits CONFUSED_CHARS = 'jiIl1oO0' VERIFIER_CHARS = ''.join(c for c in VERIFIER_CHARS if c not in CONFUSED_CHARS) def filter_token(access_token_ref): """Filter out private items in an access token dict. 'access_secret' is never returned. 
:returns: access_token_ref """ if access_token_ref: access_token_ref = access_token_ref.copy() access_token_ref.pop('access_secret', None) return access_token_ref def filter_consumer(consumer_ref): """Filter out private items in a consumer dict. 'secret' is never returned. :returns: consumer_ref """ if consumer_ref: consumer_ref = consumer_ref.copy() consumer_ref.pop('secret', None) return consumer_ref class Oauth1DriverBase(metaclass=abc.ABCMeta): """Interface description for an OAuth1 driver.""" @abc.abstractmethod def create_consumer(self, consumer_ref): """Create consumer. :param consumer_ref: consumer ref with consumer name :type consumer_ref: dict :returns: consumer_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_consumer(self, consumer_id, consumer_ref): """Update consumer. :param consumer_id: id of consumer to update :type consumer_id: string :param consumer_ref: new consumer ref with consumer name :type consumer_ref: dict :returns: consumer_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_consumers(self): """List consumers. :returns: list of consumers """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_consumer(self, consumer_id): """Get consumer, returns the consumer id (key) and description. :param consumer_id: id of consumer to get :type consumer_id: string :returns: consumer_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_consumer_with_secret(self, consumer_id): """Like get_consumer(), but also returns consumer secret. Returned dictionary consumer_ref includes consumer secret. Secrets should only be shared upon consumer creation; the consumer secret is required to verify incoming OAuth requests. 
:param consumer_id: id of consumer to get :type consumer_id: string :returns: consumer_ref containing consumer secret """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_consumer(self, consumer_id): """Delete consumer. :param consumer_id: id of consumer to get :type consumer_id: string :returns: None. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_access_tokens(self, user_id): """List access tokens. :param user_id: search for access tokens authorized by given user id :type user_id: string :returns: list of access tokens the user has authorized """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_access_token(self, user_id, access_token_id): """Delete access token. :param user_id: authorizing user id :type user_id: string :param access_token_id: access token to delete :type access_token_id: string :returns: None """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_request_token( self, consumer_id, requested_project, request_token_duration ): """Create request token. :param consumer_id: the id of the consumer :type consumer_id: string :param requested_project_id: requested project id :type requested_project_id: string :param request_token_duration: duration of request token :type request_token_duration: string :returns: request_token_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_request_token(self, request_token_id): """Get request token. :param request_token_id: the id of the request token :type request_token_id: string :returns: request_token_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_access_token(self, access_token_id): """Get access token. 
:param access_token_id: the id of the access token :type access_token_id: string :returns: access_token_ref """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def authorize_request_token(self, request_token_id, user_id, role_ids): """Authorize request token. :param request_token_id: the id of the request token, to be authorized :type request_token_id: string :param user_id: the id of the authorizing user :type user_id: string :param role_ids: list of role ids to authorize :type role_ids: list :returns: verifier """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_access_token(self, request_id, access_token_duration): """Create access token. :param request_id: the id of the request token, to be deleted :type request_id: string :param access_token_duration: duration of an access token :type access_token_duration: string :returns: access_token_ref """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/backends/sql.py0000664000175000017500000002447400000000000021463 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import random as _random import uuid from oslo_serialization import jsonutils from oslo_utils import timeutils from keystone.common import sql from keystone.common import utils from keystone import exception from keystone.i18n import _ from keystone.oauth1.backends import base random = _random.SystemRandom() class Consumer(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'consumer' attributes = ['id', 'description', 'secret'] id = sql.Column(sql.String(64), primary_key=True, nullable=False) description = sql.Column(sql.String(64), nullable=True) secret = sql.Column(sql.String(64), nullable=False) extra = sql.Column(sql.JsonBlob(), nullable=False) class RequestToken(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'request_token' attributes = [ 'id', 'request_secret', 'verifier', 'authorizing_user_id', 'requested_project_id', 'role_ids', 'consumer_id', 'expires_at', ] id = sql.Column(sql.String(64), primary_key=True, nullable=False) request_secret = sql.Column(sql.String(64), nullable=False) verifier = sql.Column(sql.String(64), nullable=True) authorizing_user_id = sql.Column(sql.String(64), nullable=True) requested_project_id = sql.Column(sql.String(64), nullable=False) role_ids = sql.Column(sql.Text(), nullable=True) consumer_id = sql.Column( sql.String(64), sql.ForeignKey('consumer.id'), nullable=False, index=True, ) expires_at = sql.Column(sql.String(64), nullable=True) @classmethod def from_dict(cls, user_dict): return cls(**user_dict) def to_dict(self): return dict(self.items()) class AccessToken(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'access_token' attributes = [ 'id', 'access_secret', 'authorizing_user_id', 'project_id', 'role_ids', 'consumer_id', 'expires_at', ] id = sql.Column(sql.String(64), primary_key=True, nullable=False) access_secret = sql.Column(sql.String(64), nullable=False) authorizing_user_id = sql.Column( sql.String(64), nullable=False, index=True ) project_id = sql.Column(sql.String(64), 
nullable=False) role_ids = sql.Column(sql.Text(), nullable=False) consumer_id = sql.Column( sql.String(64), sql.ForeignKey('consumer.id'), nullable=False ) expires_at = sql.Column(sql.String(64), nullable=True) @classmethod def from_dict(cls, user_dict): return cls(**user_dict) def to_dict(self): return dict(self.items()) class OAuth1(base.Oauth1DriverBase): def _get_consumer(self, session, consumer_id): consumer_ref = session.get(Consumer, consumer_id) if consumer_ref is None: raise exception.NotFound(_('Consumer not found')) return consumer_ref def get_consumer_with_secret(self, consumer_id): with sql.session_for_read() as session: consumer_ref = self._get_consumer(session, consumer_id) return consumer_ref.to_dict() def get_consumer(self, consumer_id): return base.filter_consumer(self.get_consumer_with_secret(consumer_id)) def create_consumer(self, consumer_ref): with sql.session_for_write() as session: consumer = Consumer.from_dict(consumer_ref) session.add(consumer) return consumer.to_dict() def _delete_consumer(self, session, consumer_id): consumer_ref = self._get_consumer(session, consumer_id) session.delete(consumer_ref) def _delete_request_tokens(self, session, consumer_id): q = session.query(RequestToken) req_tokens = q.filter_by(consumer_id=consumer_id) req_tokens_list = {x.id for x in req_tokens} for token_id in req_tokens_list: token_ref = self._get_request_token(session, token_id) session.delete(token_ref) def _delete_access_tokens(self, session, consumer_id): q = session.query(AccessToken) acc_tokens = q.filter_by(consumer_id=consumer_id) acc_tokens_list = {x.id for x in acc_tokens} for token_id in acc_tokens_list: token_ref = self._get_access_token(session, token_id) session.delete(token_ref) def delete_consumer(self, consumer_id): with sql.session_for_write() as session: self._delete_request_tokens(session, consumer_id) self._delete_access_tokens(session, consumer_id) self._delete_consumer(session, consumer_id) def list_consumers(self): with 
sql.session_for_read() as session: cons = session.query(Consumer) return [base.filter_consumer(x.to_dict()) for x in cons] def update_consumer(self, consumer_id, consumer_ref): with sql.session_for_write() as session: consumer = self._get_consumer(session, consumer_id) old_consumer_dict = consumer.to_dict() old_consumer_dict.update(consumer_ref) new_consumer = Consumer.from_dict(old_consumer_dict) consumer.description = new_consumer.description consumer.extra = new_consumer.extra return base.filter_consumer(consumer.to_dict()) def create_request_token( self, consumer_id, requested_project, request_token_duration ): request_token_id = uuid.uuid4().hex request_token_secret = uuid.uuid4().hex expiry_date = None if request_token_duration > 0: now = timeutils.utcnow() future = now + datetime.timedelta(seconds=request_token_duration) expiry_date = utils.isotime(future, subsecond=True) ref = {} ref['id'] = request_token_id ref['request_secret'] = request_token_secret ref['verifier'] = None ref['authorizing_user_id'] = None ref['requested_project_id'] = requested_project ref['role_ids'] = None ref['consumer_id'] = consumer_id ref['expires_at'] = expiry_date with sql.session_for_write() as session: token_ref = RequestToken.from_dict(ref) session.add(token_ref) return token_ref.to_dict() def _get_request_token(self, session, request_token_id): token_ref = session.get(RequestToken, request_token_id) if token_ref is None: raise exception.NotFound(_('Request token not found')) return token_ref def get_request_token(self, request_token_id): with sql.session_for_read() as session: token_ref = self._get_request_token(session, request_token_id) return token_ref.to_dict() def authorize_request_token(self, request_token_id, user_id, role_ids): with sql.session_for_write() as session: token_ref = self._get_request_token(session, request_token_id) token_dict = token_ref.to_dict() token_dict['authorizing_user_id'] = user_id token_dict['verifier'] = ''.join( 
random.sample(base.VERIFIER_CHARS, 8) ) token_dict['role_ids'] = jsonutils.dumps(role_ids) new_token = RequestToken.from_dict(token_dict) for attr in RequestToken.attributes: if attr in ['authorizing_user_id', 'verifier', 'role_ids']: setattr(token_ref, attr, getattr(new_token, attr)) return token_ref.to_dict() def create_access_token(self, request_id, access_token_duration): access_token_id = uuid.uuid4().hex access_token_secret = uuid.uuid4().hex with sql.session_for_write() as session: req_token_ref = self._get_request_token(session, request_id) token_dict = req_token_ref.to_dict() expiry_date = None if access_token_duration > 0: now = timeutils.utcnow() future = now + datetime.timedelta( seconds=access_token_duration ) expiry_date = utils.isotime(future, subsecond=True) # add Access Token ref = {} ref['id'] = access_token_id ref['access_secret'] = access_token_secret ref['authorizing_user_id'] = token_dict['authorizing_user_id'] ref['project_id'] = token_dict['requested_project_id'] ref['role_ids'] = token_dict['role_ids'] ref['consumer_id'] = token_dict['consumer_id'] ref['expires_at'] = expiry_date token_ref = AccessToken.from_dict(ref) session.add(token_ref) # remove request token, it's been used session.delete(req_token_ref) return token_ref.to_dict() def _get_access_token(self, session, access_token_id): token_ref = session.get(AccessToken, access_token_id) if token_ref is None: raise exception.NotFound(_('Access token not found')) return token_ref def get_access_token(self, access_token_id): with sql.session_for_read() as session: token_ref = self._get_access_token(session, access_token_id) return token_ref.to_dict() def list_access_tokens(self, user_id): with sql.session_for_read() as session: q = session.query(AccessToken) user_auths = q.filter_by(authorizing_user_id=user_id) return [base.filter_token(x.to_dict()) for x in user_auths] def delete_access_token(self, user_id, access_token_id): with sql.session_for_write() as session: token_ref = 
self._get_access_token(session, access_token_id) token_dict = token_ref.to_dict() if token_dict['authorizing_user_id'] != user_id: raise exception.Unauthorized(_('User IDs do not match')) session.delete(token_ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/core.py0000664000175000017500000001354300000000000020035 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the OAuth1 service.""" import uuid import oauthlib.common from oauthlib import oauth1 from oslo_log import log from keystone.common import manager import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications RequestValidator = oauth1.RequestValidator Client = oauth1.Client AccessTokenEndpoint = oauth1.AccessTokenEndpoint ResourceEndpoint = oauth1.ResourceEndpoint AuthorizationEndpoint = oauth1.AuthorizationEndpoint SIG_HMAC = oauth1.SIGNATURE_HMAC RequestTokenEndpoint = oauth1.RequestTokenEndpoint oRequest = oauthlib.common.Request class Token: def __init__(self, key, secret): self.key = key self.secret = secret self.verifier = None def set_verifier(self, verifier): self.verifier = verifier CONF = keystone.conf.CONF LOG = log.getLogger(__name__) def token_generator(*args, **kwargs): return uuid.uuid4().hex def get_oauth_headers(headers): parameters = {} # The incoming headers variable is your usual heading from context # In an OAuth signed req, where the oauth variables are in the header, # they with the key 'Authorization'. if headers and 'Authorization' in headers: # A typical value for Authorization is seen below # 'OAuth realm="", oauth_body_hash="2jm%3D", oauth_nonce="14475435" # along with other oauth variables, the 'OAuth ' part is trimmed # to split the rest of the headers. auth_header = headers['Authorization'] params = oauth1.rfc5849.utils.parse_authorization_header(auth_header) parameters.update(dict(params)) return parameters else: msg = 'Cannot retrieve Authorization headers' LOG.error(msg) raise exception.OAuthHeadersMissingError() def validate_oauth_params(query_string): # Invalid request would end up with the body like below: # 'error=invalid_request&description=missing+resource+owner+key' # Log this detail message so that we will know where is the # validation failed. 
params = oauthlib.common.extract_params(query_string) params_fitered = {k: v for k, v in params if not k.startswith('oauth_')} if params_fitered: if 'error' in params_fitered: msg = ( 'Validation failed with errors: %(error)s, detail ' 'message is: %(desc)s.' ) % { 'error': params_fitered['error'], 'desc': params_fitered['error_description'], } tr_msg = _( 'Validation failed with errors: %(error)s, detail ' 'message is: %(desc)s.' ) % { 'error': params_fitered['error'], 'desc': params_fitered['error_description'], } else: msg = ( 'Unknown parameters found,' 'please provide only oauth parameters.' ) tr_msg = _( 'Unknown parameters found,' 'please provide only oauth parameters.' ) LOG.warning(msg) raise exception.ValidationError(message=tr_msg) class Manager(manager.Manager): """Default pivot point for the OAuth1 backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.oauth1' _provides_api = 'oauth_api' _ACCESS_TOKEN = "OS-OAUTH1:access_token" # nosec _REQUEST_TOKEN = "OS-OAUTH1:request_token" # nosec _CONSUMER = "OS-OAUTH1:consumer" def __init__(self): super().__init__(CONF.oauth1.driver) def create_consumer(self, consumer_ref, initiator=None): consumer_ref = consumer_ref.copy() consumer_ref['secret'] = uuid.uuid4().hex ret = self.driver.create_consumer(consumer_ref) notifications.Audit.created(self._CONSUMER, ret['id'], initiator) return ret def update_consumer(self, consumer_id, consumer_ref, initiator=None): ret = self.driver.update_consumer(consumer_id, consumer_ref) notifications.Audit.updated(self._CONSUMER, consumer_id, initiator) return ret def delete_consumer(self, consumer_id, initiator=None): ret = self.driver.delete_consumer(consumer_id) notifications.Audit.deleted(self._CONSUMER, consumer_id, initiator) return ret def create_access_token( self, request_id, access_token_duration, initiator=None ): ret = self.driver.create_access_token( request_id, 
access_token_duration ) notifications.Audit.created(self._ACCESS_TOKEN, ret['id'], initiator) return ret def delete_access_token(self, user_id, access_token_id, initiator=None): ret = self.driver.delete_access_token(user_id, access_token_id) notifications.Audit.deleted( self._ACCESS_TOKEN, access_token_id, initiator ) return ret def create_request_token( self, consumer_id, requested_project, request_token_duration, initiator=None, ): ret = self.driver.create_request_token( consumer_id, requested_project, request_token_duration ) notifications.Audit.created(self._REQUEST_TOKEN, ret['id'], initiator) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/schema.py0000664000175000017500000000250300000000000020337 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types _consumer_properties = { 'description': validation.nullable(parameter_types.description) } consumer_create = { 'type': 'object', 'properties': _consumer_properties, 'additionalProperties': True, } consumer_update = { 'type': 'object', 'properties': _consumer_properties, 'not': {'required': ['secret']}, 'minProperties': 1, 'additionalProperties': True, } request_token_authorize = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': parameter_types.id_string, 'name': parameter_types.name, }, 'minProperties': 1, 'maxProperties': 1, 'additionalProperties': False, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth1/validator.py0000664000175000017500000002071000000000000021064 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oAuthlib request validator.""" from keystone.common import provider_api from keystone import exception from keystone.oauth1.backends import base from keystone.oauth1 import core as oauth1 METHOD_NAME = 'oauth_validator' PROVIDERS = provider_api.ProviderAPIs class OAuthValidator(provider_api.ProviderAPIMixin, oauth1.RequestValidator): # TODO(mhu) set as option probably? 
@property def enforce_ssl(self): return False @property def safe_characters(self): # oauth tokens are generated from a uuid hex value return set("abcdef0123456789") def _check_token(self, token): # generic token verification when they're obtained from a uuid hex return set(token) <= self.safe_characters and len(token) == 32 def check_client_key(self, client_key): return self._check_token(client_key) def check_request_token(self, request_token): return self._check_token(request_token) def check_access_token(self, access_token): return self._check_token(access_token) def check_nonce(self, nonce): # Assuming length is not a concern return set(nonce) <= self.safe_characters def check_verifier(self, verifier): return ( all(i in base.VERIFIER_CHARS for i in verifier) and len(verifier) == 8 ) def get_client_secret(self, client_key, request): client = PROVIDERS.oauth_api.get_consumer_with_secret(client_key) return client['secret'] def get_request_token_secret(self, client_key, token, request): token_ref = PROVIDERS.oauth_api.get_request_token(token) return token_ref['request_secret'] def get_access_token_secret(self, client_key, token, request): access_token = PROVIDERS.oauth_api.get_access_token(token) return access_token['access_secret'] def get_default_realms(self, client_key, request): # realms weren't implemented with the previous library return [] def get_realms(self, token, request): return [] def get_redirect_uri(self, token, request): # OOB (out of band) is supposed to be the default value to use return 'oob' def get_rsa_key(self, client_key, request): # HMAC signing is used, so return a dummy value return '' def invalidate_request_token(self, client_key, request_token, request): """Invalidate a used request token. :param client_key: The client/consumer key. :param request_token: The request token string. :param request: An oauthlib.common.Request object. :returns: None Per `Section 2.3`_ of the spec: "The server MUST (...) 
ensure that the temporary credentials have not expired or been used before." .. _`Section 2.3`: https://tools.ietf.org/html/rfc5849#section-2.3 This method should ensure that provided token won't validate anymore. It can be simply removing RequestToken from storage or setting specific flag that makes it invalid (note that such flag should be also validated during request token validation). This method is used by * AccessTokenEndpoint """ # FIXME(lbragstad): Remove the above documentation string once # https://bugs.launchpad.net/keystone/+bug/1778603 is resolved. It is # being duplicated here to work around oauthlib compatibility issues # with Sphinx 1.7.5, which have been reported upstream in # https://github.com/oauthlib/oauthlib/issues/558. # this method is invoked when an access token is generated out of a # request token, to make sure that request token cannot be consumed # anymore. This is done in the backend, so we do nothing here. pass def validate_client_key(self, client_key, request): try: return PROVIDERS.oauth_api.get_consumer(client_key) is not None except exception.NotFound: return False def validate_request_token(self, client_key, token, request): try: req_token = PROVIDERS.oauth_api.get_request_token(token) if req_token: return req_token['consumer_id'] == client_key else: return False except exception.NotFound: return False def validate_access_token(self, client_key, token, request): try: return PROVIDERS.oauth_api.get_access_token(token) is not None except exception.NotFound: return False def validate_timestamp_and_nonce( self, client_key, timestamp, nonce, request, request_token=None, access_token=None, ): return True def validate_redirect_uri(self, client_key, redirect_uri, request): # we expect OOB, we don't really care return True def validate_requested_realms(self, client_key, realms, request): # realms are not used return True def validate_realms( self, client_key, token, request, uri=None, realms=None ): return True def 
validate_verifier(self, client_key, token, verifier, request): try: req_token = PROVIDERS.oauth_api.get_request_token(token) return req_token['verifier'] == verifier except exception.NotFound: return False def verify_request_token(self, token, request): # there aren't strong expectations on the request token format return isinstance(token, str) def verify_realms(self, token, realms, request): return True # The following save_XXX methods are called to create tokens. I chose to # keep the original logic, but the comments below show how that could be # implemented. The real implementation logic is in the backend. def save_access_token(self, token, request): pass # token_duration = CONF.oauth1.request_token_duration # request_token_id = request.client_key # self.oauth_api.create_access_token(request_token_id, # token_duration, # token["oauth_token"], # token["oauth_token_secret"]) def save_request_token(self, token, request): pass # project_id = request.headers.get('Requested-Project-Id') # token_duration = CONF.oauth1.request_token_duration # self.oauth_api.create_request_token(request.client_key, # project_id, # token_duration, # token["oauth_token"], # token["oauth_token_secret"]) def save_verifier(self, token, verifier, request): """Associate an authorization verifier with a request token. :param token: A request token string. :param verifier: A dictionary containing the oauth_verifier and oauth_token :param request: An oauthlib.common.Request object. We need to associate verifiers with tokens for validation during the access token request. Note that unlike save_x_token token here is the ``oauth_token`` token string from the request token saved previously. This method is used by * AuthorizationEndpoint """ # FIXME(lbragstad): Remove the above documentation string once # https://bugs.launchpad.net/keystone/+bug/1778603 is resolved. 
It is # being duplicated here to work around oauthlib compatibility issues # with Sphinx 1.7.5, which have been reported upstream in # https://github.com/oauthlib/oauthlib/issues/558. # keep the old logic for this, as it is done in two steps and requires # information that the request validator has no access to pass ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/oauth2/0000775000175000017500000000000000000000000016526 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth2/__init__.py0000664000175000017500000000000000000000000020625 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/oauth2/handlers.py0000664000175000017500000000206000000000000020676 0ustar00zuulzuul00000000000000# Copyright 2022 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import flask from keystone.server import flask as ks_flask def build_response(error): response = flask.make_response( ( { 'error': error.error_title, 'error_description': error.message_format, }, f"{error.code} {error.title}", ) ) if error.code == 401: response.headers['WWW-Authenticate'] = ( 'Keystone uri="%s"' % ks_flask.base_url() ) return response ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/policy/0000775000175000017500000000000000000000000016623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/__init__.py0000664000175000017500000000116500000000000020737 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.policy.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/policy/backends/0000775000175000017500000000000000000000000020375 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/backends/__init__.py0000664000175000017500000000000000000000000022474 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/backends/base.py0000664000175000017500000000437300000000000021670 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import keystone.conf from keystone import exception CONF = keystone.conf.CONF class PolicyDriverBase(metaclass=abc.ABCMeta): def _get_list_limit(self): return CONF.policy.list_limit or CONF.list_limit @abc.abstractmethod def enforce(self, context, credentials, action, target): """Verify that a user is authorized to perform action. For more information on a full implementation of this see: `keystone.policy.backends.rules.Policy.enforce` """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def create_policy(self, policy_id, policy): """Store a policy blob. :raises keystone.exception.Conflict: If a duplicate policy exists. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_policies(self): """List all policies.""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_policy(self, policy_id): """Retrieve a specific policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_policy(self, policy_id, policy): """Update a policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_policy(self, policy_id): """Remove a policy blob. :raises keystone.exception.PolicyNotFound: If the policy doesn't exist. """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/backends/rules.py0000664000175000017500000000277400000000000022113 0ustar00zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Policy engine for keystone.""" from oslo_log import log from keystone.common.rbac_enforcer import policy from keystone import exception from keystone.policy.backends import base LOG = log.getLogger(__name__) class Policy(base.PolicyDriverBase): def enforce(self, credentials, action, target): msg = 'enforce %(action)s: %(credentials)s' LOG.debug(msg, {'action': action, 'credentials': credentials}) policy.enforce(credentials, action, target) def create_policy(self, policy_id, policy): raise exception.NotImplemented() def list_policies(self): raise exception.NotImplemented() def get_policy(self, policy_id): raise exception.NotImplemented() def update_policy(self, policy_id, policy): raise exception.NotImplemented() def delete_policy(self, policy_id): raise exception.NotImplemented() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/backends/sql.py0000664000175000017500000000503500000000000021551 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone import exception from keystone.policy.backends import rules class PolicyModel(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'policy' attributes = ['id', 'blob', 'type'] id = sql.Column(sql.String(64), primary_key=True) blob = sql.Column(sql.JsonBlob(), nullable=False) type = sql.Column(sql.String(255), nullable=False) extra = sql.Column(sql.JsonBlob()) class Policy(rules.Policy): @sql.handle_conflicts(conflict_type='policy') def create_policy(self, policy_id, policy): with sql.session_for_write() as session: ref = PolicyModel.from_dict(policy) session.add(ref) return ref.to_dict() def list_policies(self): with sql.session_for_read() as session: refs = session.query(PolicyModel).all() return [ref.to_dict() for ref in refs] def _get_policy(self, session, policy_id): """Private method to get a policy model object (NOT a dictionary).""" ref = session.get(PolicyModel, policy_id) if not ref: raise exception.PolicyNotFound(policy_id=policy_id) return ref def get_policy(self, policy_id): with sql.session_for_read() as session: return self._get_policy(session, policy_id).to_dict() @sql.handle_conflicts(conflict_type='policy') def update_policy(self, policy_id, policy): with sql.session_for_write() as session: ref = self._get_policy(session, policy_id) old_dict = ref.to_dict() old_dict.update(policy) new_policy = PolicyModel.from_dict(old_dict) ref.blob = new_policy.blob ref.type = new_policy.type ref.extra = new_policy.extra return ref.to_dict() def delete_policy(self, policy_id): with sql.session_for_write() as session: ref = self._get_policy(session, policy_id) session.delete(ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/core.py0000664000175000017500000000436400000000000020134 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you 
may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Policy service.""" from keystone.common import manager import keystone.conf from keystone import exception from keystone import notifications CONF = keystone.conf.CONF class Manager(manager.Manager): """Default pivot point for the Policy backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.policy' _provides_api = 'policy_api' _POLICY = 'policy' def __init__(self): super().__init__(CONF.policy.driver) def create_policy(self, policy_id, policy, initiator=None): ref = self.driver.create_policy(policy_id, policy) notifications.Audit.created(self._POLICY, policy_id, initiator) return ref def get_policy(self, policy_id): return self.driver.get_policy(policy_id) def update_policy(self, policy_id, policy, initiator=None): if 'id' in policy and policy_id != policy['id']: raise exception.ValidationError('Cannot change policy ID') ref = self.driver.update_policy(policy_id, policy) notifications.Audit.updated(self._POLICY, policy_id, initiator) return ref @manager.response_truncated def list_policies(self, hints=None): # NOTE(henry-nash): Since the advantage of filtering or list limiting # of policies at the driver level is minimal, we leave this to the # caller. 
return self.driver.list_policies() def delete_policy(self, policy_id, initiator=None): ret = self.driver.delete_policy(policy_id) notifications.Audit.deleted(self._POLICY, policy_id, initiator) return ret ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/policy/schema.py0000664000175000017500000000165100000000000020440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. _policy_properties = { 'blob': {'type': 'string'}, 'type': {'type': 'string', 'maxLength': 255}, } policy_create = { 'type': 'object', 'properties': _policy_properties, 'required': ['blob', 'type'], 'additionalProperties': True, } policy_update = { 'type': 'object', 'properties': _policy_properties, 'minProperties': 1, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/receipt/0000775000175000017500000000000000000000000016757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/__init__.py0000664000175000017500000000120700000000000021070 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.receipt import provider __all__ = ("provider",) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/handlers.py0000664000175000017500000000471400000000000021137 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import flask from oslo_serialization import jsonutils from keystone.common import authorization from keystone.common import provider_api from keystone import exception PROVIDERS = provider_api.ProviderAPIs def extract_receipt(auth_context): receipt_id = flask.request.headers.get( authorization.AUTH_RECEIPT_HEADER, None ) if receipt_id: receipt = PROVIDERS.receipt_provider_api.validate_receipt(receipt_id) if auth_context['user_id'] != receipt.user_id: raise exception.ReceiptNotFound( "AuthContext user_id: %s does not match " "user_id for supplied auth receipt: %s" % (auth_context['user_id'], receipt.user_id), receipt_id=receipt_id, ) else: receipt = None return receipt def _render_receipt_response_from_model(receipt): receipt_reference = { 'receipt': { 'methods': receipt.methods, 'user': { 'id': receipt.user['id'], 'name': receipt.user['name'], 'domain': { 'id': receipt.user_domain['id'], 'name': receipt.user_domain['name'], }, }, 'expires_at': receipt.expires_at, 'issued_at': receipt.issued_at, }, 'required_auth_methods': receipt.required_methods, } return receipt_reference def build_receipt(mfa_error): receipt = PROVIDERS.receipt_provider_api.issue_receipt( mfa_error.user_id, mfa_error.methods ) resp_data = _render_receipt_response_from_model(receipt) resp_body = jsonutils.dumps(resp_data) response = flask.make_response(resp_body, http.client.UNAUTHORIZED) response.headers[authorization.AUTH_RECEIPT_HEADER] = receipt.id response.headers['Content-Type'] = 'application/json' return response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/provider.py0000664000175000017500000001406400000000000021170 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Receipt provider interface.""" import datetime from oslo_log import log from oslo_utils import timeutils from keystone.common import cache from keystone.common import manager from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.models import receipt_model from keystone import notifications CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs RECEIPTS_REGION = cache.create_region(name='receipts') MEMOIZE_RECEIPTS = cache.get_memoization_decorator( group='receipt', region=RECEIPTS_REGION ) def default_expire_time(): """Determine when a fresh receipt should expire. Expiration time varies based on configuration (see ``[receipt] expiration``). :returns: a naive UTC datetime.datetime object """ expire_delta = datetime.timedelta(seconds=CONF.receipt.expiration) expires_at = timeutils.utcnow() + expire_delta return expires_at.replace(microsecond=0) class Manager(manager.Manager): """Default pivot point for the receipt provider backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.receipt.provider' _provides_api = 'receipt_provider_api' def __init__(self): super().__init__(CONF.receipt.provider) self._register_callback_listeners() def _register_callback_listeners(self): callbacks = { notifications.ACTIONS.deleted: [ ['OS-TRUST:trust', self._drop_receipt_cache], ['user', self._drop_receipt_cache], ['domain', self._drop_receipt_cache], ], notifications.ACTIONS.disabled: [ ['user', self._drop_receipt_cache], ['domain', self._drop_receipt_cache], ['project', self._drop_receipt_cache], ], notifications.ACTIONS.internal: [ [ notifications.INVALIDATE_TOKEN_CACHE, self._drop_receipt_cache, ], ], } for event, cb_info in callbacks.items(): for resource_type, callback_fns in cb_info: notifications.register_event_callback( event, resource_type, callback_fns ) def _drop_receipt_cache(self, service, resource_type, operation, payload): """Invalidate the entire receipt cache. This is a handy private utility method that should be used when consuming notifications that signal invalidating the receipt cache. 
""" if CONF.receipt.cache_on_issue: RECEIPTS_REGION.invalidate() def validate_receipt(self, receipt_id, window_seconds=0): if not receipt_id: raise exception.ReceiptNotFound( _('No receipt in the request'), receipt_id=receipt_id ) try: receipt = self._validate_receipt(receipt_id) self._is_valid_receipt(receipt, window_seconds=window_seconds) return receipt except exception.Unauthorized as e: LOG.debug('Unable to validate receipt: %s', e) raise exception.ReceiptNotFound(receipt_id=receipt_id) @MEMOIZE_RECEIPTS def _validate_receipt(self, receipt_id): (user_id, methods, issued_at, expires_at) = ( self.driver.validate_receipt(receipt_id) ) receipt = receipt_model.ReceiptModel() receipt.user_id = user_id receipt.methods = methods receipt.expires_at = expires_at receipt.mint(receipt_id, issued_at) return receipt def _is_valid_receipt(self, receipt, window_seconds=0): """Verify the receipt is valid format and has not expired.""" current_time = timeutils.normalize_time(timeutils.utcnow()) try: expiry = timeutils.parse_isotime(receipt.expires_at) expiry = timeutils.normalize_time(expiry) # add a window in which you can fetch a receipt beyond expiry expiry += datetime.timedelta(seconds=window_seconds) except Exception: LOG.exception( 'Unexpected error or malformed receipt ' 'determining receipt expiry: %s', receipt, ) raise exception.ReceiptNotFound( _('Failed to validate receipt'), receipt_id=receipt.id ) if current_time < expiry: return None else: raise exception.ReceiptNotFound( _('Failed to validate receipt'), receipt_id=receipt.id ) def issue_receipt(self, user_id, method_names, expires_at=None): receipt = receipt_model.ReceiptModel() receipt.user_id = user_id receipt.methods = method_names if isinstance(expires_at, datetime.datetime): receipt.expires_at = utils.isotime(expires_at, subsecond=True) if isinstance(expires_at, str): receipt.expires_at = expires_at elif not expires_at: receipt.expires_at = utils.isotime( default_expire_time(), subsecond=True ) receipt_id, 
issued_at = self.driver.generate_id_and_issued_at(receipt) receipt.mint(receipt_id, issued_at) if CONF.receipt.cache_on_issue: self._validate_receipt.set(receipt, RECEIPTS_REGION, receipt_id) return receipt ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/receipt/providers/0000775000175000017500000000000000000000000020774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/providers/__init__.py0000664000175000017500000000000000000000000023073 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/providers/base.py0000664000175000017500000000370500000000000022265 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class Provider(metaclass=abc.ABCMeta): """Interface description for a Receipt provider.""" @abc.abstractmethod def validate_receipt(self, receipt_id): """Validate a given receipt by its ID and return the receipt_data. 
:param receipt_id: the unique ID of the receipt :type receipt_id: str :returns: receipt data as a tuple in the form of: (user_id, methods, issued_at, expires_at) ``user_id`` is the unique ID of the user as a string ``methods`` a list of authentication methods used to obtain the receipt ``issued_at`` a datetime object of when the receipt was minted ``expires_at`` a datetime object of when the receipt expires :raises keystone.exception.ReceiptNotFound: when receipt doesn't exist. """ @abc.abstractmethod def generate_id_and_issued_at(self, receipt): """Generate a receipt based on the information provided. :param receipt: A receipt object containing information about the authorization context of the request. :type receipt: `keystone.models.receipt.ReceiptModel` :returns: tuple containing an ID for the receipt and the issued at time of the receipt (receipt_id, issued_at). """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5301137 keystone-26.0.0/keystone/receipt/providers/fernet/0000775000175000017500000000000000000000000022257 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/providers/fernet/__init__.py0000664000175000017500000000123500000000000024371 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from keystone.receipt.providers.fernet.core import Provider __all__ = ("Provider",) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/providers/fernet/core.py0000664000175000017500000000512200000000000023561 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystone.common import utils as ks_utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.receipt.providers import base from keystone.receipt import receipt_formatters as tf CONF = keystone.conf.CONF class Provider(base.Provider): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # NOTE(lbragstad): We add these checks here because if the fernet # provider is going to be used and either the `key_repository` is empty # or doesn't exist we should fail, hard. It doesn't make sense to start # keystone and just 500 because we can't do anything with an empty or # non-existant key repository. 
if not os.path.exists(CONF.fernet_receipts.key_repository): subs = {'key_repo': CONF.fernet_receipts.key_repository} raise SystemExit(_('%(key_repo)s does not exist') % subs) if not os.listdir(CONF.fernet_receipts.key_repository): subs = {'key_repo': CONF.fernet_receipts.key_repository} raise SystemExit( _( '%(key_repo)s does not contain keys, use ' 'keystone-manage fernet_setup to create ' 'Fernet keys.' ) % subs ) self.receipt_formatter = tf.ReceiptFormatter() def validate_receipt(self, receipt_id): try: return self.receipt_formatter.validate_receipt(receipt_id) except exception.ValidationError: raise exception.ReceiptNotFound(receipt_id=receipt_id) def generate_id_and_issued_at(self, receipt): receipt_id = self.receipt_formatter.create_receipt( receipt.user_id, receipt.methods, receipt.expires_at, ) creation_datetime_obj = self.receipt_formatter.creation_time( receipt_id ) issued_at = ks_utils.isotime(at=creation_datetime_obj, subsecond=True) return receipt_id, issued_at ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/receipt/receipt_formatters.py0000664000175000017500000002424500000000000023241 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import datetime import struct import uuid from cryptography import fernet import msgpack from oslo_log import log from oslo_utils import timeutils from keystone.auth import plugins as auth_plugins from keystone.common import fernet_utils as utils from keystone.common import utils as ks_utils import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) # Fernet byte indexes as computed by pypi/keyless_fernet and defined in # https://github.com/fernet/spec TIMESTAMP_START = 1 TIMESTAMP_END = 9 class ReceiptFormatter: """Packs and unpacks payloads into receipts for transport.""" @property def crypto(self): """Return a cryptography instance. You can extend this class with a custom crypto @property to provide your own receipt encoding / decoding. For example, using a different cryptography library (e.g. ``python-keyczar``) or to meet arbitrary security requirements. This @property just needs to return an object that implements ``encrypt(plaintext)`` and ``decrypt(ciphertext)``. """ fernet_utils = utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) keys = fernet_utils.load_keys() if not keys: raise exception.KeysNotFound() fernet_instances = [fernet.Fernet(key) for key in keys] return fernet.MultiFernet(fernet_instances) def pack(self, payload): """Pack a payload for transport as a receipt. :type payload: bytes :rtype: str """ # base64 padding (if any) is not URL-safe return self.crypto.encrypt(payload).rstrip(b'=').decode('utf-8') def unpack(self, receipt): """Unpack a receipt, and validate the payload. 
:type receipt: str :rtype: bytes """ receipt = ReceiptFormatter.restore_padding(receipt) try: return self.crypto.decrypt(receipt.encode('utf-8')) except fernet.InvalidToken: raise exception.ValidationError( _('This is not a recognized Fernet receipt %s') % receipt ) @classmethod def restore_padding(cls, receipt): """Restore padding based on receipt size. :param receipt: receipt to restore padding on :type receipt: str :returns: receipt with correct padding """ # Re-inflate the padding mod_returned = len(receipt) % 4 if mod_returned: missing_padding = 4 - mod_returned receipt += '=' * missing_padding return receipt @classmethod def creation_time(cls, fernet_receipt): """Return the creation time of a valid Fernet receipt. :type fernet_receipt: str """ fernet_receipt = ReceiptFormatter.restore_padding(fernet_receipt) # fernet_receipt is str # Fernet receipts are base64 encoded, so we need to unpack them first # urlsafe_b64decode() requires bytes receipt_bytes = base64.urlsafe_b64decode( fernet_receipt.encode('utf-8') ) # slice into the byte array to get just the timestamp timestamp_bytes = receipt_bytes[TIMESTAMP_START:TIMESTAMP_END] # convert those bytes to an integer # (it's a 64-bit "unsigned long long int" in C) timestamp_int = struct.unpack(">Q", timestamp_bytes)[0] # and with an integer, it's trivial to produce a datetime object issued_at = datetime.datetime.fromtimestamp( timestamp_int, datetime.timezone.utc ).replace(tzinfo=None) return issued_at def create_receipt(self, user_id, methods, expires_at): """Given a set of payload attributes, generate a Fernet receipt.""" payload = ReceiptPayload.assemble(user_id, methods, expires_at) serialized_payload = msgpack.packb(payload) receipt = self.pack(serialized_payload) # NOTE(lbragstad): We should warn against Fernet receipts that are over # 255 characters in length. This is mostly due to persisting the # receipts in a backend store of some kind that might have a limit of # 255 characters. 
Even though Keystone isn't storing a Fernet receipt # anywhere, we can't say it isn't being stored somewhere else with # those kind of backend constraints. if len(receipt) > 255: LOG.info( 'Fernet receipt created with length of %d ' 'characters, which exceeds 255 characters', len(receipt), ) return receipt def validate_receipt(self, receipt): """Validate a Fernet receipt and returns the payload attributes. :type receipt: str """ serialized_payload = self.unpack(receipt) payload = msgpack.unpackb(serialized_payload) (user_id, methods, expires_at) = ReceiptPayload.disassemble(payload) # rather than appearing in the payload, the creation time is encoded # into the receipt format itself issued_at = ReceiptFormatter.creation_time(receipt) issued_at = ks_utils.isotime(at=issued_at, subsecond=True) expires_at = timeutils.parse_isotime(expires_at) expires_at = ks_utils.isotime(at=expires_at, subsecond=True) return (user_id, methods, issued_at, expires_at) class ReceiptPayload: @classmethod def assemble(cls, user_id, methods, expires_at): """Assemble the payload of a receipt. :param user_id: identifier of the user in the receipt request :param methods: list of authentication methods used :param expires_at: datetime of the receipt's expiration :returns: the payload of a receipt """ b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) expires_at_int = cls._convert_time_string_to_float(expires_at) return (b_user_id, methods, expires_at_int) @classmethod def disassemble(cls, payload): """Disassemble a payload into the component data. The tuple consists of:: (user_id, methods, expires_at_str) * ``methods`` are the auth methods. 
:param payload: this variant of payload :returns: a tuple of the payloads component data """ (is_stored_as_bytes, user_id) = payload[0] if is_stored_as_bytes: user_id = cls.convert_uuid_bytes_to_hex(user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) expires_at_str = cls._convert_float_to_time_string(payload[2]) return (user_id, methods, expires_at_str) @classmethod def convert_uuid_hex_to_bytes(cls, uuid_string): """Compress UUID formatted strings to bytes. :param uuid_string: uuid string to compress to bytes :returns: a byte representation of the uuid """ uuid_obj = uuid.UUID(uuid_string) return uuid_obj.bytes @classmethod def convert_uuid_bytes_to_hex(cls, uuid_byte_string): """Generate uuid.hex format based on byte string. :param uuid_byte_string: uuid string to generate from :returns: uuid hex formatted string """ uuid_obj = uuid.UUID(bytes=uuid_byte_string) return uuid_obj.hex @classmethod def _convert_time_string_to_float(cls, time_string): """Convert a time formatted string to a float. :param time_string: time formatted string :returns: a timestamp as a float """ time_object = timeutils.parse_isotime(time_string) return ( timeutils.normalize_time(time_object) - datetime.datetime.fromtimestamp( 0, datetime.timezone.utc ).replace(tzinfo=None) ).total_seconds() @classmethod def _convert_float_to_time_string(cls, time_float): """Convert a floating point timestamp to a string. :param time_float: integer representing timestamp :returns: a time formatted strings """ time_object = datetime.datetime.fromtimestamp( time_float, datetime.timezone.utc ).replace(tzinfo=None) return ks_utils.isotime(time_object, subsecond=True) @classmethod def attempt_convert_uuid_hex_to_bytes(cls, value): """Attempt to convert value to bytes or return value. 
:param value: value to attempt to convert to bytes :returns: tuple containing boolean indicating whether user_id was stored as bytes and uuid value as bytes or the original value """ try: return (True, cls.convert_uuid_hex_to_bytes(value)) except ValueError: # this might not be a UUID, depending on the situation (i.e. # federation) return (False, value) @classmethod def base64_encode(cls, s): """Encode a URL-safe string. :type s: str :rtype: str """ # urlsafe_b64encode() returns bytes so need to convert to # str, might as well do it before stripping. return base64.urlsafe_b64encode(s).decode('utf-8').rstrip('=') @classmethod def random_urlsafe_str_to_bytes(cls, s): """Convert string from :func:`random_urlsafe_str()` to bytes. :type s: str :rtype: bytes """ # urlsafe_b64decode() requires str, unicode isn't accepted. s = str(s) # restore the padding (==) at the end of the string return base64.urlsafe_b64decode(s + '==') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/resource/0000775000175000017500000000000000000000000017153 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/__init__.py0000664000175000017500000000111700000000000021264 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.resource.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/resource/backends/0000775000175000017500000000000000000000000020725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/backends/__init__.py0000664000175000017500000000000000000000000023024 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/backends/base.py0000664000175000017500000002132200000000000022211 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import keystone.conf from keystone import exception CONF = keystone.conf.CONF def get_project_from_domain(domain_ref): """Create a project ref from the provided domain ref.""" project_ref = domain_ref.copy() project_ref['is_domain'] = True project_ref['domain_id'] = None project_ref['parent_id'] = None return project_ref # The provided SQL driver uses a special value to represent a domain_id of # None. See comment in Project class of resource/backends/sql.py for more # details. 
NULL_DOMAIN_ID = '<>' class ResourceDriverBase(metaclass=abc.ABCMeta): def _get_list_limit(self): return CONF.resource.list_limit or CONF.list_limit # project crud @abc.abstractmethod def list_projects(self, hints): """List projects in the system. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_from_ids(self, project_ids): """List projects for the provided list of ids. :param project_ids: list of ids :returns: a list of project_refs. This method is used internally by the assignment manager to bulk read a set of projects given their ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_ids_from_domain_ids(self, domain_ids): """List project ids for the provided list of domain ids. :param domain_ids: list of domain ids :returns: a list of project ids owned by the specified domain ids. This method is used internally by the assignment manager to bulk read a set of project ids given a list of domain ids. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_in_domain(self, domain_id): """List projects in the domain. :param domain_id: the driver MUST only return projects within this domain. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_project(self, project_id): """Get a project by ID. :returns: project_ref :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_project(self, project_id, project): """Update an existing project. 
:raises keystone.exception.ProjectNotFound: if project_id does not exist :raises keystone.exception.Conflict: if project name already exists """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_project(self, project_id): """Delete an existing project. :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_project_parents(self, project_id): """List all parents from a project by its ID. :param project_id: the driver will list the parents of this project. :returns: a list of project_refs or an empty list. :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() @abc.abstractmethod def list_projects_in_subtree(self, project_id): """List all projects in the subtree of a given project. :param project_id: the driver will get the subtree under this project. :returns: a list of project_refs or an empty list :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() @abc.abstractmethod def is_leaf_project(self, project_id): """Check if a project is a leaf in the hierarchy. :param project_id: the driver will check if this project is a leaf in the hierarchy. :raises keystone.exception.ProjectNotFound: if project_id does not exist """ raise exception.NotImplemented() def _validate_default_domain(self, ref): """Validate that either the default domain or nothing is specified. Also removes the domain from the ref so that LDAP doesn't have to persist the attribute. 
""" ref = ref.copy() domain_id = ref.pop('domain_id', CONF.identity.default_domain_id) self._validate_default_domain_id(domain_id) return ref def _validate_default_domain_id(self, domain_id): """Validate that the domain ID belongs to the default domain.""" if domain_id != CONF.identity.default_domain_id: raise exception.DomainNotFound(domain_id=domain_id) @abc.abstractmethod def create_project(self, project_id, project): """Create a new project. :param project_id: This parameter can be ignored. :param dict project: The new project Project schema:: type: object properties: id: type: string name: type: string domain_id: type: [string, null] description: type: string enabled: type: boolean parent_id: type: string is_domain: type: boolean required: [id, name, domain_id] additionalProperties: true If the project doesn't match the schema the behavior is undefined. The driver can impose requirements such as the maximum length of a field. If these requirements are not met the behavior is undefined. :raises keystone.exception.Conflict: if the project id already exists or the name already exists for the domain_id. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_project_by_name(self, project_name, domain_id): """Get a project by name. :returns: project_ref :raises keystone.exception.ProjectNotFound: if a project with the project_name does not exist within the domain """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_projects_from_ids(self, project_ids): """Delete a given list of projects. Deletes a list of projects. Ensures no project on the list exists after it is successfully called. If an empty list is provided, the it is silently ignored. In addition, if a project ID in the list of project_ids is not found in the backend, no exception is raised, but a message is logged. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_projects_acting_as_domain(self, hints): """List all projects acting as domains. :param hints: filter hints which the driver should implement if at all possible. :returns: a list of project_refs or an empty list. """ raise exception.NotImplemented() # pragma: no cover def check_project_depth(self, max_depth): """Check the projects depth in the backend whether exceed the limit. :param max_depth: the limit depth that project depth should not exceed. :type max_depth: integer :returns: the exceeded project's id or None if no exceeding. """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/backends/resource_options.py0000664000175000017500000000174600000000000024711 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import resource_options from keystone.common.resource_options import options as ro_opt PROJECT_OPTIONS_REGISTRY = resource_options.ResourceOptionRegistry('PROJECT') # NOTE(morgan): wrap this in a function for testing purposes. # This is called on import by design. 
def register_role_options(): for opt in [ ro_opt.IMMUTABLE_OPT, ]: PROJECT_OPTIONS_REGISTRY.register_option(opt) register_role_options() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/backends/sql.py0000664000175000017500000003745500000000000022114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from sqlalchemy import orm from sqlalchemy.sql import expression from keystone.common import driver_hints from keystone.common import resource_options from keystone.common import sql from keystone import exception from keystone.resource.backends import base from keystone.resource.backends import sql_model LOG = log.getLogger(__name__) class Resource(base.ResourceDriverBase): def _encode_domain_id(self, ref): if 'domain_id' in ref and ref['domain_id'] is None: new_ref = ref.copy() new_ref['domain_id'] = base.NULL_DOMAIN_ID return new_ref else: return ref def _is_hidden_ref(self, ref): return ref.id == base.NULL_DOMAIN_ID def _get_project(self, session, project_id): project_ref = session.get(sql_model.Project, project_id) if project_ref is None or self._is_hidden_ref(project_ref): raise exception.ProjectNotFound(project_id=project_id) return project_ref def get_project(self, project_id): with sql.session_for_read() as session: return self._get_project(session, project_id).to_dict() def get_project_by_name(self, project_name, domain_id): with 
sql.session_for_read() as session: query = session.query(sql_model.Project) query = query.filter_by(name=project_name) if domain_id is None: query = query.filter_by(domain_id=base.NULL_DOMAIN_ID) else: query = query.filter_by(domain_id=domain_id) try: project_ref = query.one() except sql.NotFound: raise exception.ProjectNotFound(project_id=project_name) if self._is_hidden_ref(project_ref): raise exception.ProjectNotFound(project_id=project_name) return project_ref.to_dict() @driver_hints.truncated def list_projects(self, hints): # If there is a filter on domain_id and the value is None, then to # ensure that the sql filtering works correctly, we need to patch # the value to be NULL_DOMAIN_ID. This is safe to do here since we # know we are able to satisfy any filter of this type in the call to # filter_limit_query() below, which will remove the filter from the # hints (hence ensuring our substitution is not exposed to the caller). for f in hints.filters: if f['name'] == 'domain_id' and f['value'] is None: f['value'] = base.NULL_DOMAIN_ID with sql.session_for_read() as session: query = session.query(sql_model.Project) query = query.filter(sql_model.Project.id != base.NULL_DOMAIN_ID) project_refs = sql.filter_limit_query( sql_model.Project, query, hints ) return [project_ref.to_dict() for project_ref in project_refs] def list_projects_from_ids(self, ids): if not ids: return [] else: with sql.session_for_read() as session: query = session.query(sql_model.Project) query = query.filter(sql_model.Project.id.in_(ids)) return [ project_ref.to_dict() for project_ref in query.all() if not self._is_hidden_ref(project_ref) ] def list_project_ids_from_domain_ids(self, domain_ids): if not domain_ids: return [] else: with sql.session_for_read() as session: query = session.query(sql_model.Project.id) query = query.filter( sql_model.Project.domain_id.in_(domain_ids) ) return [ x.id for x in query.all() if not self._is_hidden_ref(x) ] def list_projects_in_domain(self, domain_id): 
with sql.session_for_read() as session: try: self._get_project(session, domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) query = session.query(sql_model.Project) project_refs = query.filter( sql_model.Project.domain_id == domain_id ) return [project_ref.to_dict() for project_ref in project_refs] def list_projects_acting_as_domain(self, hints): hints.add_filter('is_domain', True) return self.list_projects(hints) def _get_children(self, session, project_ids, domain_id=None): query = session.query(sql_model.Project) query = query.filter(sql_model.Project.parent_id.in_(project_ids)) project_refs = query.all() return [project_ref.to_dict() for project_ref in project_refs] def list_projects_in_subtree(self, project_id): with sql.session_for_read() as session: children = self._get_children(session, [project_id]) subtree = [] examined = {project_id} while children: children_ids = set() for ref in children: if ref['id'] in examined: msg = ( 'Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.' ) LOG.error(msg, {'project_id': ref['id']}) return children_ids.add(ref['id']) examined.update(children_ids) subtree += children children = self._get_children(session, children_ids) return subtree def list_project_parents(self, project_id): with sql.session_for_read() as session: project = self._get_project(session, project_id).to_dict() parents = [] examined = set() while project.get('parent_id') is not None: if project['id'] in examined: msg = ( 'Circular reference or a repeated ' 'entry found in projects hierarchy - ' '%(project_id)s.' 
) LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) parent_project = self._get_project( session, project['parent_id'] ).to_dict() parents.append(parent_project) project = parent_project return parents def is_leaf_project(self, project_id): with sql.session_for_read() as session: project_refs = self._get_children(session, [project_id]) return not project_refs def list_projects_by_tags(self, filters): filtered_ids = [] with sql.session_for_read() as session: query = session.query(sql_model.ProjectTag) if 'tags' in filters.keys(): filtered_ids += self._filter_ids_by_tags( query, filters['tags'].split(',') ) if 'tags-any' in filters.keys(): any_tags = filters['tags-any'].split(',') subq = query.filter(sql_model.ProjectTag.name.in_(any_tags)) any_tags = [ptag['project_id'] for ptag in subq] if 'tags' in filters.keys(): any_tags = set(any_tags) & set(filtered_ids) filtered_ids = any_tags if 'not-tags' in filters.keys(): blacklist_ids = self._filter_ids_by_tags( query, filters['not-tags'].split(',') ) filtered_ids = self._filter_not_tags( session, filtered_ids, blacklist_ids ) if 'not-tags-any' in filters.keys(): any_tags = filters['not-tags-any'].split(',') subq = query.filter(sql_model.ProjectTag.name.in_(any_tags)) blacklist_ids = [ptag['project_id'] for ptag in subq] if 'not-tags' in filters.keys(): filtered_ids += blacklist_ids else: filtered_ids = self._filter_not_tags( session, filtered_ids, blacklist_ids ) if not filtered_ids: return [] query = session.query(sql_model.Project) query = query.filter(sql_model.Project.id.in_(filtered_ids)) return [ project_ref.to_dict() for project_ref in query.all() if not self._is_hidden_ref(project_ref) ] def _filter_ids_by_tags(self, query, tags): filtered_ids = [] subq = query.filter(sql_model.ProjectTag.name.in_(tags)) for ptag in subq: subq_tags = query.filter( sql_model.ProjectTag.project_id == ptag['project_id'] ) result = map(lambda x: x['name'], subq_tags.all()) if set(tags) <= set(result): 
filtered_ids.append(ptag['project_id']) return filtered_ids def _filter_not_tags(self, session, filtered_ids, blacklist_ids): subq = session.query(sql_model.Project) valid_ids = [q['id'] for q in subq if q['id'] not in blacklist_ids] if filtered_ids: valid_ids = list(set(valid_ids) & set(filtered_ids)) return valid_ids # CRUD @sql.handle_conflicts(conflict_type='project') def create_project(self, project_id, project): new_project = self._encode_domain_id(project) with sql.session_for_write() as session: project_ref = sql_model.Project.from_dict(new_project) session.add(project_ref) # Set resource options passed on creation resource_options.resource_options_ref_to_mapper( project_ref, sql_model.ProjectOption ) return project_ref.to_dict() @sql.handle_conflicts(conflict_type='project') def update_project(self, project_id, project): update_project = self._encode_domain_id(project) with sql.session_for_write() as session: project_ref = self._get_project(session, project_id) old_project_dict = project_ref.to_dict() for k in update_project: old_project_dict[k] = update_project[k] # When we read the old_project_dict, any "null" domain_id will have # been decoded, so we need to re-encode it old_project_dict = self._encode_domain_id(old_project_dict) new_project = sql_model.Project.from_dict(old_project_dict) for attr in sql_model.Project.attributes: if attr != 'id': setattr(project_ref, attr, getattr(new_project, attr)) # Move the "_resource_options" attribute over to the real ref # so that resource_options.resource_options_ref_to_mapper can # handle the work. 
setattr( project_ref, '_resource_options', getattr(new_project, '_resource_options', {}), ) # Move options into the proper attribute mapper construct resource_options.resource_options_ref_to_mapper( project_ref, sql_model.ProjectOption ) project_ref.extra = new_project.extra return project_ref.to_dict(include_extra_dict=True) @sql.handle_conflicts(conflict_type='project') def delete_project(self, project_id): with sql.session_for_write() as session: project_ref = self._get_project(session, project_id) session.delete(project_ref) @sql.handle_conflicts(conflict_type='project') def delete_projects_from_ids(self, project_ids): if not project_ids: return with sql.session_for_write() as session: query = session.query(sql_model.Project).filter( sql_model.Project.id.in_(project_ids) ) project_ids_from_bd = [p['id'] for p in query.all()] for project_id in project_ids: if ( project_id not in project_ids_from_bd or project_id == base.NULL_DOMAIN_ID ): LOG.warning( 'Project %s does not exist and was not deleted.', project_id, ) query.delete(synchronize_session=False) def check_project_depth(self, max_depth): with sql.session_for_read() as session: obj_list = [] # Using db table self outerjoin to find the project descendants. # # We'll only outerjoin the project table `max_depth` times to # check whether current project tree exceed the max depth limit. # # For example: # # If max_depth is 2, we will take the outerjoin 2 times, then the # SQL result may be like: # # +---- +-------------+-------------+-------------+ # | No. 
| project1_id | project2_id | project3_id | # +--- -+-------------+-------------+-------------+ # | 1 | domain_x | | | # +- ---+-------------+-------------+-------------+ # | 2 | project_a | | | # +- ---+-------------+-------------+-------------+ # | 3 | domain_y | project_a | | # +- ---+-------------+-------------+-------------+ # | 4 | project_b | project_c | | # +- ---+-------------+-------------+-------------+ # | 5 | domain_y | project_b | project_c | # +- ---+-------------+-------------+-------------+ # # `project1_id` column is the root. It is a project or a domain. # If `project1_id` is a project, there must exist a line that # `project1` is its domain. # # We got 5 lines here. It includes three scenarios: # # 1). The No.1 line means there is a domain `domain_x` which has no # children. The depth is 1. # # 2). The No.2 and No.3 lines mean project `project_a` has no child # and its parent is domain `domain_y`. The depth is 2. # # 3). The No.4 and No.5 lines mean project `project_b` has a child # `project_c` and its parent is domain `domain_y`. The depth is # 3. This tree hit the max depth # # So we can see that if column "project3_id" has value, it means # some trees hit the max depth limit. for _ in range(max_depth + 1): obj_list.append(orm.aliased(sql_model.Project)) query = session.query(*obj_list) for index in range(max_depth): query = query.outerjoin( obj_list[index + 1], obj_list[index].id == obj_list[index + 1].parent_id, ) exceeded_lines = query.filter(obj_list[-1].id != expression.null()) if exceeded_lines: return [line[max_depth].id for line in exceeded_lines] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/backends/sql_model.py0000664000175000017500000001230000000000000023252 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import orm from sqlalchemy.orm import collections from keystone.common import resource_options from keystone.common import sql from keystone.resource.backends import base from keystone.resource.backends import resource_options as ro class Project(sql.ModelBase, sql.ModelDictMixinWithExtras): # NOTE(henry-nash): From the manager and above perspective, the domain_id # is nullable. However, to ensure uniqueness in multi-process # configurations, it is better to still use the sql uniqueness constraint. # Since the support for a nullable component of a uniqueness constraint # across different sql databases is mixed, we instead store a special value # to represent null, as defined in NULL_DOMAIN_ID above. 
def to_dict(self, include_extra_dict=False): d = super().to_dict(include_extra_dict=include_extra_dict) if d['domain_id'] == base.NULL_DOMAIN_ID: d['domain_id'] = None # NOTE(notmorgan): Eventually it may make sense to drop the empty # option dict creation to the superclass (if enough models use it) d['options'] = resource_options.ref_mapper_to_dict_options(self) return d @classmethod def from_dict(cls, project_dict): new_dict = project_dict.copy() # TODO(morgan): move this functionality to a common location resource_options = {} options = new_dict.pop('options', {}) for opt in cls.resource_options_registry.options: if opt.option_name in options: opt_value = options[opt.option_name] # NOTE(notmorgan): None is always a valid type if opt_value is not None: opt.validator(opt_value) resource_options[opt.option_id] = opt_value project_obj = super().from_dict(new_dict) setattr(project_obj, '_resource_options', resource_options) return project_obj __tablename__ = 'project' attributes = [ 'id', 'name', 'domain_id', 'description', 'enabled', 'parent_id', 'is_domain', 'tags', ] resource_options_registry = ro.PROJECT_OPTIONS_REGISTRY id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(64), nullable=False) domain_id = sql.Column( sql.String(64), sql.ForeignKey('project.id'), nullable=False ) description = sql.Column(sql.Text()) enabled = sql.Column(sql.Boolean) extra = sql.Column(sql.JsonBlob()) parent_id = sql.Column(sql.String(64), sql.ForeignKey('project.id')) is_domain = sql.Column( sql.Boolean, default=False, nullable=False, server_default='0' ) _tags = orm.relationship( 'ProjectTag', single_parent=True, lazy='subquery', cascade='all,delete-orphan', backref='project', primaryjoin='and_(ProjectTag.project_id==Project.id)', ) _resource_option_mapper = orm.relationship( 'ProjectOption', single_parent=True, cascade='all,delete,delete-orphan', lazy='subquery', backref='project', collection_class=collections.attribute_mapped_collection('option_id'), ) 
# Unique constraint across two columns to create the separation # rather than just only 'name' being unique __table_args__ = (sql.UniqueConstraint('domain_id', 'name'),) @property def tags(self): if self._tags: return [tag.name for tag in self._tags] return [] @tags.setter def tags(self, values): new_tags = [] for tag in values: tag_ref = ProjectTag() tag_ref.project_id = self.id tag_ref.name = str(tag) new_tags.append(tag_ref) self._tags = new_tags class ProjectTag(sql.ModelBase, sql.ModelDictMixin): def to_dict(self): d = super().to_dict() return d __tablename__ = 'project_tag' attributes = ['project_id', 'name'] project_id = sql.Column( sql.String(64), sql.ForeignKey('project.id', ondelete='CASCADE'), nullable=False, primary_key=True, ) name = sql.Column(sql.Unicode(255), nullable=False, primary_key=True) class ProjectOption(sql.ModelBase): __tablename__ = 'project_option' project_id = sql.Column( sql.String(64), sql.ForeignKey('project.id', ondelete='CASCADE'), nullable=False, primary_key=True, ) option_id = sql.Column(sql.String(4), nullable=False, primary_key=True) option_value = sql.Column(sql.JsonBlob, nullable=True) def __init__(self, option_id, option_value): self.option_id = option_id self.option_value = option_value ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/resource/config_backends/0000775000175000017500000000000000000000000022252 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/config_backends/__init__.py0000664000175000017500000000000000000000000024351 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/config_backends/base.py0000664000175000017500000001225200000000000023540 0ustar00zuulzuul00000000000000# Copyright 
2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class DomainConfigDriverBase(metaclass=abc.ABCMeta): """Interface description for a Domain Config driver.""" @abc.abstractmethod def create_config_options(self, domain_id, option_list): """Create config options for a domain. Any existing config options will first be deleted. :param domain_id: the domain for this option :param option_list: a list of dicts, each one specifying an option Option schema:: type: dict properties: group: type: string option: type: string value: type: depends on the option sensitive: type: boolean required: [group, option, value, sensitive] additionalProperties: false """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_config_option(self, domain_id, group, option, sensitive=False): """Get the config option for a domain. :param domain_id: the domain for this option :param group: the group name :param option: the option name :param sensitive: whether the option is sensitive :returns: dict containing group, option and value :raises keystone.exception.DomainConfigNotFound: the option doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_config_options( self, domain_id, group=None, option=False, sensitive=False ): """Get a config options for a domain. 
:param domain_id: the domain for this option :param group: optional group option name :param option: optional option name. If group is None, then this parameter is ignored :param sensitive: whether the option is sensitive :returns: list of dicts containing group, option and value """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def update_config_options(self, domain_id, option_list): """Update config options for a domain. :param domain_id: the domain for this option :param option_list: a list of dicts, each one specifying an option """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_config_options(self, domain_id, group=None, option=None): """Delete config options for a domain. Allows deletion of all options for a domain, all options in a group or a specific option. The driver is silent if there are no options to delete. :param domain_id: the domain for this option :param group: optional group option name :param option: optional option name. If group is None, then this parameter is ignored The option is uniquely defined by domain_id, group and option, irrespective of whether it is sensitive ot not. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def obtain_registration(self, domain_id, type): """Try and register this domain to use the type specified. :param domain_id: the domain required :param type: type of registration :returns: True if the domain was registered, False otherwise. Failing to register means that someone already has it (which could even be the domain being requested). """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def read_registration(self, type): """Get the domain ID of who is registered to use this type. :param type: type of registration :returns: domain_id of who is registered. :raises keystone.exception.ConfigRegistrationNotFound: If nobody is registered. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def release_registration(self, domain_id, type=None): """Release registration if it is held by the domain specified. If the specified domain is registered for this domain then free it, if it is not then do nothing - no exception is raised. :param domain_id: the domain in question :param type: type of registration, if None then all registrations for this domain will be freed """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/config_backends/sql.py0000664000175000017500000001467700000000000023442 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone import exception from keystone.i18n import _ from keystone.resource.config_backends import base class WhiteListedConfig(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'whitelisted_config' domain_id = sql.Column(sql.String(64), primary_key=True) group = sql.Column(sql.String(255), primary_key=True) option = sql.Column(sql.String(255), primary_key=True) value = sql.Column(sql.JsonBlob(), nullable=False) def to_dict(self): d = super().to_dict() d.pop('domain_id') return d class SensitiveConfig(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'sensitive_config' domain_id = sql.Column(sql.String(64), primary_key=True) group = sql.Column(sql.String(255), primary_key=True) option = sql.Column(sql.String(255), primary_key=True) value = sql.Column(sql.JsonBlob(), nullable=False) def to_dict(self): d = super().to_dict() d.pop('domain_id') return d class ConfigRegister(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'config_register' type = sql.Column(sql.String(64), primary_key=True) domain_id = sql.Column(sql.String(64), nullable=False) class DomainConfig(base.DomainConfigDriverBase): def choose_table(self, sensitive): if sensitive: return SensitiveConfig else: return WhiteListedConfig def _create_config_option( self, session, domain_id, group, option, sensitive, value ): config_table = self.choose_table(sensitive) ref = config_table( domain_id=domain_id, group=group, option=option, value=value ) session.add(ref) def create_config_options(self, domain_id, option_list): with sql.session_for_write() as session: for config_table in [WhiteListedConfig, SensitiveConfig]: query = session.query(config_table) query = query.filter_by(domain_id=domain_id) query.delete(False) for option in option_list: self._create_config_option( session, domain_id, option['group'], option['option'], option['sensitive'], option['value'], ) def _get_config_option(self, session, domain_id, group, option, sensitive): try: config_table = 
self.choose_table(sensitive) ref = ( session.query(config_table) .filter_by(domain_id=domain_id, group=group, option=option) .one() ) except sql.NotFound: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option, } raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) return ref def get_config_option(self, domain_id, group, option, sensitive=False): with sql.session_for_read() as session: ref = self._get_config_option( session, domain_id, group, option, sensitive ) return ref.to_dict() def list_config_options( self, domain_id, group=None, option=None, sensitive=False ): with sql.session_for_read() as session: config_table = self.choose_table(sensitive) query = session.query(config_table) query = query.filter_by(domain_id=domain_id) if group: query = query.filter_by(group=group) if option: query = query.filter_by(option=option) return [ref.to_dict() for ref in query.all()] def update_config_options(self, domain_id, option_list): with sql.session_for_write() as session: for option in option_list: self._delete_config_options( session, domain_id, option['group'], option['option'] ) self._create_config_option( session, domain_id, option['group'], option['option'], option['sensitive'], option['value'], ) def _delete_config_options(self, session, domain_id, group, option): for config_table in [WhiteListedConfig, SensitiveConfig]: query = session.query(config_table) query = query.filter_by(domain_id=domain_id) if group: query = query.filter_by(group=group) if option: query = query.filter_by(option=option) query.delete(False) def delete_config_options(self, domain_id, group=None, option=None): with sql.session_for_write() as session: self._delete_config_options(session, domain_id, group, option) def obtain_registration(self, domain_id, type): try: with sql.session_for_write() as session: ref = ConfigRegister(type=type, domain_id=domain_id) session.add(ref) return True except sql.DBDuplicateEntry: # nosec # Continue on 
and return False to indicate failure. pass return False def read_registration(self, type): with sql.session_for_read() as session: ref = session.get(ConfigRegister, type) if not ref: raise exception.ConfigRegistrationNotFound() return ref.domain_id def release_registration(self, domain_id, type=None): """Silently delete anything registered for the domain specified.""" with sql.session_for_write() as session: query = session.query(ConfigRegister) if type: query = query.filter_by(type=type) query = query.filter_by(domain_id=domain_id) query.delete(False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/core.py0000664000175000017500000022042500000000000020462 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Main entry point into the Resource service.""" from oslo_log import log from keystone import assignment from keystone.common import cache from keystone.common import driver_hints from keystone.common import manager from keystone.common import provider_api from keystone.common.resource_options import options as ro_opt from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications from keystone.resource.backends import base from keystone.token import provider as token_provider CONF = keystone.conf.CONF LOG = log.getLogger(__name__) MEMOIZE = cache.get_memoization_decorator(group='resource') PROVIDERS = provider_api.ProviderAPIs TAG_SEARCH_FILTERS = ('tags', 'tags-any', 'not-tags', 'not-tags-any') class Manager(manager.Manager): """Default pivot point for the Resource backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.resource' _provides_api = 'resource_api' _DOMAIN = 'domain' _PROJECT = 'project' _PROJECT_TAG = 'project tag' def __init__(self): resource_driver = CONF.resource.driver super().__init__(resource_driver) def _get_hierarchy_depth(self, parents_list): return len(parents_list) + 1 def _assert_max_hierarchy_depth(self, project_id, parents_list=None): if parents_list is None: parents_list = self.list_project_parents(project_id) # NOTE(henry-nash): In upgrading to a scenario where domains are # represented as projects acting as domains, we will effectively # increase the depth of any existing project hierarchy by one. To avoid # pushing any existing hierarchies over the limit, we add one to the # maximum depth allowed, as specified in the configuration file. max_depth = CONF.max_project_tree_depth + 1 # NOTE(wxy): If the hierarchical limit enforcement model is used, the # project depth should be not greater than the model's limit as well. 
# # TODO(wxy): Deprecate and remove CONF.max_project_tree_depth, let the # depth check only based on the limit enforcement model. limit_model = PROVIDERS.unified_limit_api.enforcement_model if limit_model.MAX_PROJECT_TREE_DEPTH is not None: max_depth = min(max_depth, limit_model.MAX_PROJECT_TREE_DEPTH + 1) if self._get_hierarchy_depth(parents_list) > max_depth: raise exception.ForbiddenNotSecurity( _('Max hierarchy depth reached for %s branch.') % project_id ) def _assert_is_domain_project_constraints(self, project_ref): """Enforce specific constraints of projects that act as domains. Called when is_domain is true, this method ensures that: * multiple domains are enabled * the project name is not the reserved name for a federated domain * the project is a root project :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. """ if ( not PROVIDERS.identity_api.multiple_domains_supported and project_ref['id'] != CONF.identity.default_domain_id and project_ref['id'] != base.NULL_DOMAIN_ID ): raise exception.ValidationError( message=_('Multiple domains are not supported') ) self.assert_domain_not_federated(project_ref['id'], project_ref) if project_ref['parent_id']: raise exception.ValidationError( message=_('only root projects are allowed to act as domains.') ) def _assert_regular_project_constraints(self, project_ref): """Enforce regular project hierarchy constraints. Called when is_domain is false. The project must contain a valid domain_id and parent_id. The goal of this method is to check that the domain_id specified is consistent with the domain of its parent. :raises keystone.exception.ValidationError: If one of the constraints was not satisfied. :raises keystone.exception.DomainNotFound: In case the domain is not found. """ # Ensure domain_id is valid, and by inference will not be None. 
domain = self.get_domain(project_ref['domain_id']) parent_ref = self.get_project(project_ref['parent_id']) if parent_ref['is_domain']: if parent_ref['id'] != domain['id']: raise exception.ValidationError( message=_( 'Cannot create project, the parent ' '(%(parent_id)s) is acting as a domain, ' 'but this project\'s domain id (%(domain_id)s) ' 'does not match the parent\'s id.' ) % { 'parent_id': parent_ref['id'], 'domain_id': domain['id'], } ) else: parent_domain_id = parent_ref.get('domain_id') if parent_domain_id != domain['id']: raise exception.ValidationError( message=_( 'Cannot create project, since it specifies ' 'its domain_id %(domain_id)s, but ' 'specifies a parent in a different domain ' '(%(parent_domain_id)s).' ) % { 'domain_id': domain['id'], 'parent_domain_id': parent_domain_id, } ) def _enforce_project_constraints(self, project_ref): if project_ref.get('is_domain'): self._assert_is_domain_project_constraints(project_ref) else: self._assert_regular_project_constraints(project_ref) # The whole hierarchy (upwards) must be enabled parent_id = project_ref['parent_id'] parents_list = self.list_project_parents(parent_id) parent_ref = self.get_project(parent_id) parents_list.append(parent_ref) for ref in parents_list: if not ref.get('enabled', True): raise exception.ValidationError( message=_( 'cannot create a project in a ' 'branch containing a disabled ' 'project: %s' ) % ref['id'] ) self._assert_max_hierarchy_depth( project_ref.get('parent_id'), parents_list ) def _raise_reserved_character_exception(self, entity_type, name): msg = _( '%(entity)s name cannot contain the following reserved ' 'characters: %(chars)s' ) raise exception.ValidationError( message=msg % { 'entity': entity_type, 'chars': utils.list_url_unsafe_chars(name), } ) def _generate_project_name_conflict_msg(self, project): if project['is_domain']: return ( _( 'it is not permitted to have two projects ' 'acting as domains with the same name: %s' ) % project['name'] ) else: return ( _( 'it is 
not permitted to have two projects ' 'with either the same name or same id in ' 'the same domain: ' 'name is %(name)s, project id %(id)s' ) % project ) def create_project(self, project_id, project, initiator=None): project = project.copy() if ( CONF.resource.project_name_url_safe != 'off' and utils.is_not_url_safe(project['name']) ): self._raise_reserved_character_exception( 'Project', project['name'] ) project.setdefault('enabled', True) project['name'] = project['name'].strip() project.setdefault('description', '') # For regular projects, the controller will ensure we have a valid # domain_id. For projects acting as a domain, the project_id # is, effectively, the domain_id - and for such projects we don't # bother to store a copy of it in the domain_id attribute. project.setdefault('domain_id', None) project.setdefault('parent_id', None) if not project['parent_id']: project['parent_id'] = project['domain_id'] project.setdefault('is_domain', False) self._enforce_project_constraints(project) # We leave enforcing name uniqueness to the underlying driver (instead # of doing it in code in the project_constraints above), so as to allow # this check to be done at the storage level, avoiding race conditions # in multi-process keystone configurations. try: ret = self.driver.create_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project), ) if project.get('is_domain'): notifications.Audit.created(self._DOMAIN, project_id, initiator) else: notifications.Audit.created(self._PROJECT, project_id, initiator) if MEMOIZE.should_cache(ret): self.get_project.set(ret, self, project_id) self.get_project_by_name.set( ret, self, ret['name'], ret['domain_id'] ) assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() return ret def assert_domain_enabled(self, domain_id, domain=None): """Assert the Domain is enabled. :raise AssertionError: if domain is disabled. 
""" if domain is None: domain = self.get_domain(domain_id) if not domain.get('enabled', True): raise AssertionError(_('Domain is disabled: %s') % domain_id) def assert_domain_not_federated(self, domain_id, domain): """Assert the Domain's name and id do not match the reserved keyword. Note that the reserved keyword is defined in the configuration file, by default, it is 'Federated', it is also case insensitive. If config's option is empty the default hardcoded value 'Federated' will be used. :raise AssertionError: if domain named match the value in the config. """ # NOTE(marek-denis): We cannot create this attribute in the __init__ as # config values are always initialized to default value. federated_domain = CONF.federation.federated_domain_name.lower() if domain.get('name') and domain['name'].lower() == federated_domain: raise AssertionError( _('Domain cannot be named %s') % domain['name'] ) if domain_id.lower() == federated_domain: raise AssertionError(_('Domain cannot have ID %s') % domain_id) def assert_project_enabled(self, project_id, project=None): """Assert the project is enabled and its associated domain is enabled. :raise AssertionError: if the project or domain is disabled. """ if project is None: project = self.get_project(project_id) # If it's a regular project (i.e. 
it has a domain_id), we need to make # sure the domain itself is not disabled if project['domain_id']: self.assert_domain_enabled(domain_id=project['domain_id']) if not project.get('enabled', True): raise AssertionError(_('Project is disabled: %s') % project_id) def _assert_all_parents_are_enabled(self, project_id): parents_list = self.list_project_parents(project_id) for project in parents_list: if not project.get('enabled', True): raise exception.ForbiddenNotSecurity( _( 'Cannot enable project %s since it has disabled ' 'parents' ) % project_id ) def _is_immutable(self, project_ref): return project_ref['options'].get( ro_opt.IMMUTABLE_OPT.option_name, False ) def _check_whole_subtree_is_disabled(self, project_id, subtree_list=None): if not subtree_list: subtree_list = self.list_projects_in_subtree(project_id) subtree_enabled = [ref.get('enabled', True) for ref in subtree_list] return not any(subtree_enabled) def _update_project( self, project_id, project, initiator=None, cascade=False ): # Use the driver directly to prevent using old cached value. 
original_project = self.driver.get_project(project_id) project = project.copy() self._require_matching_domain_id(project, original_project) if original_project['is_domain']: # prevent updates to immutable domains ro_opt.check_immutable_update( original_resource_ref=original_project, new_resource_ref=project, type='domain', resource_id=project_id, ) domain = self._get_domain_from_project(original_project) self.assert_domain_not_federated(project_id, domain) url_safe_option = CONF.resource.domain_name_url_safe exception_entity = 'Domain' else: # prevent updates to immutable projects ro_opt.check_immutable_update( original_resource_ref=original_project, new_resource_ref=project, type='project', resource_id=project_id, ) url_safe_option = CONF.resource.project_name_url_safe exception_entity = 'Project' project_name_changed = ( 'name' in project and project['name'] != original_project['name'] ) if ( url_safe_option != 'off' and project_name_changed and utils.is_not_url_safe(project['name']) ): self._raise_reserved_character_exception( exception_entity, project['name'] ) elif project_name_changed: project['name'] = project['name'].strip() parent_id = original_project.get('parent_id') if 'parent_id' in project and project.get('parent_id') != parent_id: raise exception.ForbiddenNotSecurity( _('Update of `parent_id` is not allowed.') ) if ( 'is_domain' in project and project['is_domain'] != original_project['is_domain'] ): raise exception.ValidationError( message=_('Update of `is_domain` is not allowed.') ) original_project_enabled = original_project.get('enabled', True) project_enabled = project.get('enabled', True) if not original_project_enabled and project_enabled: self._assert_all_parents_are_enabled(project_id) if original_project_enabled and not project_enabled: # NOTE(htruta): In order to disable a regular project, all its # children must already be disabled. 
However, to keep # compatibility with the existing domain behaviour, we allow a # project acting as a domain to be disabled irrespective of the # state of its children. Disabling a project acting as domain # effectively disables its children. if ( not original_project.get('is_domain') and not cascade and not self._check_whole_subtree_is_disabled(project_id) ): raise exception.ForbiddenNotSecurity( _( 'Cannot disable project %(project_id)s since its ' 'subtree contains enabled projects.' ) % {'project_id': project_id} ) notifications.Audit.disabled( self._PROJECT, project_id, public=False ) # Drop the computed assignments if the project is being disabled. # This ensures an accurate list of projects is returned when # listing projects/domains for a user based on role assignments. assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() if cascade: self._only_allow_enabled_to_update_cascade( project, original_project ) self._update_project_enabled_cascade(project_id, project_enabled) try: project['is_domain'] = ( project.get('is_domain') or original_project['is_domain'] ) ret = self.driver.update_project(project_id, project) except exception.Conflict: raise exception.Conflict( type='project', details=self._generate_project_name_conflict_msg(project), ) try: self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate( self, original_project['name'], original_project['domain_id'] ) if ( 'domain_id' in project and project['domain_id'] != original_project['domain_id'] ): # If the project's domain_id has been updated, invalidate user # role assignments cache region, as it may be caching inherited # assignments from the old domain to the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.updated(self._PROJECT, project_id, initiator) if original_project['is_domain']: notifications.Audit.updated( self._DOMAIN, project_id, initiator ) # If the 
domain is being disabled, issue the disable # notification as well if original_project_enabled and not project_enabled: # NOTE(lbragstad): When a domain is disabled, we have to # invalidate the entire token cache. With persistent # tokens, we did something similar where all tokens for a # specific domain were deleted when that domain was # disabled. This effectively offers the same behavior for # non-persistent tokens by removing them from the cache and # requiring the authorization context to be rebuilt the # next time they're validated. token_provider.TOKENS_REGION.invalidate() notifications.Audit.disabled( self._DOMAIN, project_id, public=False ) return ret def _only_allow_enabled_to_update_cascade(self, project, original_project): for attr in project: if attr != 'enabled': if project.get(attr) != original_project.get(attr): raise exception.ValidationError( message=_( 'Cascade update is only allowed for ' 'enabled attribute.' ) ) def _update_project_enabled_cascade(self, project_id, enabled): subtree = self.list_projects_in_subtree(project_id) # Update enabled only if different from original value subtree_to_update = [ child for child in subtree if child['enabled'] != enabled ] for child in subtree_to_update: child['enabled'] = enabled if not enabled: # Does not in fact disable the project, only emits a # notification that it was disabled. The actual disablement # is done in the next line. 
notifications.Audit.disabled( self._PROJECT, child['id'], public=False ) self.driver.update_project(child['id'], child) def update_project( self, project_id, project, initiator=None, cascade=False ): ret = self._update_project(project_id, project, initiator, cascade) if ret['is_domain']: self.get_domain.invalidate(self, project_id) self.get_domain_by_name.invalidate(self, ret['name']) return ret def _post_delete_cleanup_project( self, project_id, project, initiator=None ): try: self.get_project.invalidate(self, project_id) self.get_project_by_name.invalidate( self, project['name'], project['domain_id'] ) PROVIDERS.assignment_api.delete_project_assignments(project_id) # Invalidate user role assignments cache region, as it may # be caching role assignments where the target is # the specified project assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate() PROVIDERS.credential_api.delete_credentials_for_project(project_id) PROVIDERS.trust_api.delete_trusts_for_project(project_id) PROVIDERS.unified_limit_api.delete_limits_for_project(project_id) finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.deleted(self._PROJECT, project_id, initiator) def delete_project(self, project_id, initiator=None, cascade=False): """Delete one project or a subtree. :param cascade: If true, the specified project and all its sub-projects are deleted. Otherwise, only the specified project is deleted. 
:type cascade: boolean :raises keystone.exception.ValidationError: if project is a domain :raises keystone.exception.Forbidden: if project is not a leaf """ project = self.driver.get_project(project_id) if project.get('is_domain'): self._delete_domain(project, initiator) else: self._delete_project(project, initiator, cascade) def _delete_project(self, project, initiator=None, cascade=False): # Prevent deletion of immutable projects ro_opt.check_immutable_delete( resource_ref=project, resource_type='project', resource_id=project['id'], ) project_id = project['id'] if project['is_domain'] and project['enabled']: raise exception.ValidationError( message=_( 'cannot delete an enabled project acting as a ' 'domain. Please disable the project %s first.' ) % project.get('id') ) if not self.is_leaf_project(project_id) and not cascade: raise exception.ForbiddenNotSecurity( _( 'Cannot delete the project %s since it is not a leaf in the ' 'hierarchy. Use the cascade option if you want to delete a ' 'whole subtree.' ) % project_id ) if cascade: # Getting reversed project's subtrees list, i.e. from the leaves # to the root, so we do not break parent_id FK. subtree_list = self.list_projects_in_subtree(project_id) subtree_list.reverse() if not self._check_whole_subtree_is_disabled( project_id, subtree_list=subtree_list ): raise exception.ForbiddenNotSecurity( _( 'Cannot delete project %(project_id)s since its subtree ' 'contains enabled projects.' ) % {'project_id': project_id} ) project_list = subtree_list + [project] projects_ids = [x['id'] for x in project_list] ret = self.driver.delete_projects_from_ids(projects_ids) for prj in project_list: self._post_delete_cleanup_project(prj['id'], prj, initiator) else: ret = self.driver.delete_project(project_id) self._post_delete_cleanup_project(project_id, project, initiator) reason = ( 'The token cache is being invalidate because project ' '%(project_id)s was deleted. 
Authorization will be recalculated ' 'and enforced accordingly the next time users authenticate or ' 'validate a token.' % {'project_id': project_id} ) notifications.invalidate_token_cache_notification(reason) return ret def _filter_projects_list(self, projects_list, user_id): user_projects = PROVIDERS.assignment_api.list_projects_for_user( user_id ) user_projects_ids = {proj['id'] for proj in user_projects} # Keep only the projects present in user_projects return [ proj for proj in projects_list if proj['id'] in user_projects_ids ] def _assert_valid_project_id(self, project_id): if project_id is None: msg = _('Project field is required and cannot be empty.') raise exception.ValidationError(message=msg) # Check if project_id exists self.get_project(project_id) def _include_limits(self, projects): """Modify a list of projects to include limit information. :param projects: a list of project references including an `id` :type projects: list of dictionaries """ for project in projects: hints = driver_hints.Hints() hints.add_filter('project_id', project['id']) limits = PROVIDERS.unified_limit_api.list_limits(hints) project['limits'] = limits def list_project_parents( self, project_id, user_id=None, include_limits=False ): self._assert_valid_project_id(project_id) parents = self.driver.list_project_parents(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. if user_id: parents = self._filter_projects_list(parents, user_id) if include_limits: self._include_limits(parents) return parents def _build_parents_as_ids_dict(self, project, parents_by_id): # NOTE(rodrigods): we don't rely in the order of the projects returned # by the list_project_parents() method. Thus, we create a project cache # (parents_by_id) in order to access each parent in constant time and # traverse up the hierarchy. 
def traverse_parents_hierarchy(project): parent_id = project.get('parent_id') if not parent_id: return None parent = parents_by_id[parent_id] return {parent_id: traverse_parents_hierarchy(parent)} return traverse_parents_hierarchy(project) def get_project_parents_as_ids(self, project): """Get the IDs from the parents from a given project. The project IDs are returned as a structured dictionary traversing up the hierarchy to the top level project. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project C parents, the expected return is the following dictionary:: 'parents': { B['id']: { A['id']: None } } """ parents_list = self.list_project_parents(project['id']) parents_as_ids = self._build_parents_as_ids_dict( project, {proj['id']: proj for proj in parents_list} ) return parents_as_ids def list_projects_in_subtree( self, project_id, user_id=None, include_limits=False ): self._assert_valid_project_id(project_id) subtree = self.driver.list_projects_in_subtree(project_id) # If a user_id was provided, the returned list should be filtered # against the projects this user has access to. if user_id: subtree = self._filter_projects_list(subtree, user_id) if include_limits: self._include_limits(subtree) return subtree def _build_subtree_as_ids_dict(self, project_id, subtree_by_parent): # NOTE(rodrigods): we perform a depth first search to construct the # dictionaries representing each level of the subtree hierarchy. In # order to improve this traversal performance, we create a cache of # projects (subtree_py_parent) that accesses in constant time the # direct children of a given project. 
def traverse_subtree_hierarchy(project_id): children = subtree_by_parent.get(project_id) if not children: return None children_ids = {} for child in children: children_ids[child['id']] = traverse_subtree_hierarchy( child['id'] ) return children_ids return traverse_subtree_hierarchy(project_id) def get_projects_in_subtree_as_ids(self, project_id): """Get the IDs from the projects in the subtree from a given project. The project IDs are returned as a structured dictionary representing their hierarchy. For example, considering the following project hierarchy:: A | +-B-+ | | C D If we query for project A subtree, the expected return is the following dictionary:: 'subtree': { B['id']: { C['id']: None, D['id']: None } } """ def _projects_indexed_by_parent(projects_list): projects_by_parent = {} for proj in projects_list: parent_id = proj.get('parent_id') if parent_id: if parent_id in projects_by_parent: projects_by_parent[parent_id].append(proj) else: projects_by_parent[parent_id] = [proj] return projects_by_parent subtree_list = self.list_projects_in_subtree(project_id) subtree_as_ids = self._build_subtree_as_ids_dict( project_id, _projects_indexed_by_parent(subtree_list) ) return subtree_as_ids def list_domains_from_ids(self, domain_ids): """List domains for the provided list of ids. :param domain_ids: list of ids :returns: a list of domain_refs. This method is used internally by the assignment manager to bulk read a set of domains given their ids. 
""" # Retrieve the projects acting as domains get their correspondent # domains projects = self.list_projects_from_ids(domain_ids) domains = [ self._get_domain_from_project(project) for project in projects ] return domains @MEMOIZE def get_domain(self, domain_id): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project(domain_id) # the DB backend might not operate in case sensitive mode, # therefore verify for exact match of IDs if domain_id != project['id']: raise exception.DomainNotFound(domain_id=domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) # Return its correspondent domain return self._get_domain_from_project(project) @MEMOIZE def get_domain_by_name(self, domain_name): try: # Retrieve the corresponding project that acts as a domain project = self.driver.get_project_by_name( domain_name, domain_id=None ) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_name) # Return its correspondent domain return self._get_domain_from_project(project) def _get_domain_from_project(self, project_ref): """Create a domain ref from a project ref. Based on the provided project ref, create a domain ref, so that the result can be returned in response to a domain API call. """ if not project_ref['is_domain']: LOG.error( 'Asked to convert a non-domain project into a ' 'domain - Domain: %(domain_id)s, Project ID: ' '%(id)s, Project Name: %(project_name)s', { 'domain_id': project_ref['domain_id'], 'id': project_ref['id'], 'project_name': project_ref['name'], }, ) raise exception.DomainNotFound(domain_id=project_ref['id']) domain_ref = project_ref.copy() # As well as the project specific attributes that we need to remove, # there is an old compatibility issue in that update project (as well # as extracting an extra attributes), also includes a copy of the # actual extra dict as well - something that update domain does not do. 
for k in ['parent_id', 'domain_id', 'is_domain', 'extra']: domain_ref.pop(k, None) return domain_ref def create_domain(self, domain_id, domain, initiator=None): if ( CONF.resource.domain_name_url_safe != 'off' and utils.is_not_url_safe(domain['name']) ): self._raise_reserved_character_exception('Domain', domain['name']) project_from_domain = base.get_project_from_domain(domain) is_domain_project = self.create_project( domain_id, project_from_domain, initiator ) return self._get_domain_from_project(is_domain_project) @manager.response_truncated def list_domains(self, hints=None): projects = self.list_projects_acting_as_domain(hints) domains = [ self._get_domain_from_project(project) for project in projects ] return domains def update_domain(self, domain_id, domain, initiator=None): # TODO(henry-nash): We shouldn't have to check for the federated domain # here as well as _update_project, but currently our tests assume the # checks are done in a specific order. The tests should be refactored. self.assert_domain_not_federated(domain_id, domain) project = base.get_project_from_domain(domain) try: original_domain = self.driver.get_project(domain_id) project = self._update_project(domain_id, project, initiator) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) domain_from_project = self._get_domain_from_project(project) self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, original_domain['name']) return domain_from_project def delete_domain(self, domain_id, initiator=None): # Use the driver directly to get the project that acts as a domain and # prevent using old cached value. 
try: domain = self.driver.get_project(domain_id) except exception.ProjectNotFound: raise exception.DomainNotFound(domain_id=domain_id) self._delete_domain(domain, initiator) def _delete_domain(self, domain, initiator=None): # Disallow deletion of immutable domains ro_opt.check_immutable_delete( resource_ref=domain, resource_type='domain', resource_id=domain['id'], ) # To help avoid inadvertent deletes, we insist that the domain # has been previously disabled. This also prevents a user deleting # their own domain since, once it is disabled, they won't be able # to get a valid token to issue this delete. if domain['enabled']: raise exception.ForbiddenNotSecurity( _( 'Cannot delete a domain that is enabled, please disable it ' 'first.' ) ) domain_id = domain['id'] self._delete_domain_contents(domain_id) notifications.Audit.internal(notifications.DOMAIN_DELETED, domain_id) self._delete_project(domain, initiator) try: self.get_domain.invalidate(self, domain_id) self.get_domain_by_name.invalidate(self, domain['name']) # Delete any database stored domain config PROVIDERS.domain_config_api.delete_config_options(domain_id) PROVIDERS.domain_config_api.release_registration(domain_id) finally: # attempt to send audit event even if the cache invalidation raises notifications.Audit.deleted(self._DOMAIN, domain_id, initiator) def _delete_domain_contents(self, domain_id): """Delete the contents of a domain. Before we delete a domain, we need to remove all the entities that are owned by it, i.e. Projects. To do this we call the delete function for these entities, which are themselves responsible for deleting any credentials and role grants associated with them as well as revoking any relevant tokens. """ def _delete_projects(project, projects, examined): if project['id'] in examined: msg = ( 'Circular reference or a repeated entry found ' 'projects hierarchy - %(project_id)s.' 
) LOG.error(msg, {'project_id': project['id']}) return examined.add(project['id']) children = [ proj for proj in projects if proj.get('parent_id') == project['id'] ] for proj in children: _delete_projects(proj, projects, examined) try: self._delete_project(project, initiator=None) except exception.ProjectNotFound: LOG.debug( ( 'Project %(projectid)s not found when ' 'deleting domain contents for %(domainid)s, ' 'continuing with cleanup.' ), {'projectid': project['id'], 'domainid': domain_id}, ) proj_refs = self.list_projects_in_domain(domain_id) # Deleting projects recursively roots = [x for x in proj_refs if x.get('parent_id') == domain_id] examined = set() for project in roots: _delete_projects(project, proj_refs, examined) @manager.response_truncated def list_projects(self, hints=None): if hints: tag_filters = {} # Handle project tag filters separately for f in list(hints.filters): if f['name'] in TAG_SEARCH_FILTERS: tag_filters[f['name']] = f['value'] hints.filters.remove(f) if tag_filters: tag_refs = self.driver.list_projects_by_tags(tag_filters) project_refs = self.driver.list_projects(hints) ref_ids = [ref['id'] for ref in tag_refs] return [ref for ref in project_refs if ref['id'] in ref_ids] return self.driver.list_projects(hints or driver_hints.Hints()) # NOTE(henry-nash): list_projects_in_domain is actually an internal method # and not exposed via the API. Therefore there is no need to support # driver hints for it. 
def list_projects_in_domain(self, domain_id): return self.driver.list_projects_in_domain(domain_id) def list_projects_acting_as_domain(self, hints=None): return self.driver.list_projects_acting_as_domain( hints or driver_hints.Hints() ) @MEMOIZE def get_project(self, project_id): return self.driver.get_project(project_id) @MEMOIZE def get_project_by_name(self, project_name, domain_id): return self.driver.get_project_by_name(project_name, domain_id) def _require_matching_domain_id(self, new_ref, orig_ref): """Ensure the current domain ID matches the reference one, if any. Provided we want domain IDs to be immutable, check whether any domain_id specified in the ref dictionary matches the existing domain_id for this entity. :param new_ref: the dictionary of new values proposed for this entity :param orig_ref: the dictionary of original values proposed for this entity :raises: :class:`keystone.exception.ValidationError` """ if 'domain_id' in new_ref: if new_ref['domain_id'] != orig_ref['domain_id']: raise exception.ValidationError(_('Cannot change Domain ID')) def create_project_tag(self, project_id, tag, initiator=None): """Create a new tag on project. :param project_id: ID of a project to create a tag for :param tag: The string value of a tag to add :returns: The value of the created tag """ project = self.driver.get_project(project_id) if ro_opt.check_resource_immutable(resource_ref=project): raise exception.ResourceUpdateForbidden( message=_( 'Cannot create project tags for %(project_id)s, project ' 'is immutable. Set "immutable" option to false before ' 'creating project tags.' ) % {'project_id': project_id} ) tag_name = tag.strip() project['tags'].append(tag_name) self.update_project(project_id, {'tags': project['tags']}) notifications.Audit.created(self._PROJECT_TAG, tag_name, initiator) return tag_name def get_project_tag(self, project_id, tag_name): """Return information for a single tag on a project. 
:param project_id: ID of a project to retrive a tag from :param tag_name: Name of a tag to return :raises keystone.exception.ProjectTagNotFound: If the tag name does not exist on the project :returns: The tag value """ project = self.driver.get_project(project_id) if tag_name not in project.get('tags'): raise exception.ProjectTagNotFound(project_tag=tag_name) return tag_name def list_project_tags(self, project_id): """List all tags on project. :param project_id: The ID of a project :returns: A list of tags from a project """ project = self.driver.get_project(project_id) return project.get('tags', []) def update_project_tags(self, project_id, tags, initiator=None): """Update all tags on a project. :param project_id: The ID of the project to update :param tags: A list of tags to update on the project :returns: A list of tags """ project = self.driver.get_project(project_id) if ro_opt.check_resource_immutable(resource_ref=project): raise exception.ResourceUpdateForbidden( message=_( 'Cannot update project tags for %(project_id)s, project ' 'is immutable. Set "immutable" option to false before ' 'creating project tags.' ) % {'project_id': project_id} ) tag_list = [t.strip() for t in tags] project = {'tags': tag_list} self.update_project(project_id, project) return tag_list def delete_project_tag(self, project_id, tag): """Delete single tag from project. :param project_id: The ID of the project :param tag: The tag value to delete :raises keystone.exception.ProjectTagNotFound: If the tag name does not exist on the project """ project = self.driver.get_project(project_id) if ro_opt.check_resource_immutable(resource_ref=project): raise exception.ResourceUpdateForbidden( message=_( 'Cannot delete project tags for %(project_id)s, project ' 'is immutable. Set "immutable" option to false before ' 'creating project tags.' 
) % {'project_id': project_id} ) try: project['tags'].remove(tag) except ValueError: raise exception.ProjectTagNotFound(project_tag=tag) self.update_project(project_id, project) notifications.Audit.deleted(self._PROJECT_TAG, tag) def check_project_depth(self, max_depth=None): """Check project depth whether greater than input or not.""" if max_depth: exceeded_project_ids = self.driver.check_project_depth(max_depth) if exceeded_project_ids: raise exception.LimitTreeExceedError( exceeded_project_ids, max_depth ) MEMOIZE_CONFIG = cache.get_memoization_decorator(group='domain_config') class DomainConfigManager(manager.Manager): """Default pivot point for the Domain Config backend.""" # NOTE(henry-nash): In order for a config option to be stored in the # standard table, it must be explicitly whitelisted. Options marked as # sensitive are stored in a separate table. Attempting to store options # that are not listed as either whitelisted or sensitive will raise an # exception. # # Only those options that affect the domain-specific driver support in # the identity manager are supported. driver_namespace = 'keystone.resource.domain_config' _provides_api = 'domain_config_api' # We explicitly state each whitelisted option instead of pulling all ldap # options from CONF and selectively pruning them to prevent a security # lapse. That way if a new ldap CONF key/value were to be added it wouldn't # automatically be added to the whitelisted options unless that is what was # intended. In which case, we explicitly add it to the list ourselves. 
whitelisted_options = { 'identity': ['driver', 'list_limit'], 'ldap': [ 'url', 'user', 'suffix', 'query_scope', 'page_size', 'alias_dereferencing', 'debug_level', 'chase_referrals', 'user_tree_dn', 'user_filter', 'user_objectclass', 'user_id_attribute', 'user_name_attribute', 'user_mail_attribute', 'user_description_attribute', 'user_pass_attribute', 'user_enabled_attribute', 'user_enabled_invert', 'user_enabled_mask', 'user_enabled_default', 'user_attribute_ignore', 'user_default_project_id_attribute', 'user_enabled_emulation', 'user_enabled_emulation_dn', 'user_enabled_emulation_use_group_config', 'user_additional_attribute_mapping', 'group_tree_dn', 'group_filter', 'group_objectclass', 'group_id_attribute', 'group_name_attribute', 'group_members_are_ids', 'group_member_attribute', 'group_desc_attribute', 'group_attribute_ignore', 'group_additional_attribute_mapping', 'tls_cacertfile', 'tls_cacertdir', 'use_tls', 'tls_req_cert', 'use_pool', 'pool_size', 'pool_retry_max', 'pool_retry_delay', 'pool_connection_timeout', 'pool_connection_lifetime', 'use_auth_pool', 'auth_pool_size', 'auth_pool_connection_lifetime', ], } sensitive_options = {'identity': [], 'ldap': ['password']} def __init__(self): super().__init__(CONF.domain_config.driver) def _assert_valid_config(self, config): """Ensure the options in the config are valid. This method is called to validate the request config in create and update manager calls. 
:param config: config structure being created or updated """ # Something must be defined in the request if not config: raise exception.InvalidDomainConfig( reason=_('No options specified') ) # Make sure the groups/options defined in config itself are valid for group in config: if not config[group] or not isinstance(config[group], dict): msg = _( 'The value of group %(group)s specified in the ' 'config should be a dictionary of options' ) % {'group': group} raise exception.InvalidDomainConfig(reason=msg) for option in config[group]: self._assert_valid_group_and_option(group, option) def _assert_valid_group_and_option(self, group, option): """Ensure the combination of group and option is valid. :param group: optional group name, if specified it must be one we support :param option: optional option name, if specified it must be one we support and a group must also be specified """ if not group and not option: # For all calls, it's OK for neither to be defined, it means you # are operating on all config options for that domain. return if not group and option: # Our API structure should prevent this from ever happening, so if # it does, then this is coding error. 
msg = _( 'Option %(option)s found with no group specified while ' 'checking domain configuration request' ) % {'option': option} raise exception.UnexpectedError(exception=msg) if CONF.domain_config.additional_whitelisted_options: self.whitelisted_options.update( **CONF.domain_config.additional_whitelisted_options ) if CONF.domain_config.additional_sensitive_options: self.sensitive_options.update( **CONF.domain_config.additional_sensitive_options ) if ( group and group not in self.whitelisted_options and group not in self.sensitive_options ): msg = _( 'Group %(group)s is not supported ' 'for domain specific configurations' ) % {'group': group} raise exception.InvalidDomainConfig(reason=msg) if option: if option not in self.whitelisted_options.get( group, {} ) and option not in self.sensitive_options.get(group, {}): msg = _( 'Option %(option)s in group %(group)s is not ' 'supported for domain specific configurations' ) % {'group': group, 'option': option} raise exception.InvalidDomainConfig(reason=msg) def _is_sensitive(self, group, option): return option in self.sensitive_options.get(group, {}) def _config_to_list(self, config): """Build list of options for use by backend drivers.""" option_list = [] for group in config: for option in config[group]: option_list.append( { 'group': group, 'option': option, 'value': config[group][option], 'sensitive': self._is_sensitive(group, option), } ) return option_list def _option_dict(self, group, option): group_attr = getattr(CONF, group) return { 'group': group, 'option': option, 'value': getattr(group_attr, option), } def _list_to_config(self, whitelisted, sensitive=None, req_option=None): """Build config dict from a list of option dicts. :param whitelisted: list of dicts containing options and their groups, this has already been filtered to only contain those options to include in the output. 
:param sensitive: list of dicts containing sensitive options and their groups, this has already been filtered to only contain those options to include in the output. :param req_option: the individual option requested :returns: a config dict, including sensitive if specified """ the_list = whitelisted + (sensitive or []) if not the_list: return {} if req_option: # The request was specific to an individual option, so # no need to include the group in the output. We first check that # there is only one option in the answer (and that it's the right # one) - if not, something has gone wrong and we raise an error if len(the_list) > 1 or the_list[0]['option'] != req_option: LOG.error( 'Unexpected results in response for domain ' 'config - %(count)s responses, first option is ' '%(option)s, expected option %(expected)s', { 'count': len(the_list), 'option': list[0]['option'], 'expected': req_option, }, ) raise exception.UnexpectedError( _( 'An unexpected error occurred when retrieving domain ' 'configs' ) ) return {the_list[0]['option']: the_list[0]['value']} config = {} for option in the_list: config.setdefault(option['group'], {}) config[option['group']][option['option']] = option['value'] return config def create_config(self, domain_id, config): """Create config for a domain. :param domain_id: the domain in question :param config: the dict of config groups/options to assign to the domain Creates a new config, overwriting any previous config (no Conflict error will be generated). :returns: a dict of group dicts containing the options, with any that are sensitive removed :raises keystone.exception.InvalidDomainConfig: when the config contains options we do not support """ self._assert_valid_config(config) option_list = self._config_to_list(config) self.create_config_options(domain_id, option_list) # Since we are caching on the full substituted config, we just # invalidate here, rather than try and create the right result to # cache. 
self.get_config_with_sensitive_info.invalidate(self, domain_id) return self._list_to_config(self.list_config_options(domain_id)) def get_config(self, domain_id, group=None, option=None): """Get config, or partial config, for a domain. :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the whitelisted options, filtered by group and option specified :raises keystone.exception.DomainConfigNotFound: when no config found that matches domain_id, group and option specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support An example response:: { 'ldap': { 'url': 'myurl' 'user_tree_dn': 'OU=myou'}, 'identity': { 'driver': 'ldap'} } """ self._assert_valid_group_and_option(group, option) whitelisted = self.list_config_options(domain_id, group, option) if whitelisted: return self._list_to_config(whitelisted, req_option=option) if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option, } elif group: msg = _('group %(group)s') % {'group': group} else: msg = _('any options') raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) def get_security_compliance_config(self, domain_id, group, option=None): r"""Get full or partial security compliance config from configuration. 
:param domain_id: the domain in question :param group: a specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the whitelisted options, filtered by group and option specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support An example response:: { 'security_compliance': { 'password_regex': '^(?=.*\d)(?=.*[a-zA-Z]).{7,}$' 'password_regex_description': 'A password must consist of at least 1 letter, ' '1 digit, and have a minimum length of 7 characters' } } """ if domain_id != CONF.identity.default_domain_id: msg = _( 'Reading security compliance information for any domain ' 'other than the default domain is not allowed or ' 'supported.' ) raise exception.InvalidDomainConfig(reason=msg) config_list = [] readable_options = ['password_regex', 'password_regex_description'] if option and option not in readable_options: msg = _( 'Reading security compliance values other than ' 'password_regex and password_regex_description is not ' 'allowed.' ) raise exception.InvalidDomainConfig(reason=msg) elif option and option in readable_options: config_list.append(self._option_dict(group, option)) elif not option: for op in readable_options: config_list.append(self._option_dict(group, op)) # We already validated that the group is the security_compliance group # so we can move along and start validating the options return self._list_to_config(config_list, req_option=option) def update_config(self, domain_id, config, group=None, option=None): """Update config, or partial config, for a domain. 
:param domain_id: the domain in question :param config: the config dict containing and groups/options being updated :param group: an optional specific group of options, which if specified must appear in config, with no other groups :param option: an optional specific option within the group, which if specified must appear in config, with no other options The contents of the supplied config will be merged with the existing config for this domain, updating or creating new options if these did not previously exist. If group or option is specified, then the update will be limited to those specified items and the inclusion of other options in the supplied config will raise an exception, as will the situation when those options do not already exist in the current config. :returns: a dict of groups containing all whitelisted options :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support or one that does not exist in the original config """ def _assert_valid_update(domain_id, config, group=None, option=None): """Ensure the combination of config, group and option is valid.""" self._assert_valid_config(config) self._assert_valid_group_and_option(group, option) # If a group has been specified, then the request is to # explicitly only update the options in that group - so the config # must not contain anything else. Further, that group must exist in # the original config. Likewise, if an option has been specified, # then the group in the config must only contain that option and it # also must exist in the original config. 
if group: if len(config) != 1 or (option and len(config[group]) != 1): if option: msg = _( 'Trying to update option %(option)s in group ' '%(group)s, so that, and only that, option ' 'must be specified in the config' ) % {'group': group, 'option': option} else: msg = _( 'Trying to update group %(group)s, so that, ' 'and only that, group must be specified in ' 'the config' ) % {'group': group} raise exception.InvalidDomainConfig(reason=msg) # So we now know we have the right number of entries in the # config that align with a group/option being specified, but we # must also make sure they match. if group not in config: msg = _( 'request to update group %(group)s, but config ' 'provided contains group %(group_other)s ' 'instead' ) % {'group': group, 'group_other': list(config.keys())[0]} raise exception.InvalidDomainConfig(reason=msg) if option and option not in config[group]: msg = _( 'Trying to update option %(option)s in group ' '%(group)s, but config provided contains option ' '%(option_other)s instead' ) % { 'group': group, 'option': option, 'option_other': list(config[group].keys())[0], } raise exception.InvalidDomainConfig(reason=msg) # Finally, we need to check if the group/option specified # already exists in the original config - since if not, to keep # with the semantics of an update, we need to fail with # a DomainConfigNotFound if not self._get_config_with_sensitive_info( domain_id, group, option ): if option: msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option, } raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) else: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) update_config = config if group and option: # The config will just be a dict containing the option and # its value, so make it look like a single option under the # group in question update_config = {group: config} _assert_valid_update(domain_id, 
update_config, group, option) option_list = self._config_to_list(update_config) self.update_config_options(domain_id, option_list) self.get_config_with_sensitive_info.invalidate(self, domain_id) return self.get_config(domain_id) def delete_config(self, domain_id, group=None, option=None): """Delete config, or partial config, for the domain. :param domain_id: the domain in question :param group: an optional specific group of options :param option: an optional specific option within the group If group and option are None, then the entire config for the domain is deleted. If group is not None, then just that group of options will be deleted. If group and option are both specified, then just that option is deleted. :raises keystone.exception.InvalidDomainConfig: when group/option parameters specify an option we do not support or one that does not exist in the original config. """ self._assert_valid_group_and_option(group, option) if group: # As this is a partial delete, then make sure the items requested # are valid and exist in the current config current_config = self._get_config_with_sensitive_info(domain_id) # Raise an exception if the group/options specified don't exist in # the current config so that the delete method provides the # correct error semantics. current_group = current_config.get(group) if not current_group: msg = _('group %(group)s') % {'group': group} raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) if option and not current_group.get(option): msg = _('option %(option)s in group %(group)s') % { 'group': group, 'option': option, } raise exception.DomainConfigNotFound( domain_id=domain_id, group_or_option=msg ) self.delete_config_options(domain_id, group, option) self.get_config_with_sensitive_info.invalidate(self, domain_id) def _get_config_with_sensitive_info( self, domain_id, group=None, option=None ): """Get config for a domain/group/option with sensitive info included. 
This is only used by the methods within this class, which may need to check individual groups or options. """ whitelisted = self.list_config_options(domain_id, group, option) sensitive = self.list_config_options( domain_id, group, option, sensitive=True ) # Check if there are any sensitive substitutions needed. We first try # and simply ensure any sensitive options that have valid substitution # references in the whitelisted options are substituted. We then check # the resulting whitelisted option and raise a warning if there # appears to be an unmatched or incorrectly constructed substitution # reference. To avoid the risk of logging any sensitive options that # have already been substituted, we first take a copy of the # whitelisted option. # Build a dict of the sensitive options ready to try substitution sensitive_dict = {s['option']: s['value'] for s in sensitive} for each_whitelisted in whitelisted: if not isinstance(each_whitelisted['value'], str): # We only support substitutions into string types, if its an # integer, list etc. then just continue onto the next one continue # Store away the original value in case we need to raise a warning # after substitution. original_value = each_whitelisted['value'] warning_msg = '' try: each_whitelisted['value'] = ( each_whitelisted['value'] % sensitive_dict ) except KeyError: warning_msg = ( 'Found what looks like an unmatched config option ' 'substitution reference - domain: %(domain)s, group: ' '%(group)s, option: %(option)s, value: %(value)s. Perhaps ' 'the config option to which it refers has yet to be ' 'added?' ) except (ValueError, TypeError): warning_msg = ( 'Found what looks like an incorrectly constructed ' 'config option substitution reference - domain: ' '%(domain)s, group: %(group)s, option: %(option)s, ' 'value: %(value)s.' 
) if warning_msg: LOG.warning( warning_msg, { 'domain': domain_id, 'group': each_whitelisted['group'], 'option': each_whitelisted['option'], 'value': original_value, }, ) return self._list_to_config(whitelisted, sensitive) @MEMOIZE_CONFIG def get_config_with_sensitive_info(self, domain_id): """Get config for a domain with sensitive info included. This method is not exposed via the public API, but is used by the identity manager to initialize a domain with the fully formed config options. """ return self._get_config_with_sensitive_info(domain_id) def get_config_default(self, group=None, option=None): """Get default config, or partial default config. :param group: an optional specific group of options :param option: an optional specific option within the group :returns: a dict of group dicts containing the default options, filtered by group and option if specified :raises keystone.exception.InvalidDomainConfig: when the config and group/option parameters specify an option we do not support (or one that is not whitelisted). 
An example response:: { 'ldap': { 'url': 'myurl', 'user_tree_dn': 'OU=myou', ....}, 'identity': { 'driver': 'ldap'} } """ self._assert_valid_group_and_option(group, option) config_list = [] if group: if option: if option not in self.whitelisted_options[group]: msg = _( 'Reading the default for option %(option)s in ' 'group %(group)s is not supported' ) % {'option': option, 'group': group} raise exception.InvalidDomainConfig(reason=msg) config_list.append(self._option_dict(group, option)) else: for each_option in self.whitelisted_options[group]: config_list.append(self._option_dict(group, each_option)) else: for each_group in self.whitelisted_options: for each_option in self.whitelisted_options[each_group]: config_list.append( self._option_dict(each_group, each_option) ) return self._list_to_config(config_list, req_option=option) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/resource/schema.py0000664000175000017500000000633700000000000020776 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import validation from keystone.common.validation import parameter_types from keystone.resource.backends import resource_options as ro _name_properties = { 'type': 'string', 'minLength': 1, 'maxLength': 64, 'pattern': r'[\S]+', } _project_tag_name_properties = { 'type': 'string', 'minLength': 1, 'maxLength': 255, # NOTE(gagehugo) This pattern is for tags which follows the # guidelines as set by the API-WG, which matches anything that # does not contain a '/' or ','. # https://specs.openstack.org/openstack/api-wg/guidelines/tags.html 'pattern': '^[^,/]*$', } _project_tags_list_properties = { 'type': 'array', 'items': _project_tag_name_properties, 'required': [], 'maxItems': 80, 'uniqueItems': True, } _project_properties = { 'description': validation.nullable(parameter_types.description), # NOTE(htruta): domain_id is nullable for projects acting as a domain. 'domain_id': validation.nullable(parameter_types.id_string), 'enabled': parameter_types.boolean, 'is_domain': parameter_types.boolean, 'parent_id': validation.nullable(parameter_types.id_string), 'name': _name_properties, 'tags': _project_tags_list_properties, 'options': ro.PROJECT_OPTIONS_REGISTRY.json_schema, } # This is for updating a single project tag via the URL project_tag_create = _project_tag_name_properties # This is for updaing a project with a list of tags project_tags_update = _project_tags_list_properties project_create = { 'type': 'object', 'properties': _project_properties, # NOTE(lbragstad): A project name is the only parameter required for # project creation according to the Identity V3 API. We should think # about using the maxProperties validator here, and in update. 
'required': ['name'], 'additionalProperties': True, } project_update = { 'type': 'object', 'properties': _project_properties, # NOTE(lbragstad): Make sure at least one property is being updated 'minProperties': 1, 'additionalProperties': True, } _domain_properties = { 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'name': _name_properties, 'tags': project_tags_update, } domain_create = { 'type': 'object', 'properties': _domain_properties, # TODO(lbragstad): According to the V3 API spec, name isn't required but # the current implementation in assignment.controller:DomainV3 requires a # name for the domain. 'required': ['name'], 'additionalProperties': True, } domain_update = { 'type': 'object', 'properties': _domain_properties, 'minProperties': 1, 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/revoke/0000775000175000017500000000000000000000000016617 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/__init__.py0000664000175000017500000000111500000000000020726 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.revoke.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/revoke/backends/0000775000175000017500000000000000000000000020371 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/backends/__init__.py0000664000175000017500000000000000000000000022470 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/backends/base.py0000664000175000017500000000402000000000000021651 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import datetime from oslo_utils import timeutils import keystone.conf from keystone import exception CONF = keystone.conf.CONF def revoked_before_cutoff_time(): expire_delta = datetime.timedelta( seconds=CONF.token.expiration + CONF.revoke.expiration_buffer ) oldest = timeutils.utcnow() - expire_delta return oldest class RevokeDriverBase(metaclass=abc.ABCMeta): """Interface for recording and reporting revocation events.""" @abc.abstractmethod def list_events(self, last_fetch=None, token=None): """Return the revocation events, as a list of objects. :param last_fetch: Time of last fetch. Return all events newer. 
:param token: dictionary of values from a token, normalized for differences between v2 and v3. The checked values are a subset of the attributes of model.TokenEvent :returns: A list of keystone.revoke.model.RevokeEvent newer than `last_fetch.` If no last_fetch is specified, returns all events for tokens issued after the expiration cutoff. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def revoke(self, event): """Register a revocation event. :param event: An instance of keystone.revoke.model.RevocationEvent """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/backends/sql.py0000664000175000017500000001456100000000000021551 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_db import api as oslo_db_api import sqlalchemy from keystone.common import sql from keystone.models import revoke_model from keystone.revoke.backends import base class RevocationEvent(sql.ModelBase, sql.ModelDictMixin): __tablename__ = 'revocation_event' attributes = revoke_model.REVOKE_KEYS # The id field is not going to be exposed to the outside world. # It is, however, necessary for SQLAlchemy. 
id = sql.Column(sql.Integer, primary_key=True, nullable=False) domain_id = sql.Column(sql.String(64)) project_id = sql.Column(sql.String(64)) user_id = sql.Column(sql.String(64)) role_id = sql.Column(sql.String(64)) trust_id = sql.Column(sql.String(64)) consumer_id = sql.Column(sql.String(64)) access_token_id = sql.Column(sql.String(64)) issued_before = sql.Column(sql.DateTime(), nullable=False, index=True) expires_at = sql.Column(sql.DateTime()) revoked_at = sql.Column(sql.DateTime(), nullable=False, index=True) audit_id = sql.Column(sql.String(32)) audit_chain_id = sql.Column(sql.String(32)) __table_args__ = ( sql.Index( 'ix_revocation_event_project_id_issued_before', 'project_id', 'issued_before', ), sql.Index( 'ix_revocation_event_user_id_issued_before', 'user_id', 'issued_before', ), sql.Index( 'ix_revocation_event_audit_id_issued_before', 'audit_id', 'issued_before', ), ) class Revoke(base.RevokeDriverBase): def _flush_batch_size(self, dialect): batch_size = 0 if dialect == 'ibm_db_sa': # This functionality is limited to DB2, because # it is necessary to prevent the transaction log # from filling up, whereas at least some of the # other supported databases do not support update # queries with LIMIT subqueries nor do they appear # to require the use of such queries when deleting # large numbers of records at once. batch_size = 100 # Limit of 100 is known to not fill a transaction log # of default maximum size while not significantly # impacting the performance of large token purges on # systems where the maximum transaction log size has # been increased beyond the default. 
return batch_size def _prune_expired_events(self): oldest = base.revoked_before_cutoff_time() with sql.session_for_write() as session: dialect = session.bind.dialect.name batch_size = self._flush_batch_size(dialect) if batch_size > 0: query = session.query(RevocationEvent.id) query = query.filter(RevocationEvent.revoked_at < oldest) query = query.limit(batch_size).subquery() delete_query = session.query(RevocationEvent).filter( RevocationEvent.id.in_(query) ) while True: rowcount = delete_query.delete(synchronize_session=False) if rowcount == 0: break else: query = session.query(RevocationEvent) query = query.filter(RevocationEvent.revoked_at < oldest) query.delete(synchronize_session=False) session.flush() def _list_token_events(self, token): with sql.session_for_read() as session: query = session.query(RevocationEvent).filter( RevocationEvent.issued_before >= token['issued_at'] ) user = [RevocationEvent.user_id.is_(None)] proj = [RevocationEvent.project_id.is_(None)] audit = [RevocationEvent.audit_id.is_(None)] trust = [RevocationEvent.trust_id.is_(None)] if token['user_id']: user.append(RevocationEvent.user_id == token['user_id']) if token['trustor_id']: user.append(RevocationEvent.user_id == token['trustor_id']) if token['trustee_id']: user.append(RevocationEvent.user_id == token['trustee_id']) if token['project_id']: proj.append(RevocationEvent.project_id == token['project_id']) if token['audit_id']: audit.append(RevocationEvent.audit_id == token['audit_id']) if token['trust_id']: trust.append(RevocationEvent.trust_id == token['trust_id']) query = query.filter( sqlalchemy.and_( sqlalchemy.or_(*user), sqlalchemy.or_(*proj), sqlalchemy.or_(*audit), sqlalchemy.or_(*trust), ) ) events = [revoke_model.RevokeEvent(**e.to_dict()) for e in query] return events def _list_last_fetch_events(self, last_fetch=None): with sql.session_for_read() as session: query = session.query(RevocationEvent).order_by( RevocationEvent.revoked_at ) if last_fetch: query = 
query.filter(RevocationEvent.revoked_at > last_fetch) events = [revoke_model.RevokeEvent(**e.to_dict()) for e in query] return events def list_events(self, last_fetch=None, token=None): if token: return self._list_token_events(token) else: return self._list_last_fetch_events(last_fetch) @oslo_db_api.wrap_db_retry(retry_on_deadlock=True) def revoke(self, event): kwargs = dict() for attr in revoke_model.REVOKE_KEYS: kwargs[attr] = getattr(event, attr) record = RevocationEvent(**kwargs) with sql.session_for_write() as session: session.add(record) self._prune_expired_events() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/core.py0000664000175000017500000001205300000000000020122 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Revoke service.""" from keystone.common import cache from keystone.common import manager import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.models import revoke_model from keystone import notifications CONF = keystone.conf.CONF # This builds a discrete cache region dedicated to revoke events. The API can # return a filtered list based upon last fetchtime. This is deprecated but # must be maintained. 
REVOKE_REGION = cache.create_region(name='revoke') MEMOIZE = cache.get_memoization_decorator(group='revoke', region=REVOKE_REGION) class Manager(manager.Manager): """Default pivot point for the Revoke backend. Performs common logic for recording revocations. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.revoke' _provides_api = 'revoke_api' def __init__(self): super().__init__(CONF.revoke.driver) self._register_listeners() self.model = revoke_model @MEMOIZE def _list_events(self, last_fetch): return self.driver.list_events(last_fetch) def list_events(self, last_fetch=None): return self._list_events(last_fetch) def _user_callback(self, service, resource_type, operation, payload): self.revoke_by_user(payload['resource_info']) def _project_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(project_id=payload['resource_info']) ) def _trust_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(trust_id=payload['resource_info']) ) def _consumer_callback(self, service, resource_type, operation, payload): self.revoke( revoke_model.RevokeEvent(consumer_id=payload['resource_info']) ) def _register_listeners(self): callbacks = { notifications.ACTIONS.deleted: [ ['OS-TRUST:trust', self._trust_callback], ['OS-OAUTH1:consumer', self._consumer_callback], ['user', self._user_callback], ['project', self._project_callback], ], notifications.ACTIONS.disabled: [['user', self._user_callback]], notifications.ACTIONS.internal: [ [ notifications.PERSIST_REVOCATION_EVENT_FOR_USER, self._user_callback, ], ], } for event, cb_info in callbacks.items(): for resource_type, callback_fns in cb_info: notifications.register_event_callback( event, resource_type, callback_fns ) def revoke_by_user(self, user_id): return self.revoke(revoke_model.RevokeEvent(user_id=user_id)) def _assert_not_domain_and_project_scoped( self, 
domain_id=None, project_id=None ): if domain_id is not None and project_id is not None: msg = _( 'The revoke call must not have both domain_id and ' 'project_id. This is a bug in the Keystone server. The ' 'current request is aborted.' ) raise exception.UnexpectedError(exception=msg) def revoke_by_audit_id(self, audit_id): self.revoke(revoke_model.RevokeEvent(audit_id=audit_id)) def revoke_by_audit_chain_id( self, audit_chain_id, project_id=None, domain_id=None ): self._assert_not_domain_and_project_scoped( domain_id=domain_id, project_id=project_id ) self.revoke( revoke_model.RevokeEvent( audit_chain_id=audit_chain_id, domain_id=domain_id, project_id=project_id, ) ) def check_token(self, token): """Check the values from a token against the revocation list. :param token: dictionary of values from a token, normalized for differences between v2 and v3. The checked values are a subset of the attributes of model.TokenEvent :raises keystone.exception.TokenNotFound: If the token is invalid. """ if revoke_model.is_revoked( self.driver.list_events(token=token), token ): raise exception.TokenNotFound(_('Failed to validate token')) def revoke(self, event): self.driver.revoke(event) REVOKE_REGION.invalidate() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/revoke/model.py0000664000175000017500000000112500000000000020270 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.models.revoke_model import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/server/0000775000175000017500000000000000000000000016632 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/__init__.py0000664000175000017500000000272500000000000020751 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from keystone.common import sql import keystone.conf from keystone.server import backends CONF = keystone.conf.CONF LOG = log.getLogger(__name__) def configure( version=None, config_files=None, pre_setup_logging_fn=lambda: None ): keystone.conf.configure() sql.initialize() keystone.conf.set_config_defaults() CONF( project='keystone', version=version, default_config_files=config_files ) pre_setup_logging_fn() keystone.conf.setup_logging() if CONF.insecure_debug: LOG.warning( 'insecure_debug is enabled so responses may include sensitive ' 'information.' 
) def setup_backends( load_extra_backends_fn=lambda: {}, startup_application_fn=lambda: None ): drivers = backends.load_backends() drivers.update(load_extra_backends_fn()) res = startup_application_fn() return drivers, res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/backends.py0000664000175000017500000000553000000000000020761 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_log import log from keystone import application_credential from keystone import assignment from keystone import auth from keystone import catalog from keystone.common import cache from keystone.common import provider_api from keystone import credential from keystone import endpoint_policy from keystone import exception from keystone import federation from keystone import identity from keystone import limit from keystone import oauth1 from keystone import policy from keystone import receipt from keystone import resource from keystone import revoke from keystone import token from keystone import trust LOG = log.getLogger(__name__) def load_backends(): # Configure and build the cache cache.configure_cache() cache.configure_cache(region=catalog.COMPUTED_CATALOG_REGION) cache.configure_cache(region=assignment.COMPUTED_ASSIGNMENTS_REGION) cache.configure_cache(region=revoke.REVOKE_REGION) cache.configure_cache(region=token.provider.TOKENS_REGION) 
cache.configure_cache(region=receipt.provider.RECEIPTS_REGION) cache.configure_cache(region=identity.ID_MAPPING_REGION) cache.configure_invalidation_region() managers = [ application_credential.Manager, assignment.Manager, catalog.Manager, credential.Manager, credential.provider.Manager, resource.DomainConfigManager, endpoint_policy.Manager, federation.Manager, identity.generator.Manager, identity.MappingManager, identity.Manager, identity.ShadowUsersManager, limit.Manager, oauth1.Manager, policy.Manager, resource.Manager, revoke.Manager, assignment.RoleManager, receipt.provider.Manager, trust.Manager, token.provider.Manager, ] drivers = {d._provides_api: d() for d in managers} # NOTE(morgan): lock the APIs, these should only ever be instantiated # before running keystone. provider_api.ProviderAPIs.lock_provider_registry() try: # Check project depth before start process. If fail, Keystone will not # start. drivers['unified_limit_api'].check_project_depth() except exception.LimitTreeExceedError as e: LOG.critical(e) sys.exit(1) auth.core.load_auth_methods() return drivers ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/server/flask/0000775000175000017500000000000000000000000017732 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/__init__.py0000664000175000017500000000322700000000000022047 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morgan): Import relevant stuff so importing individual under-pinnings # isn't needed, keystone.server.flask exposes all the interesting bits # needed to develop restful APIs for keystone. from keystone.server.flask.common import APIBase # noqa from keystone.server.flask.common import base_url # noqa from keystone.server.flask.common import construct_json_home_data # noqa from keystone.server.flask.common import construct_resource_map # noqa from keystone.server.flask.common import full_url # noqa from keystone.server.flask.common import JsonHomeData # noqa from keystone.server.flask.common import ResourceBase # noqa from keystone.server.flask.common import ResourceMap # noqa from keystone.server.flask.common import unenforced_api # noqa # NOTE(morgan): This allows for from keystone.flask import * and have all the # cool stuff needed to develop new APIs within a module/subsystem __all__ = ( 'APIBase', 'JsonHomeData', 'ResourceBase', 'ResourceMap', 'base_url', 'construct_json_home_data', 'construct_resource_map', 'full_url', 'unenforced_api', ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/server/flask/application.py0000664000175000017500000001446100000000000022615 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import sys import flask import oslo_i18n from oslo_log import log from oslo_middleware import healthcheck try: # werkzeug 0.15.x from werkzeug.middleware import dispatcher as wsgi_dispatcher except ImportError: # werkzeug 0.14.x import werkzeug.wsgi as wsgi_dispatcher import keystone.api from keystone import exception from keystone.oauth2 import handlers as oauth2_handlers from keystone.receipt import handlers as receipt_handlers from keystone.server.flask import common as ks_flask from keystone.server.flask.request_processing import json_body from keystone.server.flask.request_processing import req_logging LOG = log.getLogger(__name__) def fail_gracefully(f): """Log exceptions and aborts.""" @functools.wraps(f) def wrapper(*args, **kw): try: return f(*args, **kw) except Exception as e: LOG.debug(e, exc_info=True) # exception message is printed to all logs LOG.critical(e) sys.exit(1) return wrapper def _add_vary_x_auth_token_header(response): # Add the expected Vary Header, this is run after every request in the # response-phase response.headers['Vary'] = 'X-Auth-Token' return response def _best_match_language(): """Determine the best available locale. This returns best available locale based on the Accept-Language HTTP header passed in the request. """ if not flask.request.accept_languages: return None return flask.request.accept_languages.best_match( oslo_i18n.get_available_languages('keystone') ) def _handle_keystone_exception(error): # TODO(adriant): register this with its own specific handler: if isinstance(error, exception.InsufficientAuthMethods): return receipt_handlers.build_receipt(error) elif isinstance(error, exception.OAuth2Error): return oauth2_handlers.build_response(error) if isinstance(error, exception.RedirectRequired): return flask.redirect(error.redirect_url) # Handle logging if isinstance(error, exception.Unauthorized): LOG.warning( "Authorization failed. 
%(exception)s from %(remote_addr)s", {'exception': error, 'remote_addr': flask.request.remote_addr}, ) # NOTE(frickler): NotFound exceptions happen regularly during # normal operations, e.g. when doing "openstack role show member", # so don't make a fuss about them elif isinstance(error, exception.NotFound): LOG.debug(str(error)) else: LOG.exception(str(error)) # Render the exception to something user "friendly" error_message = error.args[0] message = oslo_i18n.translate(error_message, _best_match_language()) if message is error_message: # translate() didn't do anything because it wasn't a Message, # convert to a string. message = str(message) body = dict( error={'code': error.code, 'title': error.title, 'message': message} ) if isinstance(error, exception.AuthPluginException): body['error']['identity'] = error.authentication # Create the response and set status code. response = flask.jsonify(body) response.status_code = error.code # Add the appropriate WWW-Authenticate header for Unauthorized if isinstance(error, exception.Unauthorized): url = ks_flask.base_url() response.headers['WWW-Authenticate'] = 'Keystone uri="%s"' % url return response def _handle_unknown_keystone_exception(error): # translate a python exception to something we can properly render as # an API error. if isinstance(error, TypeError): new_exc = exception.ValidationError(error) else: new_exc = exception.UnexpectedError(error) return _handle_keystone_exception(new_exc) @fail_gracefully def application_factory(name='public'): if name not in ('admin', 'public'): raise RuntimeError( 'Application name (for base_url lookup) must be ' 'either `admin` or `public`.' ) app = flask.Flask(name) # Register Error Handler Function for Keystone Errors. # NOTE(morgan): Flask passes errors to an error handling function. 
All of # keystone's api errors are explicitly registered in # keystone.exception.KEYSTONE_API_EXCEPTIONS and those are in turn # registered here to ensure a proper error is bubbled up to the end user # instead of a 500 error. for exc in exception.KEYSTONE_API_EXCEPTIONS: app.register_error_handler(exc, _handle_keystone_exception) # Register extra (python) exceptions with the proper exception handler, # specifically TypeError. It will render as a 400 error, but presented in # a "web-ified" manner app.register_error_handler(TypeError, _handle_unknown_keystone_exception) # Add core before request functions app.before_request(req_logging.log_request_info) app.before_request(json_body.json_body_before_request) # Add core after request functions app.after_request(_add_vary_x_auth_token_header) # NOTE(morgan): Configure the Flask Environment for our needs. app.config.update( # We want to bubble up Flask Exceptions (for now) PROPAGATE_EXCEPTIONS=True ) for api in keystone.api.__apis__: for api_bp in api.APIs: api_bp.instantiate_and_register_to_app(app) # Load in Healthcheck and map it to /healthcheck hc_app = healthcheck.Healthcheck.app_factory( {}, oslo_config_project='keystone' ) # Use the simple form of the dispatch middleware, no extra logic needed # for legacy dispatching. This is to mount /healthcheck at a consistent # place app.wsgi_app = wsgi_dispatcher.DispatcherMiddleware( app.wsgi_app, {'/healthcheck': hc_app} ) return app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/server/flask/common.py0000664000175000017500000012747200000000000021611 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import collections import functools import http.client import re import typing as ty import uuid import wsgiref.util import flask from flask import blueprints import flask_restful import flask_restful.utils from oslo_log import log from oslo_log import versionutils from oslo_serialization import jsonutils from keystone.common import authorization from keystone.common import context from keystone.common import driver_hints from keystone.common import json_home from keystone.common.rbac_enforcer import enforcer from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications # NOTE(morgan): Capture the relevant part of the flask url route rule for # substitution. In flask arguments (e.g. url elements to be passed to the # "resource" method, e.g. user_id, are specified like `` # we use this regex to replace the <> with {} for JSON Home purposes and # remove the argument type. Use of this is done like # _URL_SUBST.sub('{\\1}', entity_path), which replaces the whole match # match rule bit with the capture group (this is a greedy sub). 
_URL_SUBST = re.compile(r'<[^\s:]+:([^>]+)>') CONF = keystone.conf.CONF LOG = log.getLogger(__name__) ResourceMap = collections.namedtuple( 'ResourceMap', ['resource', 'url', 'alternate_urls', 'kwargs', 'json_home_data'], ) JsonHomeData = collections.namedtuple( 'JsonHomeData', ['rel', 'status', 'path_vars'] ) _v3_resource_relation = json_home.build_v3_resource_relation def construct_resource_map( resource, url, resource_kwargs, alternate_urls=None, rel=None, status=json_home.Status.STABLE, path_vars=None, resource_relation_func=_v3_resource_relation, ): """Construct the ResourceMap Named Tuple. :param resource: The flask-RESTful resource class implementing the methods for the API. :type resource: :class:`ResourceMap` :param url: Flask-standard url route, all flask url routing rules apply. url variables will be passed to the Resource methods as arguments. :type url: str :param resource_kwargs: a dict of optional value(s) that can further modify the handling of the routing. * endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower` Can be used to reference this route in :class:`fields.Url` fields (str) * resource_class_args: args to be forwarded to the constructor of the resource. (tuple) * resource_class_kwargs: kwargs to be forwarded to the constructor of the resource. (dict) Additional keyword arguments not specified above will be passed as-is to :meth:`flask.Flask.add_url_rule`. :param alternate_urls: An iterable (list) of dictionaries containing urls and associated json home REL data. Each element is expected to be a dictionary with a 'url' key and an optional 'json_home' key for a 'JsonHomeData' named tuple These urls will also map to the resource. These are used to ensure API compatibility when a "new" path is more correct for the API but old paths must continue to work. Example: `/auth/domains` being the new path for `/OS-FEDERATION/domains`. The `OS-FEDERATION` part would be listed as an alternate url. 
If a 'json_home' key is provided, the original path with the new json_home data will be added to the JSON Home Document. :type: iterable or None :param rel: :type rel: str or None :param status: JSON Home API Status, e.g. "STABLE" :type status: str :param path_vars: JSON Home Path Var Data (arguments) :type path_vars: dict or None :param resource_relation_func: function to build expected resource rel data :type resource_relation_func: callable :return: """ if rel is not None: jh_data = construct_json_home_data( rel=rel, status=status, path_vars=path_vars, resource_relation_func=resource_relation_func, ) else: jh_data = None if not url.startswith('/'): url = '/%s' % url return ResourceMap( resource=resource, url=url, alternate_urls=alternate_urls, kwargs=resource_kwargs, json_home_data=jh_data, ) def construct_json_home_data( rel, status=json_home.Status.STABLE, path_vars=None, resource_relation_func=_v3_resource_relation, ): rel = resource_relation_func(resource_name=rel) return JsonHomeData(rel=rel, status=status, path_vars=(path_vars or {})) def _initialize_rbac_enforcement_check(): setattr(flask.g, enforcer._ENFORCEMENT_CHECK_ATTR, False) def _assert_rbac_enforcement_called(resp): # assert is intended to be used to ensure code during development works # as expected, it is fine to be optimized out with `python -O` msg = ( 'PROGRAMMING ERROR: enforcement (`keystone.common.rbac_enforcer.' 'enforcer.RBACEnforcer.enforce_call()`) has not been called; API ' 'is unenforced.' ) g = flask.g # NOTE(morgan): OPTIONS is a special case and is handled by flask # internally. We should never be enforcing on OPTIONS calls. if flask.request.method != 'OPTIONS': assert getattr( # nosec g, enforcer._ENFORCEMENT_CHECK_ATTR, False ), msg # nosec return resp def _remove_content_type_on_204(resp): # Remove content-type if the resp is 204. 
if resp.status_code == http.client.NO_CONTENT: resp.headers.pop('content-type', None) return resp class APIBase(metaclass=abc.ABCMeta): @property @abc.abstractmethod def _name(self): """Override with an attr consisting of the API Name, e.g 'users'.""" raise NotImplementedError() @property @abc.abstractmethod def _import_name(self): """Override with an attr consisting of the value of `__name__`.""" raise NotImplementedError() @property @abc.abstractmethod def resource_mapping(self) -> list[ResourceMap]: """An attr containing of an iterable of :class:`ResourceMap`. Each :class:`ResourceMap` is a NamedTuple with the following elements: * resource: a :class:`flask_restful.Resource` class or subclass * url: a url route to match for the resource, standard flask routing rules apply. Any url variables will be passed to the resource method as args. (str) * alternate_urls: an iterable of url routes to match for the resource, standard flask routing rules apply. These rules are in addition (for API compat) to the primary url. Any url variables will be passed to the resource method as args. (iterable) * json_home_data: :class:`JsonHomeData` populated with relevant info for populated JSON Home Documents or None. * kwargs: a dict of optional value(s) that can further modify the handling of the routing. * endpoint: endpoint name (defaults to :meth:`Resource.__name__.lower` Can be used to reference this route in :class:`fields.Url` fields (str) * resource_class_args: args to be forwarded to the constructor of the resource. (tuple) * resource_class_kwargs: kwargs to be forwarded to the constructor of the resource. (dict) Additional keyword arguments not specified above will be passed as-is to :meth:`flask.Flask.add_url_rule`. 
""" raise NotImplementedError() @property def resources(self) -> ty.List[ty.Type["ResourceBase"]]: return [] @staticmethod def _build_bp_url_prefix(prefix): # NOTE(morgan): Keystone only has a V3 API, this is here for future # proofing and exceptional cases such as root discovery API object(s) parts = ['/v3'] if prefix: parts.append(prefix.lstrip('/')) return '/'.join(parts).rstrip('/') @property def api(self): # The API may be directly accessed via this property return self.__api @property def blueprint(self): # The API Blueprint may be directly accessed via this property return self.__blueprint def __init__( self, blueprint_url_prefix='', api_url_prefix='', default_mediatype='application/json', decorators=None, errors=None, ): self.__before_request_functions_added = False self.__after_request_functions_added = False self._default_mediatype = default_mediatype blueprint_url_prefix = blueprint_url_prefix.rstrip('/') api_url_prefix = api_url_prefix.rstrip('/') if api_url_prefix and not api_url_prefix.startswith('/'): self._api_url_prefix = '/%s' % api_url_prefix else: # NOTE(morgan): If the api_url_prefix is empty fall back on the # class-level defined `_api_url_prefix` if it is set. self._api_url_prefix = api_url_prefix or getattr( self, '_api_url_prefix', '' ) if blueprint_url_prefix and not blueprint_url_prefix.startswith('/'): self._blueprint_url_prefix = self._build_bp_url_prefix( '/%s' % blueprint_url_prefix ) else: self._blueprint_url_prefix = self._build_bp_url_prefix( blueprint_url_prefix ) self.__blueprint = blueprints.Blueprint( name=self._name, import_name=self._import_name, url_prefix=self._blueprint_url_prefix, ) self.__api = flask_restful.Api( app=self.__blueprint, prefix=self._api_url_prefix, default_mediatype=self._default_mediatype, decorators=decorators, errors=errors, ) # NOTE(morgan): Make sure we're using oslo_serialization.jsonutils # instead of the default json serializer. 
Keystone has data types that # the default serializer cannot handle, representation is a decorator # but since we instantiate the API in-line we need to do some magic # and call it as a normal method. self.__api.representation('application/json')(self._output_json) self._add_resources() self._add_mapped_resources() # Apply Before and After request functions self._register_before_request_functions() self._register_after_request_functions() # Assert is intended to ensure code works as expected in development, # it is fine to optimize out with python -O msg = '%s_request functions not registered' assert self.__before_request_functions_added, msg % 'before' # nosec assert self.__after_request_functions_added, msg % 'after' # nosec def _add_resources(self): # Add resources that are standardized. Each resource implements a # base set of handling for a collection of entities such as # `users`. Resources are sourced from self.resources. Each resource # should have an attribute/property containing the `collection_key` # which is typically the "plural" form of the entity, e.g. `users` and # `member_key` which is typically the "singular" of the entity, e.g. # `user`. Resources are sourced from self.resources, each element is # simply a :class:`flask_restful.Resource`. for r in self.resources: c_key = getattr(r, 'collection_key', None) m_key = getattr(r, 'member_key', None) r_pfx = getattr(r, 'api_prefix', None) if not c_key or not m_key: LOG.debug( 'Unable to add resource %(resource)s to API ' '%(name)s, both `member_key` and `collection_key` ' 'must be implemented. 
[collection_key(%(col_key)s) ' 'member_key(%(m_key)s)]', { 'resource': r.__name__, 'name': self._name, 'col_key': c_key, 'm_key': m_key, }, ) continue if r_pfx != self._api_url_prefix: LOG.debug( 'Unable to add resource %(resource)s to API as the ' 'API Prefixes do not match: %(apfx)r != %(rpfx)r', { 'resource': r.__name__, 'rpfx': r_pfx, 'apfx': self._api_url_prefix, }, ) continue # NOTE(morgan): The Prefix is automatically added by the API, so # we do not add it to the paths here. collection_path = '/%s' % c_key if getattr(r, '_id_path_param_name_override', None): # The member_key doesn't match the "id" key in the url, make # sure to use the correct path-key for ID. member_id_key = getattr(r, '_id_path_param_name_override') else: member_id_key = f'{m_key}_id' entity_path = '/{collection}/'.format( collection=c_key, member=member_id_key, ) # NOTE(morgan): The json-home form of the entity path is different # from the flask-url routing form. Must also include the prefix jh_e_path = _URL_SUBST.sub( '{\\1}', '%(pfx)s/%(e_path)s' % { 'pfx': self._api_url_prefix, 'e_path': entity_path.lstrip('/'), }, ) LOG.debug( 'Adding standard routes to API %(name)s for `%(resource)s` ' '(API Prefix: %(prefix)s) [%(collection_path)s, ' '%(entity_path)s]', { 'name': self._name, 'resource': r.__class__.__name__, 'collection_path': collection_path, 'entity_path': entity_path, 'prefix': self._api_url_prefix, }, ) self.api.add_resource(r, collection_path, entity_path) # Add JSON Home data resource_rel_func = getattr( r, 'json_home_resource_rel_func', json_home.build_v3_resource_relation, ) resource_rel_status = getattr(r, 'json_home_resource_status', None) collection_rel_resource_name = getattr( r, 'json_home_collection_resource_name_override', c_key ) collection_rel = resource_rel_func( resource_name=collection_rel_resource_name ) # NOTE(morgan): Add the prefix explicitly for JSON Home documents # to the collection path. 
href_val = '{pfx}{collection_path}'.format( pfx=self._api_url_prefix, collection_path=collection_path, ) # If additional parameters exist in the URL, add them to the # href-vars dict. additional_params = getattr( r, 'json_home_additional_parameters', {} ) if additional_params: # NOTE(morgan): Special case, we have 'additional params' which # means we know the params are in the "prefix". This guarantees # the correct data in the json_home document with href-template # and href-vars even on the "collection" entry rel_data = dict() rel_data['href-template'] = _URL_SUBST.sub('{\\1}', href_val) rel_data['href-vars'] = additional_params else: rel_data = {'href': href_val} member_rel_resource_name = getattr( r, 'json_home_member_resource_name_override', m_key ) entity_rel = resource_rel_func( resource_name=member_rel_resource_name ) id_str = member_id_key parameter_rel_func = getattr( r, 'json_home_parameter_rel_func', json_home.build_v3_parameter_relation, ) id_param_rel = parameter_rel_func(parameter_name=id_str) entity_rel_data = { 'href-template': jh_e_path, 'href-vars': {id_str: id_param_rel}, } if additional_params: entity_rel_data.setdefault('href-vars', {}).update( additional_params ) if resource_rel_status is not None: json_home.Status.update_resource_data( rel_data, resource_rel_status ) json_home.Status.update_resource_data( entity_rel_data, resource_rel_status ) json_home.JsonHomeResources.append_resource( collection_rel, rel_data ) json_home.JsonHomeResources.append_resource( entity_rel, entity_rel_data ) def _add_mapped_resources(self): # Add resource mappings, non-standard resource connections for r in self.resource_mapping: alt_url_json_home_data = [] LOG.debug( 'Adding resource routes to API %(name)s: ' '[%(url)r %(kwargs)r]', {'name': self._name, 'url': r.url, 'kwargs': r.kwargs}, ) urls = [r.url] if r.alternate_urls is not None: for element in r.alternate_urls: if self._api_url_prefix: LOG.debug( 'Unable to add additional resource route ' '`%(route)s` 
to API %(name)s because API has a ' 'URL prefix. Only APIs without explicit prefixes ' 'can have alternate URL routes added.', {'route': element['url'], 'name': self._name}, ) continue LOG.debug( 'Adding additional resource route (alternate) to API ' '%(name)s: [%(url)r %(kwargs)r]', { 'name': self._name, 'url': element['url'], 'kwargs': r.kwargs, }, ) urls.append(element['url']) if element.get('json_home'): alt_url_json_home_data.append(element['json_home']) # Add all URL routes at once. self.api.add_resource(r.resource, *urls, **r.kwargs) # Build the JSON Home data and add it to the relevant JSON Home # Documents for explicit JSON Home data. if r.json_home_data: resource_data = {} # NOTE(morgan): JSON Home form of the URL is different # from FLASK, do the conversion here. conv_url = '{pfx}/{url}'.format( url=_URL_SUBST.sub('{\\1}', r.url).lstrip('/'), pfx=self._api_url_prefix, ) if r.json_home_data.path_vars: resource_data['href-template'] = conv_url resource_data['href-vars'] = r.json_home_data.path_vars else: resource_data['href'] = conv_url json_home.Status.update_resource_data( resource_data, r.json_home_data.status ) json_home.JsonHomeResources.append_resource( r.json_home_data.rel, resource_data ) for element in alt_url_json_home_data: # Append the "new" path (resource) data with the old rel # reference. json_home.JsonHomeResources.append_resource( element.rel, resource_data ) def _register_before_request_functions(self, functions=None): """Register functions to be executed in the `before request` phase. Override this method and pass in via "super" any additional functions that should be registered. It is assumed that any override will also accept a "functions" list and append the passed in values to it's list prior to calling super. Each function will be called with no arguments and expects a NoneType return. If the function returns a value, that value will be returned as the response to the entire request, no further processing will happen. 
:param functions: list of functions that will be run in the `before_request` phase. :type functions: list """ functions = functions or [] # Assert is intended to ensure code works as expected in development, # it is fine to optimize out with python -O msg = 'before_request functions already registered' assert not self.__before_request_functions_added, msg # nosec # register global before request functions # e.g. self.__blueprint.before_request(function) self.__blueprint.before_request(_initialize_rbac_enforcement_check) # Add passed-in functions for f in functions: self.__blueprint.before_request(f) self.__before_request_functions_added = True def _register_after_request_functions(self, functions=None): """Register functions to be executed in the `after request` phase. Override this method and pass in via "super" any additional functions that should be registered. It is assumed that any override will also accept a "functions" list and append the passed in values to it's list prior to calling super. Each function will be called with a single argument of the Response class type. The function must return either the passed in Response or a new Response. NOTE: As of flask 0.7, these functions may not be executed in the case of an unhandled exception. :param functions: list of functions that will be run in the `after_request` phase. :type functions: list """ functions = functions or [] # Assert is intended to ensure code works as expected in development, # it is fine to optimize out with python -O msg = 'after_request functions already registered' assert not self.__after_request_functions_added, msg # nosec # register global after request functions # e.g. 
self.__blueprint.after_request(function) self.__blueprint.after_request(_assert_rbac_enforcement_called) self.__blueprint.after_request(_remove_content_type_on_204) # Add Passed-In Functions for f in functions: self.__blueprint.after_request(f) self.__after_request_functions_added = True @staticmethod def _output_json(data, code, headers=None): """Make a Flask response with a JSON encoded body. This is a replacement of the default that is shipped with flask-RESTful as we need oslo_serialization for the wider datatypes in our objects that are serialized to json. """ settings = flask.current_app.config.get('RESTFUL_JSON', {}) # If we're in debug mode, and the indent is not set, we set it to # a reasonable value here. Note that this won't override any existing # value that was set. We also set the "sort_keys" value. if flask.current_app.debug: settings.setdefault('indent', 4) settings.setdefault('sort_keys', not flask_restful.utils.PY3) # always end the json dumps with a new line # see https://github.com/mitsuhiko/flask/pull/1262 dumped = jsonutils.dumps(data, **settings) + "\n" resp = flask.make_response(dumped, code) resp.headers.extend(headers or {}) return resp @classmethod def instantiate_and_register_to_app(cls, flask_app): """Build the API object and register to the passed in flask_app. This is a simplistic loader that makes assumptions about how the blueprint is loaded. Anything beyond defaults should be done explicitly via normal instantiation where more values may be passed via :meth:`__init__`. :returns: :class:`keystone.server.flask.common.APIBase` """ inst = cls() flask_app.register_blueprint(inst.blueprint) return inst class ResourceBase(flask_restful.Resource): collection_key: str member_key: str _public_parameters: frozenset[str] = frozenset([]) # NOTE(morgan): This must match the string on the API the resource is # registered to. 
api_prefix: str = '' _id_path_param_name_override: ty.Optional[str] = None method_decorators: list[ty.Callable] = [] @staticmethod def _assign_unique_id(ref): ref = ref.copy() ref['id'] = uuid.uuid4().hex return ref @staticmethod def _validate_id_format(id): uval = uuid.UUID(id).hex if uval != id: raise ValueError('badly formed hexadecimal UUID value') @classmethod def _require_matching_id(cls, ref): """Ensure the value matches the reference's ID, if any.""" id_arg = None if cls.member_key is not None: id_arg = flask.request.view_args.get('%s_id' % cls.member_key) if ref.get('id') is not None and id_arg != ref['id']: raise exception.ValidationError('Cannot change ID') @classmethod def filter_params(cls, ref): """Remove unspecified parameters from the dictionary. This function removes unspecified parameters from the dictionary. This method checks only root-level keys from a ref dictionary. :param ref: a dictionary representing deserialized response to be serialized """ # NOTE(morgan): if _public_parameters is empty, do nothing. We do not # filter if we do not have an explicit white-list to work from. if cls._public_parameters: ref_keys = set(ref.keys()) blocked_keys = ref_keys - cls._public_parameters for blocked_param in blocked_keys: del ref[blocked_param] return ref @classmethod def wrap_collection(cls, refs, hints=None, collection_name=None): """Wrap a collection, checking for filtering and pagination. Returns the wrapped collection, which includes: - Executing any filtering not already carried out - Truncate to a set limit if necessary - Adds 'self' links in every member - Adds 'next', 'self' and 'prev' links for the whole collection. :param refs: the list of members of the collection :param hints: list hints, containing any relevant filters and limit. Any filters already satisfied by managers will have been removed :param collection_name: optional override for the 'collection key' class attribute. 
This is to be used when wrapping a collection for a different api, e.g. 'roles' from the 'trust' api. """ # Check if there are any filters in hints that were not handled by # the drivers. The driver will not have paginated or limited the # output if it found there were filters it was unable to handle if hints: refs = cls.filter_by_attributes(refs, hints) list_limited, refs = cls.limit(refs, hints) collection = collection_name or cls.collection_key for ref in refs: cls._add_self_referential_link(ref, collection_name=collection) container = {collection: refs} self_url = full_url(flask.request.environ['PATH_INFO']) container['links'] = {'next': None, 'self': self_url, 'previous': None} if list_limited: container['truncated'] = True return container @classmethod def wrap_member(cls, ref, collection_name=None, member_name=None): cls._add_self_referential_link(ref, collection_name) return {member_name or cls.member_key: ref} @classmethod def _add_self_referential_link(cls, ref, collection_name=None): collection_element = collection_name or cls.collection_key if cls.api_prefix: api_prefix = cls.api_prefix.lstrip('/').rstrip('/') # ensure we have substituted the flask-arg specification # to the "keystone" mechanism, then format the string api_prefix = _URL_SUBST.sub('{\\1}', api_prefix) if flask.request.view_args: # if a prefix has substitutions it is *required* that the # values are passed as view_args to the HTTP action method # (e.g. head/get/post/...). api_prefix = api_prefix.format(**flask.request.view_args) collection_element = '/'.join( [api_prefix, collection_name or cls.collection_key] ) self_link = base_url(path='/'.join([collection_element, ref['id']])) ref.setdefault('links', {})['self'] = self_link @classmethod def filter_by_attributes(cls, refs, hints): """Filter a list of references by filter values.""" def _attr_match(ref_attr, val_attr): """Matche attributes allowing for booleans as strings. 
We test explicitly for a value that defines it as 'False', which also means that the existence of the attribute with no value implies 'True' """ if type(ref_attr) is bool: return ref_attr == utils.attr_as_boolean(val_attr) else: return ref_attr == val_attr def _inexact_attr_match(inexact_filter, ref): """Apply an inexact filter to a result dict. :param inexact_filter: the filter in question :param ref: the dict to check :returns: True if there is a match """ comparator = inexact_filter['comparator'] key = inexact_filter['name'] if key in ref: filter_value = inexact_filter['value'] target_value = ref[key] if not inexact_filter['case_sensitive']: # We only support inexact filters on strings so # it's OK to use lower() filter_value = filter_value.lower() target_value = target_value.lower() if comparator == 'contains': return filter_value in target_value elif comparator == 'startswith': return target_value.startswith(filter_value) elif comparator == 'endswith': return target_value.endswith(filter_value) else: # We silently ignore unsupported filters return True return False for f in hints.filters: if f['comparator'] == 'equals': attr = f['name'] value = f['value'] refs = [ r for r in refs if _attr_match(utils.flatten_dict(r).get(attr), value) ] else: # It might be an inexact filter refs = [r for r in refs if _inexact_attr_match(f, r)] return refs @property def auth_context(self): return flask.request.environ.get(authorization.AUTH_CONTEXT_ENV, None) @property def oslo_context(self): return flask.request.environ.get(context.REQUEST_CONTEXT_ENV, None) @property def audit_initiator(self): """A pyCADF initiator describing the current authenticated context. As a property. """ return notifications.build_audit_initiator() @staticmethod def query_filter_is_true(filter_name): """Determine if bool query param is 'True'. 
We treat this the same way as we do for policy enforcement: {bool_param}=0 is treated as False Any other value is considered to be equivalent to True, including the absence of a value (but existence as a parameter). False Examples for param named `p`: * http://host/url * http://host/url?p=0 All other forms of the param 'p' would be result in a True value including: `http://host/url?param`. """ val = False if filter_name in flask.request.args: filter_value = flask.request.args.get(filter_name) if isinstance(filter_value, str) and filter_value == '0': val = False else: val = True return val @property def request_body_json(self): return flask.request.get_json(silent=True, force=True) or {} @staticmethod def build_driver_hints(supported_filters): """Build list hints based on the context query string. :param supported_filters: list of filters supported, so ignore any keys in query_dict that are not in this list. """ hints = driver_hints.Hints() if not flask.request.args: return hints for key, value in flask.request.args.items(multi=True): # Check if this is an exact filter if supported_filters is None or key in supported_filters: hints.add_filter(key, value) continue # Check if it is an inexact filter for valid_key in supported_filters: # See if this entry in query_dict matches a known key with an # inexact suffix added. If it doesn't match, then that just # means that there is no inexact filter for that key in this # query. 
if not key.startswith(valid_key + '__'): continue base_key, comparator = key.split('__', 1) # We map the query-style inexact of, for example: # # {'email__contains', 'myISP'} # # into a list directive add filter call parameters of: # # name = 'email' # value = 'myISP' # comparator = 'contains' # case_sensitive = True case_sensitive = True if comparator.startswith('i'): case_sensitive = False comparator = comparator[1:] hints.add_filter( base_key, value, comparator=comparator, case_sensitive=case_sensitive, ) # NOTE(henry-nash): If we were to support pagination, we would pull any # pagination directives out of the query_dict here, and add them into # the hints list. return hints @classmethod def limit(cls, refs, hints): """Limit a list of entities. The underlying driver layer may have already truncated the collection for us, but in case it was unable to handle truncation we check here. :param refs: the list of members of the collection :param hints: hints, containing, among other things, the limit requested :returns: boolean indicating whether the list was truncated, as well as the list of (truncated if necessary) entities. """ NOT_LIMITED = False LIMITED = True if hints is None or hints.limit is None: # No truncation was requested return NOT_LIMITED, refs if hints.limit.get('truncated', False): # The driver did truncate the list return LIMITED, refs if len(refs) > hints.limit['limit']: # The driver layer wasn't able to truncate it for us, so we must # do it here return LIMITED, refs[: hints.limit['limit']] return NOT_LIMITED, refs @classmethod def _normalize_dict(cls, d): return {cls._normalize_arg(k): v for (k, v) in d.items()} @staticmethod def _normalize_arg(arg): return arg.replace(':', '_').replace('-', '_') @classmethod def _get_domain_id_for_list_request(cls): """Get the domain_id for a v3 list call. If we running with multiple domain drivers, then the caller must specify a domain_id either as a filter or as part of the token scope. 
""" if not CONF.identity.domain_specific_drivers_enabled: # We don't need to specify a domain ID in this case return domain_id = flask.request.args.get('domain_id') if domain_id: return domain_id token_ref = cls.get_token_ref() if token_ref.domain_scoped: return token_ref.domain_id elif token_ref.project_scoped: return token_ref.project_domain['id'] elif token_ref.system_scoped: return else: msg = 'No domain information specified as part of list request' tr_msg = _( 'No domain information specified as part of list request' ) LOG.warning(msg) raise exception.Unauthorized(tr_msg) @classmethod def get_token_ref(cls): """Retrieve KeystoneToken object from the auth context and returns it. :raises keystone.exception.Unauthorized: If auth context cannot be found. :returns: The KeystoneToken object. """ try: # Retrieve the auth context that was prepared by # AuthContextMiddleware. auth_context = flask.request.environ.get( authorization.AUTH_CONTEXT_ENV, {} ) return auth_context['token'] except KeyError: LOG.warning("Couldn't find the auth context.") raise exception.Unauthorized() @classmethod def _normalize_domain_id(cls, ref): """Fill in domain_id if not specified in a v3 call.""" if not ref.get('domain_id'): oslo_ctx = flask.request.environ.get( context.REQUEST_CONTEXT_ENV, None ) if oslo_ctx and oslo_ctx.domain_id: # Domain Scoped Token Scenario. ref['domain_id'] = oslo_ctx.domain_id elif oslo_ctx.is_admin: # Legacy "shared" admin token Scenario raise exception.ValidationError( _( 'You have tried to create a resource using the admin ' 'token. As this token is not within a domain you must ' 'explicitly include a domain for this resource to ' 'belong to.' ) ) else: # TODO(henry-nash): We should issue an exception here since if # a v3 call does not explicitly specify the domain_id in the # entity, it should be using a domain scoped token. However, # the current tempest heat tests issue a v3 call without this. # This is raised as bug #1283539. 
Once this is fixed, we # should remove the line below and replace it with an error. # # Ahead of actually changing the code to raise an exception, we # issue a deprecation warning. versionutils.report_deprecated_feature( LOG, 'Not specifying a domain during a create user, group or ' 'project call, and relying on falling back to the ' 'default domain, is deprecated as of Liberty. There is no ' 'plan to remove this compatibility, however, future API ' 'versions may remove this, so please specify the domain ' 'explicitly or use a domain-scoped token.', ) ref['domain_id'] = CONF.identity.default_domain_id return ref def base_url(path=''): url = CONF['public_endpoint'] if not url: if not flask.request.environ: raise ValueError('Endpoint cannot be detected') url = wsgiref.util.application_uri(flask.request.environ) # remove version from the URL as it may be part of SCRIPT_NAME but # it should not be part of base URL url = re.sub(r'/v(3|(2\.0))/*$', '', url) # now remove the standard port url = utils.remove_standard_port(url) if path: # Cleanup leading /v3 if needed. path = path.rstrip('/').lstrip('/') if path.startswith('v3'): path = path[2:].lstrip('/') url = url.rstrip('/') url = '/'.join([p for p in (url, 'v3', path) if p]) return url def full_url(path=''): subs = {'url': base_url(path), 'query_string': ''} qs = flask.request.environ.get('QUERY_STRING') if qs: subs['query_string'] = '?%s' % qs return '%(url)s%(query_string)s' % subs def set_unenforced_ok(): # Does the work for unenforced_api. This must be used outside of a # decorator in some limited, such as when a ValidationError is raised up # from a "before_request" function (body_json checker is a prime example) setattr(flask.g, enforcer._ENFORCEMENT_CHECK_ATTR, True) def unenforced_api(f): """Decorate a resource method to mark is as an unenforced API. 
Explicitly exempts an API from receiving the enforced API check, specifically for cases such as user self-service password changes (or other APIs that must work without already having a token). This decorator may also be used if the API has extended enforcement logic/varying enforcement logic (such as some of the AUTH paths) where the full enforcement will be implemented directly within the methods. """ @functools.wraps(f) def wrapper(*args, **kwargs): set_unenforced_ok() return f(*args, **kwargs) return wrapper ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/core.py0000664000175000017500000001472100000000000021241 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import collections import os from oslo_log import log import stevedore try: # werkzeug 0.15.x from werkzeug.middleware import proxy_fix except ImportError: # werkzeug 0.14.x from werkzeug.contrib import fixers as proxy_fix from keystone.common import profiler import keystone.conf import keystone.server from keystone.server.flask import application from keystone.server.flask.request_processing.middleware import auth_context from keystone.server.flask.request_processing.middleware import url_normalize # NOTE(morgan): Middleware Named Tuple with the following values: # * "namespace": namespace for the entry_point # * "ep": the entry-point name # * "conf": extra config data for the entry_point (None or Dict) _Middleware = collections.namedtuple( '_Middleware', ['namespace', 'ep', 'conf'] ) CONF = keystone.conf.CONF # NOTE(morgan): ORDER HERE IS IMPORTANT! The middleware will process the # request in this list's order. _APP_MIDDLEWARE = ( _Middleware( namespace='keystone.server_middleware', ep='cors', conf={'oslo_config_project': 'keystone'}, ), _Middleware( namespace='keystone.server_middleware', ep='sizelimit', conf={} ), _Middleware( namespace='keystone.server_middleware', ep='http_proxy_to_wsgi', conf={}, ), _Middleware( namespace='keystone.server_middleware', ep='osprofiler', conf={} ), _Middleware( namespace='keystone.server_middleware', ep='request_id', conf={} ), ) # NOTE(morgan): ORDER HERE IS IMPORTANT! Each of these middlewares are # implemented/defined explicitly in Keystone Server. They do some level of # lifting to ensure the request is properly handled. It is importat to note # that these will be processed in the order of this list AND after all # middleware defined in _APP_MIDDLEWARE. AuthContextMiddleware should always # be the last element here as long as it is an actual Middleware. 
_KEYSTONE_MIDDLEWARE = ( url_normalize.URLNormalizingMiddleware, auth_context.AuthContextMiddleware, ) def _get_config_files(env=None): if env is None: env = os.environ dirname = env.get('OS_KEYSTONE_CONFIG_DIR', '').strip() files = [ s.strip() for s in env.get('OS_KEYSTONE_CONFIG_FILES', '').split(';') if s.strip() ] if dirname: if not files: files = ['keystone.conf'] files = [os.path.join(dirname, fname) for fname in files] return files def setup_app_middleware(app): # NOTE(morgan): Load the middleware, in reverse order, we wrap the app # explicitly; reverse order to ensure the first element in _APP_MIDDLEWARE # processes the request first. MW = _APP_MIDDLEWARE IMW = _KEYSTONE_MIDDLEWARE # Add in optional (config-based) middleware # NOTE(morgan): Each of these may need to be in a specific location # within the pipeline therefore cannot be magically appended/prepended if CONF.wsgi.debug_middleware: # Add in the Debug Middleware MW = ( _Middleware( namespace='keystone.server_middleware', ep='debug', conf={} ), ) + _APP_MIDDLEWARE # Apply internal-only Middleware (e.g. AuthContextMiddleware). These # are below all externally loaded middleware in request processing. for mw in reversed(IMW): app.wsgi_app = mw(app.wsgi_app) # Apply the middleware to the application. for mw in reversed(MW): # TODO(morgan): Explore moving this to ExtensionManager, but we # want to be super careful about what middleware we load and in # what order. DriverManager gives us that capability and only loads # the entry points we care about rather than all of them. # Load via Stevedore, initialize the class via the factory so we can # initialize the "loaded" entrypoint with the currently bound # object pointed at "application". We may need to eventually move away # from the "factory" mechanism. loaded = stevedore.DriverManager( mw.namespace, mw.ep, invoke_on_load=False ) # NOTE(morgan): global_conf (args[0]) to the factory is always empty # and local_conf (args[1]) will be the mw.conf dict. 
This allows for # configuration to be passed for middleware such as oslo CORS which # expects oslo_config_project or "allowed_origin" to be in the # local_conf, this is all a hold-over from paste-ini and pending # reworking/removal(s) factory_func = loaded.driver.factory({}, **mw.conf) app.wsgi_app = factory_func(app.wsgi_app) # Apply werkzeug specific middleware app.wsgi_app = proxy_fix.ProxyFix(app.wsgi_app) return app def initialize_application( name, post_log_configured_function=lambda: None, config_files=None ): possible_topdir = os.path.normpath( os.path.join( os.path.abspath(__file__), os.pardir, os.pardir, os.pardir, os.pardir, ) ) dev_conf = os.path.join(possible_topdir, 'etc', 'keystone.conf') if not config_files: config_files = None if os.path.exists(dev_conf): config_files = [dev_conf] keystone.server.configure(config_files=config_files) # Log the options used when starting if we're in debug mode... if CONF.debug: CONF.log_opt_values(log.getLogger(CONF.prog), log.DEBUG) post_log_configured_function() # TODO(morgan): Provide a better mechanism than "loadapp", this was for # paste-deploy specific mechanisms. def loadapp(): app = application.application_factory(name) return app _unused, app = keystone.server.setup_backends( startup_application_fn=loadapp ) # setup OSprofiler notifier and enable the profiling if that is configured # in Keystone configuration file. 
profiler.setup(name) return setup_app_middleware(app) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/server/flask/request_processing/0000775000175000017500000000000000000000000023656 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/__init__.py0000664000175000017500000000000000000000000025755 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/json_body.py0000664000175000017500000000717700000000000026232 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Before request processing for JSON Body enforcement import flask from werkzeug import exceptions as werkzeug_exceptions from keystone import exception from keystone.i18n import _ from keystone.server.flask import common as ks_flask_common def json_body_before_request(): """Enforce JSON Request Body.""" # TODO(morgan): Allow other content-types when OpenAPI Doc or improved # federation is implemented for known/valid paths. This function should # be removed long term. 
# exit if there is nothing to be done, (no body) if not flask.request.get_data(): return None elif flask.request.path and flask.request.path.startswith( '/v3/OS-OAUTH2/' ): # When the user makes a request to the OAuth2.0 token endpoint, # the user should use the "application/x-www-form-urlencoded" format # with a character encoding of UTF-8 in the HTTP request entity-body. # At the scenario there is nothing to be done and exit. return None try: # flask does loading for us for json, use the flask default loader # in the case that the data is *not* json or a dict, we should see a # raise of werkzeug.exceptions.BadRequest, re-spin this to the keystone # ValidationError message (as expected by our contract) # Explicitly check if the content is supposed to be json. if ( flask.request.is_json or flask.request.headers.get('Content-Type', '') == '' ): json_decoded = flask.request.get_json(force=True) if not isinstance(json_decoded, dict): # In the case that the returned value was not a dict, force # a raise that will be caught the same way that a Decode error # would be handled. raise werkzeug_exceptions.BadRequest( _('resulting JSON load was not a dict') ) else: # We no longer need enforcement on this API, set unenforced_ok # we already hit a validation error. This is required as the # request is never hitting the resource methods, meaning # @unenforced_api is not called. Without marking the request # as "unenforced_ok" the assertion check to ensure enforcement # was called would raise up causing a 500 error. ks_flask_common.set_unenforced_ok() raise exception.ValidationError( attribute='application/json', target='Content-Type header' ) except werkzeug_exceptions.BadRequest: # We no longer need enforcement on this API, set unenforced_ok # we already hit a validation error. This is required as the # request is never hitting the resource methods, meaning # @unenforced_api is not called. 
Without marking the request # as "unenforced_ok" the assertion check to ensure enforcement # was called would raise up causing a 500 error. ks_flask_common.set_unenforced_ok() raise exception.ValidationError( attribute='valid JSON', target='request body' ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/server/flask/request_processing/middleware/0000775000175000017500000000000000000000000025773 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/middleware/__init__.py0000664000175000017500000000000000000000000030072 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/middleware/auth_context.py0000664000175000017500000004725200000000000031064 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import http.client import re import wsgiref.util from keystonemiddleware import auth_token import oslo_i18n from oslo_log import log from oslo_serialization import jsonutils import webob.dec import webob.exc from keystone.common import authorization from keystone.common import context from keystone.common import provider_api from keystone.common import render_token from keystone.common import tokenless_auth from keystone.common import utils import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.federation import utils as federation_utils from keystone.i18n import _ from keystone.models import token_model CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs # Environment variable used to pass the request context CONTEXT_ENV = 'openstack.context' __all__ = ('AuthContextMiddleware',) CONF = keystone.conf.CONF LOG = log.getLogger(__name__) JSON_ENCODE_CONTENT_TYPES = {'application/json', 'application/json-home'} # minimum access rules support ACCESS_RULES_MIN_VERSION = token_model.ACCESS_RULES_MIN_VERSION def best_match_language(req): """Determine the best available locale. This returns best available locale based on the Accept-Language HTTP header passed in the request. 
""" if not req.accept_language: return None return req.accept_language.best_match( oslo_i18n.get_available_languages('keystone') ) def base_url(context): url = CONF['public_endpoint'] if not url: if 'environment' not in context: raise ValueError('Endpoint cannot be detected') url = wsgiref.util.application_uri(context['environment']) # remove version from the URL as it may be part of SCRIPT_NAME but # it should not be part of base URL url = re.sub(r'/v(3|(2\.0))/*$', '', url) # now remove the standard port url = utils.remove_standard_port(url) return url.rstrip('/') def middleware_exceptions(method): @functools.wraps(method) def _inner(self, request): try: return method(self, request) except exception.Error as e: LOG.warning(e) return render_exception( e, request=request, user_locale=best_match_language(request) ) except TypeError as e: LOG.exception(e) return render_exception( exception.ValidationError(e), request=request, user_locale=best_match_language(request), ) except Exception as e: LOG.exception(e) return render_exception( exception.UnexpectedError(exception=e), request=request, user_locale=best_match_language(request), ) return _inner def render_response(body=None, status=None, headers=None, method=None): """Form a WSGI response.""" if headers is None: headers = [] else: headers = list(headers) headers.append(('Vary', 'X-Auth-Token')) if body is None: body = b'' status = status or ( http.client.NO_CONTENT, http.client.responses[http.client.NO_CONTENT], ) else: content_types = [v for h, v in headers if h == 'Content-Type'] if content_types: content_type = content_types[0] else: content_type = None if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES: body = jsonutils.dump_as_bytes(body, cls=utils.SmarterEncoder) if content_type is None: headers.append(('Content-Type', 'application/json')) status = status or ( http.client.OK, http.client.responses[http.client.OK], ) # NOTE(davechen): `mod_wsgi` follows the standards from pep-3333 and # 
requires the value in response header to be binary type(str) on python2, # unicode based string(str) on python3, or else keystone will not work # under apache with `mod_wsgi`. # keystone needs to check the data type of each header and convert the # type if needed. # see bug: # https://bugs.launchpad.net/keystone/+bug/1528981 # see pep-3333: # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types # see source from mod_wsgi: # https://github.com/GrahamDumpleton/mod_wsgi(methods: # wsgi_convert_headers_to_bytes(...), wsgi_convert_string_to_bytes(...) # and wsgi_validate_header_value(...)). def _convert_to_str(headers): str_headers = [] for header in headers: str_header = [] for value in header: if not isinstance(value, str): str_header.append(str(value)) else: str_header.append(value) # convert the list to the immutable tuple to build the headers. # header's key/value will be guaranteed to be str type. str_headers.append(tuple(str_header)) return str_headers headers = _convert_to_str(headers) resp = webob.Response( body=body, status='%d %s' % status, headerlist=headers, charset='utf-8' ) if method and method.upper() == 'HEAD': # NOTE(morganfainberg): HEAD requests should return the same status # as a GET request and same headers (including content-type and # content-length). The webob.Response object automatically changes # content-length (and other headers) if the body is set to b''. Capture # all headers and reset them on the response object after clearing the # body. The body can only be set to a binary-type (not TextType or # NoneType), so b'' is used here and should be compatible with # both py2x and py3x. 
stored_headers = resp.headers.copy() resp.body = b'' for header, value in stored_headers.items(): resp.headers[header] = value return resp def render_exception(error, context=None, request=None, user_locale=None): """Form a WSGI response based on the current error.""" error_message = error.args[0] message = oslo_i18n.translate(error_message, desired_locale=user_locale) if message is error_message: # translate() didn't do anything because it wasn't a Message, # convert to a string. message = str(message) body = { 'error': { 'code': error.code, 'title': error.title, 'message': message, } } headers = [] if isinstance(error, exception.AuthPluginException): body['error']['identity'] = error.authentication elif isinstance(error, exception.Unauthorized): # NOTE(gyee): we only care about the request environment in the # context. Also, its OK to pass the environment as it is read-only in # base_url() local_context = {} if request: local_context = {'environment': request.environ} elif context and 'environment' in context: local_context = {'environment': context['environment']} url = base_url(local_context) headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url)) return render_response( status=(error.code, error.title), body=body, headers=headers ) class AuthContextMiddleware( provider_api.ProviderAPIMixin, auth_token.BaseAuthProtocol ): """Build the authentication context from the request auth token.""" kwargs_to_fetch_token = True def __init__(self, app): super().__init__(app, log=LOG, service_type='identity') self.token = None def fetch_token(self, token, **kwargs): try: self.token = self.token_provider_api.validate_token( token, access_rules_support=ACCESS_RULES_MIN_VERSION ) return render_token.render_token_response_from_model(self.token) except exception.TokenNotFound: raise auth_token.InvalidToken(_('Could not find token')) def _build_tokenless_auth_context(self, request): """Build the authentication context. 
The context is built from the attributes provided in the env, such as certificate and scope attributes. """ tokenless_helper = tokenless_auth.TokenlessAuthHelper(request.environ) (domain_id, project_id, trust_ref, unscoped, system) = ( tokenless_helper.get_scope() ) user_ref = tokenless_helper.get_mapped_user(project_id, domain_id) # NOTE(gyee): if it is an ephemeral user, the # given X.509 SSL client cert does not need to map to # an existing user. if user_ref['type'] == federation_utils.UserType.EPHEMERAL: auth_context = {} auth_context['group_ids'] = user_ref['group_ids'] auth_context[federation_constants.IDENTITY_PROVIDER] = user_ref[ federation_constants.IDENTITY_PROVIDER ] auth_context[federation_constants.PROTOCOL] = user_ref[ federation_constants.PROTOCOL ] if domain_id and project_id: msg = _('Scoping to both domain and project is not allowed') raise ValueError(msg) if domain_id: auth_context['domain_id'] = domain_id if project_id: auth_context['project_id'] = project_id auth_context['roles'] = user_ref['roles'] else: # it's the local user, so token data is needed. token = token_model.TokenModel() token.user_id = user_ref['id'] token.methods = [CONF.tokenless_auth.protocol] token.domain_id = domain_id token.project_id = project_id auth_context = {'user_id': user_ref['id']} auth_context['is_delegated_auth'] = False if domain_id: auth_context['domain_id'] = domain_id if project_id: auth_context['project_id'] = project_id auth_context['roles'] = [role['name'] for role in token.roles] return auth_context def _validate_trusted_issuer(self, request): """To further filter the certificates that are trusted. If the config option 'trusted_issuer' is absent or does not contain the trusted issuer DN, no certificates will be allowed in tokenless authorization. 
:param env: The env contains the client issuer's attributes :type env: dict :returns: True if client_issuer is trusted; otherwise False """ if not CONF.tokenless_auth.trusted_issuer: return False issuer = request.environ.get(CONF.tokenless_auth.issuer_attribute) if not issuer: msg = ( 'Cannot find client issuer in env by the ' 'issuer attribute - %s.' ) LOG.info(msg, CONF.tokenless_auth.issuer_attribute) return False if issuer in CONF.tokenless_auth.trusted_issuer: return True msg = ( 'The client issuer %(client_issuer)s does not match with ' 'the trusted issuer %(trusted_issuer)s' ) LOG.info( msg, { 'client_issuer': issuer, 'trusted_issuer': CONF.tokenless_auth.trusted_issuer, }, ) return False @middleware_exceptions def process_request(self, request): context_env = request.environ.get(CONTEXT_ENV, {}) # NOTE(notmorgan): This code is merged over from the admin token # middleware and now emits the security warning when the # conf.admin_token value is set. token = request.headers.get(authorization.AUTH_TOKEN_HEADER) if CONF.admin_token and (token == CONF.admin_token): context_env['is_admin'] = True LOG.warning( "The use of the '[DEFAULT] admin_token' configuration" "option presents a significant security risk and should " "not be set. This option is deprecated in favor of using " "'keystone-manage bootstrap' and will be removed in a " "future release." ) request.environ[CONTEXT_ENV] = context_env if not context_env.get('is_admin', False): resp = super().process_request(request) if resp: return resp if ( request.token_auth.has_user_token and not request.user_token_valid ): raise exception.Unauthorized(_('Not authorized.')) if request.token_auth.user is not None: request.set_user_headers(request.token_auth.user) # NOTE(jamielennox): function is split so testing can check errors from # fill_context. There is no actual reason for fill_context to raise # errors rather than return a resp, simply that this is what happened # before refactoring and it was easier to port. 
This can be fixed up # and the middleware_exceptions helper removed. self.fill_context(request) def _keystone_specific_values(self, token, request_context): request_context.token_reference = ( render_token.render_token_response_from_model(token) ) if token.domain_scoped: # Domain scoped tokens should never have is_admin_project set # Even if KSA defaults it otherwise. The two mechanisms are # parallel; only one or the other should be used for access. request_context.is_admin_project = False request_context.domain_id = token.domain_id request_context.domain_name = token.domain['name'] if token.oauth_scoped: request_context.is_delegated_auth = True request_context.oauth_consumer_id = token.access_token[ 'consumer_id' ] request_context.oauth_access_token_id = token.access_token_id if token.trust_scoped: request_context.is_delegated_auth = True request_context.trust_id = token.trust_id if token.is_federated: request_context.group_ids = [] for group in token.federated_groups: request_context.group_ids.append(group['id']) else: request_context.group_ids = [] def fill_context(self, request): # The request context stores itself in thread-local memory for logging. if authorization.AUTH_CONTEXT_ENV in request.environ: msg = ( 'Auth context already exists in the request ' 'environment; it will be used for authorization ' 'instead of creating a new one.' ) LOG.warning(msg) return kwargs = {'authenticated': False, 'overwrite': True} request_context = context.RequestContext.from_environ( request.environ, **kwargs ) request.environ[context.REQUEST_CONTEXT_ENV] = request_context # NOTE(gyee): token takes precedence over SSL client certificates. # This will preserve backward compatibility with the existing # behavior. Tokenless authorization with X.509 SSL client # certificate is effectively disabled if no trusted issuers are # provided. 
if request.environ.get(CONTEXT_ENV, {}).get('is_admin', False): request_context.is_admin = True auth_context = {} elif request.token_auth.has_user_token: # Keystone enforces policy on some values that other services # do not, and should not, use. This adds them in to the context. if not self.token: self.token = PROVIDERS.token_provider_api.validate_token( request.user_token, access_rules_support=request.headers.get( authorization.ACCESS_RULES_HEADER ), ) self._keystone_specific_values(self.token, request_context) request_context.auth_token = request.user_token auth_context = request_context.to_policy_values() additional = { 'trust_id': request_context.trust_id, 'trustor_id': request_context.trustor_id, 'trustee_id': request_context.trustee_id, 'domain_id': request_context._domain_id, 'domain_name': request_context.domain_name, 'group_ids': request_context.group_ids, 'token': self.token, } auth_context.update(additional) elif self._validate_trusted_issuer(request): auth_context = self._build_tokenless_auth_context(request) # NOTE(gyee): we are no longer using auth_context when formulating # the credentials for RBAC. Instead, we are using the (Oslo) # request context. So we'll need to set all the necessary # credential attributes in the request context here. token_attributes = frozenset( ( 'user_id', 'project_id', 'domain_id', 'user_domain_id', 'project_domain_id', 'user_domain_name', 'project_domain_name', 'roles', 'is_admin', 'project_name', 'domain_name', 'system_scope', 'is_admin_project', 'service_user_id', 'service_user_name', 'service_project_id', 'service_project_name', 'service_user_domain_id', 'service_user_domain_name', 'service_project_domain_id', 'service_project_domain_name', 'service_roles', ) ) for attr in token_attributes: if attr in auth_context: setattr(request_context, attr, auth_context[attr]) # NOTE(gyee): request_context.token_reference is always # expecting a 'token' key regardless. 
But in the case of X.509 # tokenless auth, we don't need a token. So setting it to None # should be suffice. request_context.token_reference = {'token': None} else: # There is either no auth token in the request or the certificate # issuer is not trusted. No auth context will be set. This # typically happens on an initial token request. return # set authenticated to flag to keystone that a token has been validated request_context.authenticated = True LOG.debug('RBAC: auth_context: %s', auth_context) request.environ[authorization.AUTH_CONTEXT_ENV] = auth_context @classmethod def factory(cls, global_config, **local_config): """Used for loading in middleware (holdover from paste.deploy).""" def _factory(app): conf = global_config.copy() conf.update(local_config) return cls(app, **local_config) return _factory ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/middleware/url_normalize.py0000664000175000017500000000270300000000000031231 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Flask Native URL Normalizing Middleware class URLNormalizingMiddleware: """Middleware filter to handle URL normalization.""" # NOTE(morgan): This must be a middleware as changing 'PATH_INFO' after # the request hits the flask app will not impact routing. 
def __init__(self, app): self.app = app def __call__(self, environ, start_response): """Normalize URLs.""" # TODO(morgan): evaluate collapsing multiple slashes in this middleware # e.g. '/v3//auth/tokens -> /v3/auth/tokens # Removes a trailing slashes from the given path, if any. if len(environ['PATH_INFO']) > 1 and environ['PATH_INFO'][-1] == '/': environ['PATH_INFO'] = environ['PATH_INFO'].rstrip('/') # Rewrites path to root if no path is given if not environ['PATH_INFO']: environ['PATH_INFO'] = '/' return self.app(environ, start_response) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/flask/request_processing/req_logging.py0000664000175000017500000000210300000000000026521 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # LOG some debug output about the request. This was originally in the # dispatch middleware import flask from oslo_log import log LOG = log.getLogger(__name__) def log_request_info(): # Add in any extra debug logging about the request that is desired # note that this is executed prior to routing the request to a resource # so the data is somewhat raw. 
LOG.debug('REQUEST_METHOD: `%s`', flask.request.method) LOG.debug('SCRIPT_NAME: `%s`', flask.request.script_root) LOG.debug('PATH_INFO: `%s`', flask.request.path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/server/wsgi.py0000664000175000017500000000235400000000000020161 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.server.flask import core as flask_core # NOTE(morgan): While "_get_config_files" is present in the keystone_flask # module, since it is considered "private", we are going to directly # import core and call it directly, eventually keystone_flask will not # export all the symbols from keystone.flask.core only specific ones that # are meant for public consumption def initialize_public_application(): return flask_core.initialize_application( name='public', config_files=flask_core._get_config_files() ) # Keystone does not differentiate between "admin" and public with the removal # of V2.0 initialize_admin_application = initialize_public_application ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5341136 keystone-26.0.0/keystone/tests/0000775000175000017500000000000000000000000016466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/__init__.py0000664000175000017500000000000000000000000020565 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5381136 keystone-26.0.0/keystone/tests/common/0000775000175000017500000000000000000000000017756 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/common/__init__.py0000664000175000017500000000000000000000000022055 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/common/auth.py0000664000175000017500000001514500000000000021277 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.i18n import _ class AuthTestMixin: """To hold auth building helper functions.""" def _build_auth_scope( self, system=False, project_id=None, project_name=None, project_domain_id=None, project_domain_name=None, domain_id=None, domain_name=None, trust_id=None, unscoped=None, ): scope_data = {} if system: scope_data['system'] = {'all': True} elif unscoped: scope_data['unscoped'] = {} elif project_id or project_name: scope_data['project'] = {} if project_id: scope_data['project']['id'] = project_id else: scope_data['project']['name'] = project_name if project_domain_id or project_domain_name: project_domain_json = {} if project_domain_id: project_domain_json['id'] = project_domain_id else: project_domain_json['name'] = project_domain_name scope_data['project']['domain'] = project_domain_json elif domain_id or domain_name: scope_data['domain'] = {} if domain_id: scope_data['domain']['id'] = domain_id else: scope_data['domain']['name'] = domain_name elif trust_id: scope_data['OS-TRUST:trust'] = {} scope_data['OS-TRUST:trust']['id'] = trust_id else: raise ValueError( _( 'Programming Error: Invalid arguments supplied ' 'to build scope.' 
) ) return scope_data def _build_user( self, user_id=None, username=None, user_domain_id=None, user_domain_name=None, ): user = {} if user_id: user['id'] = user_id else: user['name'] = username if user_domain_id or user_domain_name: user['domain'] = {} if user_domain_id: user['domain']['id'] = user_domain_id else: user['domain']['name'] = user_domain_name return user def _build_auth( self, user_id=None, username=None, user_domain_id=None, user_domain_name=None, **kwargs ): # NOTE(dstanek): just to ensure sanity in the tests self.assertEqual( 1, len(kwargs), message='_build_auth requires 1 (and only 1) ' 'secret type and value', ) secret_type, secret_value = list(kwargs.items())[0] # NOTE(dstanek): just to ensure sanity in the tests self.assertIn( secret_type, ('passcode', 'password'), message="_build_auth only supports 'passcode' " "and 'password' secret types", ) data = {} data['user'] = self._build_user( user_id=user_id, username=username, user_domain_id=user_domain_id, user_domain_name=user_domain_name, ) data['user'][secret_type] = secret_value return data def _build_token_auth(self, token): return {'id': token} def _build_app_cred_auth( self, secret, app_cred_id=None, app_cred_name=None, user_id=None, username=None, user_domain_id=None, user_domain_name=None, ): data = {'secret': secret} if app_cred_id: data['id'] = app_cred_id else: data['name'] = app_cred_name data['user'] = self._build_user( user_id=user_id, username=username, user_domain_id=user_domain_id, user_domain_name=user_domain_name, ) return data def build_authentication_request( self, token=None, user_id=None, username=None, user_domain_id=None, user_domain_name=None, password=None, kerberos=False, passcode=None, app_cred_id=None, app_cred_name=None, secret=None, **kwargs ): """Build auth dictionary. It will create an auth dictionary based on all the arguments that it receives. 
""" auth_data = {} auth_data['identity'] = {'methods': []} if kerberos: auth_data['identity']['methods'].append('kerberos') auth_data['identity']['kerberos'] = {} if token: auth_data['identity']['methods'].append('token') auth_data['identity']['token'] = self._build_token_auth(token) if password and (user_id or username): auth_data['identity']['methods'].append('password') auth_data['identity']['password'] = self._build_auth( user_id, username, user_domain_id, user_domain_name, password=password, ) if passcode and (user_id or username): auth_data['identity']['methods'].append('totp') auth_data['identity']['totp'] = self._build_auth( user_id, username, user_domain_id, user_domain_name, passcode=passcode, ) if (app_cred_id or app_cred_name) and secret: auth_data['identity']['methods'].append('application_credential') identity = auth_data['identity'] identity['application_credential'] = self._build_app_cred_auth( secret, app_cred_id=app_cred_id, app_cred_name=app_cred_name, user_id=user_id, username=username, user_domain_id=user_domain_id, user_domain_name=user_domain_name, ) if kwargs: auth_data['scope'] = self._build_auth_scope(**kwargs) return {'auth': auth_data} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5381136 keystone-26.0.0/keystone/tests/functional/0000775000175000017500000000000000000000000020630 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/functional/__init__.py0000664000175000017500000000000000000000000022727 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/functional/core.py0000664000175000017500000000631300000000000022135 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in 
compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import requests import testtools from keystone.tests.common import auth as common_auth class BaseTestCase(testtools.TestCase, common_auth.AuthTestMixin): request_headers = {'content-type': 'application/json'} def setUp(self): self.ADMIN_URL = os.environ.get( 'KSTEST_ADMIN_URL', 'http://localhost:5000' ) self.PUBLIC_URL = os.environ.get( 'KSTEST_PUBLIC_URL', 'http://localhost:5000' ) self.admin = { 'name': os.environ.get('KSTEST_ADMIN_USERNAME', 'admin'), 'password': os.environ.get('KSTEST_ADMIN_PASSWORD', ''), 'domain_id': os.environ.get('KSTEST_ADMIN_DOMAIN_ID', 'default'), } self.user = { 'name': os.environ.get('KSTEST_USER_USERNAME', 'demo'), 'password': os.environ.get('KSTEST_USER_PASSWORD', ''), 'domain_id': os.environ.get('KSTEST_USER_DOMAIN_ID', 'default'), } self.project_id = os.environ.get('KSTEST_PROJECT_ID') self.project_name = os.environ.get('KSTEST_PROJECT_NAME') self.project_domain_id = os.environ.get('KSTEST_PROJECT_DOMAIN_ID') super().setUp() def _http_headers(self, token=None): headers = {'content-type': 'application/json'} if token: headers['X-Auth-Token'] = token return headers def get_scoped_token_response(self, user): """Convenience method so that we can test authenticated requests. 
:param user: A dictionary with user information like 'username', 'password', 'domain_id' :returns: urllib3.Response object """ body = self.build_authentication_request( username=user['name'], user_domain_name=user['domain_id'], password=user['password'], project_name=self.project_name, project_domain_id=self.project_domain_id, ) return requests.post( self.PUBLIC_URL + '/v3/auth/tokens', headers=self.request_headers, json=body, ) def get_scoped_token(self, user): """Convenience method for getting scoped token. This method doesn't do any token validation. :param user: A dictionary with user information like 'username', 'password', 'domain_id' :returns: An OpenStack token for further use :rtype: str """ r = self.get_scoped_token_response(user) return r.headers.get('X-Subject-Token') def get_scoped_admin_token(self): return self.get_scoped_token(self.admin) def get_scoped_user_token(self): return self.get_scoped_token(self.user) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5381136 keystone-26.0.0/keystone/tests/functional/shared/0000775000175000017500000000000000000000000022076 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/functional/shared/__init__.py0000664000175000017500000000000000000000000024175 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/functional/shared/test_running.py0000664000175000017500000000411200000000000025165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import requests import testtools.matchers from keystone.tests.functional import core as functests is_multiple_choices = testtools.matchers.Equals( requests.status_codes.codes.multiple_choices ) is_ok = testtools.matchers.Equals(requests.status_codes.codes.ok) versions = ['v3'] class TestServerRunning(functests.BaseTestCase): def test_admin_responds_with_multiple_choices(self): resp = requests.get(self.ADMIN_URL) self.assertThat(resp.status_code, is_multiple_choices) def test_admin_versions(self): for version in versions: resp = requests.get(self.ADMIN_URL + '/' + version) self.assertThat( resp.status_code, testtools.matchers.Annotate( 'failed for version %s' % version, is_ok ), ) def test_public_responds_with_multiple_choices(self): resp = requests.get(self.PUBLIC_URL) self.assertThat(resp.status_code, is_multiple_choices) def test_public_versions(self): for version in versions: resp = requests.get(self.PUBLIC_URL + '/' + version) self.assertThat( resp.status_code, testtools.matchers.Annotate( 'failed for version %s' % version, is_ok ), ) def test_get_user_token(self): token = self.get_scoped_user_token() self.assertIsNotNone(token) def test_get_admin_token(self): token = self.get_scoped_admin_token() self.assertIsNotNone(token) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5381136 keystone-26.0.0/keystone/tests/hacking/0000775000175000017500000000000000000000000020072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/hacking/__init__.py0000664000175000017500000000000000000000000022171 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/hacking/checks.py0000664000175000017500000002667200000000000021721 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Keystone's pep8 extensions. In order to make the review process faster and easier for core devs we are adding some Keystone specific pep8 checks. This will catch common errors so that core devs don't have to. There are two types of pep8 extensions. One is a function that takes either a physical or logical line. The physical or logical line is the first param in the function definition and can be followed by other parameters supported by pycodestyle. The second type is a class that parses AST trees. For more info please see pycodestyle.py. """ import ast import re from hacking import core class BaseASTChecker(ast.NodeVisitor): """Provides a simple framework for writing AST-based checks. Subclasses should implement visit_* methods like any other AST visitor implementation. When they detect an error for a particular node the method should call ``self.add_error(offending_node)``. Details about where in the code the error occurred will be pulled from the node object. Subclasses should also provide a class variable named CHECK_DESC to be used for the human readable error message. 
""" def __init__(self, tree, filename): """Created object automatically by pep8. :param tree: an AST tree :param filename: name of the file being analyzed (ignored by our checks) """ self._tree = tree self._errors = [] def run(self): """Called automatically by pep8.""" self.visit(self._tree) return self._errors def add_error(self, node, message=None): """Add an error caused by a node to the list of errors for pep8.""" message = message or self.CHECK_DESC error = (node.lineno, node.col_offset, message, self.__class__) self._errors.append(error) class CheckForMutableDefaultArgs(BaseASTChecker): """Check for the use of mutable objects as function/method defaults. We are only checking for list and dict literals at this time. This means that a developer could specify an instance of their own and cause a bug. The fix for this is probably more work than it's worth because it will get caught during code review. """ name = "check_for_mutable_default_args" version = "1.0" CHECK_DESC = 'K001 Using mutable as a function/method default' MUTABLES = ( ast.List, ast.ListComp, ast.Dict, ast.DictComp, ast.Set, ast.SetComp, ast.Call, ) def visit_FunctionDef(self, node): for arg in node.args.defaults: if isinstance(arg, self.MUTABLES): self.add_error(arg) super().generic_visit(node) @core.flake8ext def block_comments_begin_with_a_space(physical_line, line_number): """There should be a space after the # of block comments. There is already a check in pep8 that enforces this rule for inline comments. 
Okay: # this is a comment Okay: #!/usr/bin/python Okay: # this is a comment K002: #this is a comment """ MESSAGE = "K002 block comments should start with '# '" # shebangs are OK if line_number == 1 and physical_line.startswith('#!'): return text = physical_line.strip() if text.startswith('#'): # look for block comments if len(text) > 1 and not text[1].isspace(): return physical_line.index('#'), MESSAGE class CheckForTranslationIssues(BaseASTChecker): name = "check_for_translation_issues" version = "1.0" LOGGING_CHECK_DESC = 'K005 Using translated string in logging' USING_DEPRECATED_WARN = 'K009 Using the deprecated Logger.warn' LOG_MODULES = ('logging', 'oslo_log.log') I18N_MODULES = ('keystone.i18n._',) TRANS_HELPER_MAP = { 'debug': None, 'info': '_LI', 'warning': '_LW', 'error': '_LE', 'exception': '_LE', 'critical': '_LC', } def __init__(self, tree, filename): super().__init__(tree, filename) self.logger_names = [] self.logger_module_names = [] self.i18n_names = {} # NOTE(dstanek): this kinda accounts for scopes when talking # about only leaf node in the graph self.assignments = {} def generic_visit(self, node): """Called if no explicit visitor function exists for a node.""" for field, value in ast.iter_fields(node): if isinstance(value, list): for item in value: if isinstance(item, ast.AST): item._parent = node self.visit(item) elif isinstance(value, ast.AST): value._parent = node self.visit(value) def _filter_imports(self, module_name, alias): """Keep lists of logging and i18n imports.""" if module_name in self.LOG_MODULES: self.logger_module_names.append(alias.asname or alias.name) elif module_name in self.I18N_MODULES: self.i18n_names[alias.asname or alias.name] = alias.name def visit_Import(self, node): for alias in node.names: self._filter_imports(alias.name, alias) return super().generic_visit(node) def visit_ImportFrom(self, node): for alias in node.names: full_name = f'{node.module}.{alias.name}' self._filter_imports(full_name, alias) return 
super().generic_visit(node) def _find_name(self, node): """Return the fully qualified name or a Name or Attribute.""" if isinstance(node, ast.Name): return node.id elif isinstance(node, ast.Attribute) and isinstance( node.value, (ast.Name, ast.Attribute) ): method_name = node.attr obj_name = self._find_name(node.value) if obj_name is None: return None return obj_name + '.' + method_name elif isinstance(node, str): return node else: # could be Subscript, Call or many more return None def visit_Assign(self, node): """Look for 'LOG = logging.getLogger'. This handles the simple case: name = [logging_module].getLogger(...) - or - name = [i18n_name](...) And some much more comple ones: name = [i18n_name](...) % X - or - self.name = [i18n_name](...) % X """ attr_node_types = (ast.Name, ast.Attribute) if len(node.targets) != 1 or not isinstance( node.targets[0], attr_node_types ): # say no to: "x, y = ..." return super().generic_visit(node) target_name = self._find_name(node.targets[0]) if isinstance(node.value, ast.BinOp) and isinstance( node.value.op, ast.Mod ): if ( isinstance(node.value.left, ast.Call) and isinstance(node.value.left.func, ast.Name) and node.value.left.func.id in self.i18n_names ): # NOTE(dstanek): this is done to match cases like: # `msg = _('something %s') % x` node = ast.Assign(value=node.value.left) if not isinstance(node.value, ast.Call): # node.value must be a call to getLogger self.assignments.pop(target_name, None) return super().generic_visit(node) # is this a call to an i18n function? 
if ( isinstance(node.value.func, ast.Name) and node.value.func.id in self.i18n_names ): self.assignments[target_name] = node.value.func.id return super().generic_visit(node) if not isinstance(node.value.func, ast.Attribute) or not isinstance( node.value.func.value, attr_node_types ): # function must be an attribute on an object like # logging.getLogger return super().generic_visit(node) object_name = self._find_name(node.value.func.value) func_name = node.value.func.attr if ( object_name in self.logger_module_names and func_name == 'getLogger' ): self.logger_names.append(target_name) return super().generic_visit(node) def visit_Call(self, node): """Look for the 'LOG.*' calls.""" # obj.method if isinstance(node.func, ast.Attribute): obj_name = self._find_name(node.func.value) if isinstance(node.func.value, ast.Name): method_name = node.func.attr elif isinstance(node.func.value, ast.Attribute): obj_name = self._find_name(node.func.value) method_name = node.func.attr else: # could be Subscript, Call or many more return super().generic_visit(node) # if dealing with a logger the method can't be "warn" if obj_name in self.logger_names and method_name == 'warn': msg = node.args[0] # first arg to a logging method is the msg self.add_error(msg, message=self.USING_DEPRECATED_WARN) # must be a logger instance and one of the support logging methods if ( obj_name not in self.logger_names or method_name not in self.TRANS_HELPER_MAP ): return super().generic_visit(node) # the call must have arguments if not node.args: return super().generic_visit(node) self._process_log_messages(node) return super().generic_visit(node) def _process_log_messages(self, node): msg = node.args[0] # first arg to a logging method is the msg # if first arg is a call to a i18n name if ( isinstance(msg, ast.Call) and isinstance(msg.func, ast.Name) and msg.func.id in self.i18n_names ): self.add_error(msg, message=self.LOGGING_CHECK_DESC) # if the first arg is a reference to a i18n call elif isinstance(msg, 
ast.Name) and msg.id in self.assignments: self.add_error(msg, message=self.LOGGING_CHECK_DESC) @core.flake8ext def dict_constructor_with_sequence_copy(logical_line): """Should use a dict comprehension instead of a dict constructor. PEP-0274 introduced dict comprehension with performance enhancement and it also makes code more readable. Okay: lower_res = {k.lower(): v for k, v in res[1].items()} Okay: fool = dict(a='a', b='b') K008: lower_res = dict((k.lower(), v) for k, v in res[1].items()) K008: attrs = dict([(k, _from_json(v)) K008: dict([[i,i] for i in range(3)]) """ MESSAGE = ( "K008 Must use a dict comprehension instead of a dict" " constructor with a sequence of key-value pairs." ) dict_constructor_with_sequence_re = re.compile( r".*\bdict\((\[)?(\(|\[)(?!\{)" ) if dict_constructor_with_sequence_re.match(logical_line): yield (0, MESSAGE) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5381136 keystone-26.0.0/keystone/tests/protection/0000775000175000017500000000000000000000000020654 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/__init__.py0000664000175000017500000000000000000000000022753 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5421135 keystone-26.0.0/keystone/tests/protection/v3/0000775000175000017500000000000000000000000021204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/__init__.py0000664000175000017500000000000000000000000023303 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/protection/v3/test_access_rules.py0000664000175000017500000006255600000000000025306 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _UserAccessRuleTests: """Test cases for anyone that has a valid user token.""" def test_user_can_get_their_access_rules(self): access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': self.user_id, 'project_id': self.project_id, 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( self.user_id, app_cred['access_rules'][0]['id'], ) c.get(path, headers=self.headers) def test_user_can_list_their_access_rules(self): app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': self.user_id, 'project_id': self.project_id, 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': 
uuid.uuid4().hex, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) with self.test_client() as c: r = c.get( '/v3/users/%s/access_rules' % self.user_id, headers=self.headers, ) self.assertEqual(len(r.json['access_rules']), 1) def test_user_can_delete_their_access_rules(self): access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': self.user_id, 'project_id': self.project_id, 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) PROVIDERS.application_credential_api.delete_application_credential( app_cred['id'] ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( self.user_id, access_rule_id, ) c.delete(path, headers=self.headers) class _ProjectUsersTests: """Users who have project role authorization observe the same behavior.""" def test_user_cannot_get_access_rules_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) with self.test_client() as c: 
path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.get( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_own_non_existent_access_rule_not_found(self): with self.test_client() as c: c.get( '/v3/users/%s/access_rules/%s' % (self.user_id, uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_cannot_get_non_existent_access_rule_other_user_forbidden(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.get( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_access_rules_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': uuid.uuid4().hex, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) with self.test_client() as c: path = '/v3/users/%s/access_rules' % user['id'] c.get( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_access_rules_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = 
PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) PROVIDERS.application_credential_api.delete_application_credential( app_cred['id'] ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_delete_non_existent_access_rule_other_user_forbidden(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.delete( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _SystemUserAccessRuleTests: """Tests that are common across all system users.""" def test_user_can_list_access_rules_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': uuid.uuid4().hex, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': 
uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) with self.test_client() as c: r = c.get( '/v3/users/%s/access_rules' % user['id'], headers=self.headers ) self.assertEqual(1, len(r.json['access_rules'])) def test_user_cannot_get_non_existent_access_rule_not_found(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.get( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserAccessRuleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_cannot_delete_access_rules_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) PROVIDERS.application_credential_api.delete_application_credential( app_cred['id'] ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_access_rule_forbidden(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.delete( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserAccessRuleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( 
domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_cannot_delete_access_rules_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) PROVIDERS.application_credential_api.delete_application_credential( app_cred['id'] ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_access_rule_forbidden(self): user = 
unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.delete( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserAccessRuleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_delete_access_rules_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) access_rule_id = uuid.uuid4().hex app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'user_id': user['id'], 'project_id': project['id'], 'secret': uuid.uuid4().hex, 'access_rules': [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ], } PROVIDERS.application_credential_api.create_application_credential( app_cred ) 
PROVIDERS.application_credential_api.delete_application_credential( app_cred['id'] ) with self.test_client() as c: path = '/v3/users/{}/access_rules/{}'.format( user['id'], access_rule_id, ) c.delete(path, headers=self.headers) def test_user_cannot_delete_non_existent_access_rule_not_found(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with self.test_client() as c: c.delete( '/v3/users/%s/access_rules/%s' % (user['id'], uuid.uuid4().hex), headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserAccessRuleTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserAccessRuleTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserAccessRuleTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id self.project_id = self.bootstrapper.project_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_application_credential.py0000664000175000017500000006633400000000000027326 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import http.client import uuid from oslo_serialization import jsonutils from oslo_utils import timeutils from keystone.common.policies import base as base_policy from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _TestAppCredBase(base_classes.TestCaseWithBootstrap): """Base class for application credential tests.""" def _new_app_cred_data( self, user_id=None, project_id=None, name=None, expires=None, system=None, ): if not user_id: user_id = self.app_cred_user_id if not name: name = uuid.uuid4().hex if not expires: expires = timeutils.utcnow() + datetime.timedelta(days=365) if not system: system = uuid.uuid4().hex if not project_id: project_id = self.app_cred_project_id app_cred_data = { 'id': uuid.uuid4().hex, 'name': name, 'description': uuid.uuid4().hex, 'user_id': user_id, 'project_id': project_id, 'system': system, 'expires_at': expires, 'roles': [ {'id': self.bootstrapper.member_role_id}, ], 'secret': uuid.uuid4().hex, 'unrestricted': False, } return app_cred_data def setUp(self): super().setUp() # create a user and project for app cred testing new_user_ref = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) app_cred_user_ref = PROVIDERS.identity_api.create_user(new_user_ref) self.app_cred_user_id = app_cred_user_ref['id'] self.app_cred_user_password = new_user_ref['password'] app_cred_project_ref = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) self.app_cred_project_id = app_cred_project_ref['id'] PROVIDERS.assignment_api.create_grant( 
self.bootstrapper.member_role_id, user_id=self.app_cred_user_id, project_id=self.app_cred_project_id, ) def _create_application_credential(self): app_cred = self._new_app_cred_data() return ( PROVIDERS.application_credential_api.create_application_credential( app_cred ) ) def _override_policy(self): # TODO(gyee): Remove this once the deprecated policies in # keystone.common.policies.application_credential have been removed. # This is only here to make sure we test the new policies instead of # the deprecated ones. Oslo.policy will OR deprecated policies with # new policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. # This will cause these specific tests to fail since we're trying to # correct this broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_application_credential': ( base_policy.RULE_SYSTEM_READER_OR_OWNER ), 'identity:list_application_credentials': ( base_policy.RULE_SYSTEM_READER_OR_OWNER ), 'identity:create_application_credential': ( base_policy.RULE_OWNER ), 'identity:delete_application_credential': ( base_policy.RULE_SYSTEM_ADMIN_OR_OWNER ), } f.write(jsonutils.dumps(overridden_policies)) class _DomainAndProjectUserTests: """Domain and project user tests. Domain and project users should not be able to manage application credentials other then their own. 
""" def test_user_cannot_list_application_credentials(self): # create a couple of application credentials self._create_application_credential() self._create_application_credential() with self.test_client() as c: c.get( '/v3/users/%s/application_credentials' % (self.app_cred_user_id), expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_get_application_credential(self): app_cred = self._create_application_credential() with self.test_client() as c: c.get( '/v3/users/%s/application_credentials/%s' % (self.app_cred_user_id, app_cred['id']), expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_lookup_application_credential(self): app_cred = self._create_application_credential() with self.test_client() as c: c.get( '/v3/users/%s/application_credentials?name=%s' % (self.app_cred_user_id, app_cred['name']), expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_delete_application_credential(self): app_cred = self._create_application_credential() with self.test_client() as c: c.delete( '/v3/users/%s/application_credentials/%s' % (self.app_cred_user_id, app_cred['id']), expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_lookup_non_existent_application_credential(self): with self.test_client() as c: c.get( '/v3/users/%s/application_credentials?name=%s' % (self.app_cred_user_id, uuid.uuid4().hex), expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_create_app_credential_for_another_user(self): # create another user another_user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) another_user_id = PROVIDERS.identity_api.create_user(another_user)[ 'id' ] app_cred_body = { 'application_credential': unit.new_application_credential_ref( roles=[{'id': self.bootstrapper.member_role_id}] ) } with self.test_client() as c: c.post( '/v3/users/%s/application_credentials' % another_user_id, 
json=app_cred_body, expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) class _SystemUserAndOwnerTests: """Common default functionality for all system users and owner.""" def test_user_can_list_application_credentials(self): # create a couple of application credentials self._create_application_credential() self._create_application_credential() with self.test_client() as c: r = c.get( '/v3/users/%s/application_credentials' % (self.app_cred_user_id), headers=self.headers, ) self.assertEqual(2, len(r.json['application_credentials'])) def test_user_can_get_application_credential(self): app_cred = self._create_application_credential() with self.test_client() as c: r = c.get( '/v3/users/%s/application_credentials/%s' % (self.app_cred_user_id, app_cred['id']), headers=self.headers, ) actual_app_cred = r.json['application_credential'] self.assertEqual(app_cred['id'], actual_app_cred['id']) def test_user_can_lookup_application_credential(self): app_cred = self._create_application_credential() with self.test_client() as c: r = c.get( '/v3/users/%s/application_credentials?name=%s' % (self.app_cred_user_id, app_cred['name']), headers=self.headers, ) self.assertEqual(1, len(r.json['application_credentials'])) actual_app_cred = r.json['application_credentials'][0] self.assertEqual(app_cred['id'], actual_app_cred['id']) def _test_delete_application_credential( self, expected_status_code=http.client.NO_CONTENT ): app_cred = self._create_application_credential() with self.test_client() as c: c.delete( '/v3/users/%s/application_credentials/%s' % (self.app_cred_user_id, app_cred['id']), expected_status_code=expected_status_code, headers=self.headers, ) def test_user_cannot_create_app_credential_for_another_user(self): # create another user another_user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) another_user_id = PROVIDERS.identity_api.create_user(another_user)[ 'id' ] app_cred_body = { 'application_credential': 
unit.new_application_credential_ref( roles=[{'id': self.bootstrapper.member_role_id}] ) } with self.test_client() as c: c.post( '/v3/users/%s/application_credentials' % another_user_id, json=app_cred_body, expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) class SystemReaderTests( _TestAppCredBase, common_auth.AuthTestMixin, _SystemUserAndOwnerTests ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_system_reader_cannot_delete_application_credential_for_user(self): self._test_delete_application_credential( expected_status_code=http.client.FORBIDDEN ) class SystemMemberTests( _TestAppCredBase, common_auth.AuthTestMixin, _SystemUserAndOwnerTests ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_system_reader_cannot_delete_application_credential_for_user(self): self._test_delete_application_credential( expected_status_code=http.client.FORBIDDEN ) class SystemAdminTests( _TestAppCredBase, common_auth.AuthTestMixin, _SystemUserAndOwnerTests ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_system_admin_can_delete_application_credential_for_user(self): self._test_delete_application_credential() class OwnerTests( _TestAppCredBase, common_auth.AuthTestMixin, _SystemUserAndOwnerTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) # in this case app_cred_user_id and user_id are the same since we # are testing the owner self.user_id = self.app_cred_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.app_cred_user_password, project_id=self.app_cred_project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_create_application_credential_by_owner(self): app_cred_body = { 'application_credential': unit.new_application_credential_ref() } with self.test_client() as c: c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers=self.headers, ) def test_owner_can_delete_application_credential(self): self._test_delete_application_credential() def test_user_cannot_lookup_application_credential_for_another_user(self): # create another user another_user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) another_user_id = PROVIDERS.identity_api.create_user(another_user)[ 'id' ] auth = self.build_authentication_request( user_id=another_user_id, password=another_user['password'] ) # authenticate for a token as a completely different user with # completely different authorization with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) another_user_token = r.headers['X-Subject-Token'] # create an application credential as the self.user_id user on a # project that the user above doesn't have any authorization on app_cred = self._create_application_credential() # attempt to lookup the application credential as another user with self.test_client() as c: c.get( '/v3/users/%s/application_credentials/%s' % (another_user_id, app_cred['id']), expected_status_code=http.client.FORBIDDEN, headers={'X-Auth-Token': another_user_token}, ) def test_user_cannot_delete_application_credential_for_another_user(self): # create another user another_user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) another_user_id = PROVIDERS.identity_api.create_user(another_user)[ 'id' ] auth = self.build_authentication_request( user_id=another_user_id, password=another_user['password'] ) # authenticate for a token as a completely 
different user with # completely different authorization with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) another_user_token = r.headers['X-Subject-Token'] # create an application credential as the self.user_id user on a # project that the user above doesn't have any authorization on app_cred = self._create_application_credential() # attempt to delete the application credential as another user with self.test_client() as c: c.delete( '/v3/users/%s/application_credentials/%s' % (another_user_id, app_cred['id']), expected_status_code=http.client.FORBIDDEN, headers={'X-Auth-Token': another_user_token}, ) class DomainAdminTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=CONF.identity.default_domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=CONF.identity.default_domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainReaderTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=CONF.identity.default_domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=CONF.identity.default_domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=CONF.identity.default_domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=CONF.identity.default_domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_admin)['id'] # even project admin of project where the app credential # is intended for cannot perform app credential operations PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, project_id=self.app_cred_project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_admin['password'], project_id=self.app_cred_project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectReaderTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_admin)['id'] # even project admin of project where the app credential # is intended for cannot perform app credential operations PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.app_cred_project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_admin['password'], project_id=self.app_cred_project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( _TestAppCredBase, common_auth.AuthTestMixin, _DomainAndProjectUserTests ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_admin = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_admin)['id'] # even project admin of project where the app credential # is intended for cannot perform app credential operations PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.app_cred_project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_admin['password'], project_id=self.app_cred_project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_assignment.py0000664000175000017500000017461300000000000025001 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import role_assignment as rp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _AssignmentTestUtilities: """Useful utilities for setting up test assignments and assertions.""" def _setup_test_role_assignments(self): # Utility to create assignments and return important data for # assertions. # Since the role doesn't really matter too much, we can just re-use an # existing role instead of creating a new one. role_id = self.bootstrapper.reader_role_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) # create a user+project role assignment. PROVIDERS.assignment_api.create_grant( role_id, user_id=user['id'], project_id=project['id'] ) # create a user+domain role assignment. 
PROVIDERS.assignment_api.create_grant( role_id, user_id=user['id'], domain_id=domain['id'] ) # create a user+system role assignment. PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], role_id ) # create a group+project role assignment. PROVIDERS.assignment_api.create_grant( role_id, group_id=group['id'], project_id=project['id'] ) # create a group+domain role assignment. PROVIDERS.assignment_api.create_grant( role_id, group_id=group['id'], domain_id=domain['id'] ) # create a group+system role assignment. PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], role_id ) return { 'user_id': user['id'], 'group_id': group['id'], 'domain_id': domain['id'], 'project_id': project['id'], 'role_id': role_id, } def _extract_role_assignments_from_response_body(self, r): # Condense the role assignment details into a set of key things we can # use in assertions. assignments = [] for assignment in r.json['role_assignments']: a = {} if 'project' in assignment['scope']: a['project_id'] = assignment['scope']['project']['id'] elif 'domain' in assignment['scope']: a['domain_id'] = assignment['scope']['domain']['id'] elif 'system' in assignment['scope']: a['system'] = 'all' if 'user' in assignment: a['user_id'] = assignment['user']['id'] elif 'group' in assignment: a['group_id'] = assignment['group']['id'] a['role_id'] = assignment['role']['id'] assignments.append(a) return assignments class _SystemUserTests: """Common functionality for system users regardless of default role.""" def test_user_can_list_all_role_assignments_in_the_deployment(self): assignments = self._setup_test_role_assignments() # this assignment is created by keystone-manage bootstrap self.expected.append( { 'user_id': self.bootstrapper.admin_user_id, 'project_id': self.bootstrapper.project_id, 'role_id': self.bootstrapper.admin_role_id, } ) # this assignment is created by keystone-manage bootstrap self.expected.append( { 'user_id': self.bootstrapper.admin_user_id, 'system': 'all', 
'role_id': self.bootstrapper.admin_role_id, } ) self.expected.append( { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) with self.test_client() as c: r = c.get('/v3/role_assignments', headers=self.headers) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) def test_user_can_list_all_role_names_assignments_in_the_deployment(self): assignments = self._setup_test_role_assignments() # this assignment is created by keystone-manage bootstrap self.expected.append( { 'user_id': self.bootstrapper.admin_user_id, 'project_id': self.bootstrapper.project_id, 'role_id': self.bootstrapper.admin_role_id, } ) # this assignment is created by keystone-manage bootstrap self.expected.append( { 'user_id': self.bootstrapper.admin_user_id, 'system': 'all', 'role_id': self.bootstrapper.admin_role_id, } ) self.expected.append( { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 
'system': 'all', 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) with self.test_client() as c: r = c.get( '/v3/role_assignments?include_names=True', headers=self.headers ) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) def test_user_can_filter_role_assignments_by_project(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, ] project_id = assignments['project_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.project.id=%s' % project_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_domain(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, ] domain_id = assignments['domain_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s' % domain_id, headers=self.headers, ) 
self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_system(self): assignments = self._setup_test_role_assignments() # this assignment is created by keystone-manage bootstrap self.expected.append( { 'user_id': self.bootstrapper.admin_user_id, 'system': 'all', 'role_id': self.bootstrapper.admin_role_id, } ) self.expected.append( { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.system=all', headers=self.headers ) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) def test_user_can_filter_role_assignments_by_user(self): assignments = self._setup_test_role_assignments() expected = [ # assignment of the user running the test case { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], }, ] user_id = assignments['user_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?user.id=%s' % user_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_group(self): assignments = self._setup_test_role_assignments() expected = [ { 'group_id': 
assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], }, ] group_id = assignments['group_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?group.id=%s' % group_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_role(self): assignments = self._setup_test_role_assignments() self.expected = [ ra for ra in self.expected if ra['role_id'] == assignments['role_id'] ] self.expected.append( { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) role_id = assignments['role_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?role.id=%s&include_names=True' % role_id, headers=self.headers, ) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) 
def test_user_can_filter_role_assignments_by_project_and_role(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, ] with self.test_client() as c: qs = (assignments['project_id'], assignments['role_id']) r = c.get( '/v3/role_assignments?scope.project.id=%s&role.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_domain_and_role(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, ] qs = (assignments['domain_id'], assignments['role_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s&role.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_system_and_role(self): assignments = self._setup_test_role_assignments() self.expected = [ ra for ra in self.expected if ra['role_id'] == assignments['role_id'] ] self.expected.append( { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) self.expected.append( { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], } ) role_id = assignments['role_id'] with self.test_client() as c: r = c.get( 
'/v3/role_assignments?scope.system=all&role.id=%s' % role_id, headers=self.headers, ) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) def test_user_can_filter_role_assignments_by_user_and_role(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'user_id': assignments['user_id'], 'system': 'all', 'role_id': assignments['role_id'], }, ] qs = (assignments['user_id'], assignments['role_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?user.id=%s&role.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_group_and_role(self): assignments = self._setup_test_role_assignments() expected = [ { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'system': 'all', 'role_id': assignments['role_id'], }, ] with self.test_client() as c: qs = (assignments['group_id'], assignments['role_id']) r = c.get( '/v3/role_assignments?group.id=%s&role.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_project_and_user(self): 
assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ] qs = (assignments['project_id'], assignments['user_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.project.id=%s&user.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_project_and_group(self): assignments = self._setup_test_role_assignments() expected = [ { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], } ] qs = (assignments['project_id'], assignments['group_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.project.id=%s&group.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_domain_and_user(self): assignments = self._setup_test_role_assignments() expected = [ { 'user_id': assignments['user_id'], 'domain_id': assignments['domain_id'], 'role_id': assignments['role_id'], } ] qs = (assignments['domain_id'], assignments['user_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s&user.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_domain_and_group(self): assignments = self._setup_test_role_assignments() expected = [ { 'group_id': assignments['group_id'], 'domain_id': 
assignments['domain_id'], 'role_id': assignments['role_id'], } ] qs = (assignments['domain_id'], assignments['group_id']) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s&group.id=%s' % qs, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_list_assignments_for_subtree(self): assignments = self._setup_test_role_assignments() user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=assignments['project_id'], ), ) PROVIDERS.assignment_api.create_grant( assignments['role_id'], user_id=user['id'], project_id=project['id'], ) expected = [ { 'user_id': assignments['user_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': assignments['group_id'], 'project_id': assignments['project_id'], 'role_id': assignments['role_id'], }, { 'user_id': user['id'], 'project_id': project['id'], 'role_id': assignments['role_id'], }, ] with self.test_client() as c: r = c.get( ( '/v3/role_assignments?scope.project.id=%s&include_subtree' % assignments['project_id'] ), headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) class _DomainUserTests: """Common functionality for domain users.""" def _setup_test_role_assignments_for_domain(self): # Populate role assignment within `self.domain_id` so that we can # assert users can view assignments within the domain they have # authorization on role_id = self.bootstrapper.reader_role_id user = PROVIDERS.identity_api.create_user( 
unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) # create a user+project role assignment. PROVIDERS.assignment_api.create_grant( role_id, user_id=user['id'], project_id=project['id'] ) # create a user+domain role assignment. PROVIDERS.assignment_api.create_grant( role_id, user_id=user['id'], domain_id=self.domain_id ) # create a group+project role assignment. PROVIDERS.assignment_api.create_grant( role_id, group_id=group['id'], project_id=project['id'] ) # create a group+domain role assignment. PROVIDERS.assignment_api.create_grant( role_id, group_id=group['id'], domain_id=self.domain_id ) return { 'user_id': user['id'], 'group_id': group['id'], 'project_id': project['id'], 'role_id': role_id, } def test_user_can_list_all_assignments_in_their_domain(self): self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() self.expected.append( { 'user_id': domain_assignments['user_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], } ) self.expected.append( { 'user_id': domain_assignments['user_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], } ) self.expected.append( { 'group_id': domain_assignments['group_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], } ) self.expected.append( { 'group_id': domain_assignments['group_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], } ) with self.test_client() as c: r = c.get('/v3/role_assignments', headers=self.headers) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, 
self.expected) def test_user_can_filter_role_assignments_by_project_in_domain(self): self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() expected = [ { 'user_id': domain_assignments['user_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], }, { 'group_id': domain_assignments['group_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], }, ] project_id = domain_assignments['project_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.project.id=%s' % project_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_domain(self): # This shouldn't really provide any more value than just calling GET # /v3/role_assignments with a domain-scoped token, but we test it # anyway. 
self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() self.expected.append( { 'user_id': domain_assignments['user_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], } ) self.expected.append( { 'group_id': domain_assignments['group_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], } ) with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s' % self.domain_id, headers=self.headers, ) self.assertEqual( len(self.expected), len(r.json['role_assignments']) ) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, self.expected) def test_user_can_filter_role_assignments_by_user_of_domain(self): self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() expected = [ { 'user_id': domain_assignments['user_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], }, { 'user_id': domain_assignments['user_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], }, ] user_id = domain_assignments['user_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?user.id=%s' % user_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_can_filter_role_assignments_by_group_of_domain(self): self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() expected = [ { 'group_id': domain_assignments['group_id'], 'domain_id': self.domain_id, 'role_id': domain_assignments['role_id'], }, { 'group_id': domain_assignments['group_id'], 'project_id': domain_assignments['project_id'], 'role_id': domain_assignments['role_id'], }, ] group_id = domain_assignments['group_id'] with 
self.test_client() as c: r = c.get( '/v3/role_assignments?group.id=%s' % group_id, headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_cannot_filter_role_assignments_by_system(self): self._setup_test_role_assignments() self._setup_test_role_assignments_for_domain() with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.system=all', headers=self.headers ) self.assertEqual(0, len(r.json['role_assignments'])) def test_user_cannot_filter_role_assignments_by_other_domain(self): assignments = self._setup_test_role_assignments() domain = assignments['domain_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.domain.id=%s' % domain, headers=self.headers, ) self.assertEqual([], r.json['role_assignments']) def test_user_cannot_filter_role_assignments_by_other_domain_project(self): assignments = self._setup_test_role_assignments() self._setup_test_role_assignments_for_domain() # This project is in an entirely separate domain that this user doesn't # have authorization to access, so they should only see an empty list project_id = assignments['project_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?scope.project.id=%s' % project_id, headers=self.headers, ) self.assertEqual(0, len(r.json['role_assignments'])) def test_user_cannot_filter_role_assignments_by_other_domain_user(self): assignments = self._setup_test_role_assignments() self._setup_test_role_assignments_for_domain() # This user doesn't have any role assignments on self.domain_id, so the # domain user of self.domain_id should only see an empty list of role # assignments. 
user_id = assignments['user_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?user.id=%s' % user_id, headers=self.headers, ) self.assertEqual(0, len(r.json['role_assignments'])) def test_user_cannot_filter_role_assignments_by_other_domain_group(self): assignments = self._setup_test_role_assignments() self._setup_test_role_assignments_for_domain() # This group doesn't have any role assignments on self.domain_id, so # the domain user of self.domain_id should only see an empty list of # role assignments. group_id = assignments['group_id'] with self.test_client() as c: r = c.get( '/v3/role_assignments?group.id=%s' % group_id, headers=self.headers, ) self.assertEqual(0, len(r.json['role_assignments'])) def test_user_can_list_assignments_for_subtree_in_their_domain(self): assignments = self._setup_test_role_assignments() domain_assignments = self._setup_test_role_assignments_for_domain() user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref( domain_id=self.domain_id, parent_id=domain_assignments['project_id'], ), ) PROVIDERS.assignment_api.create_grant( assignments['role_id'], user_id=user['id'], project_id=project['id'], ) expected = [ { 'user_id': domain_assignments['user_id'], 'project_id': domain_assignments['project_id'], 'role_id': assignments['role_id'], }, { 'group_id': domain_assignments['group_id'], 'project_id': domain_assignments['project_id'], 'role_id': assignments['role_id'], }, { 'user_id': user['id'], 'project_id': project['id'], 'role_id': assignments['role_id'], }, ] with self.test_client() as c: r = c.get( ( '/v3/role_assignments?scope.project.id=%s&include_subtree' % domain_assignments['project_id'] ), headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) 
def test_user_cannot_list_assignments_for_subtree_in_other_domain(self): assignments = self._setup_test_role_assignments() with self.test_client() as c: c.get( ( '/v3/role_assignments?scope.project.id=%s&include_subtree' % assignments['project_id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _ProjectUserTests: def test_user_cannot_list_all_assignments_in_their_project(self): with self.test_client() as c: c.get( '/v3/role_assignments', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_user_of_project(self): assignments = self._setup_test_role_assignments() user_id = assignments['user_id'] with self.test_client() as c: c.get( '/v3/role_assignments?user.id=%s' % user_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_group_of_project(self): assignments = self._setup_test_role_assignments() group_id = assignments['group_id'] with self.test_client() as c: c.get( '/v3/role_assignments?group.id=%s' % group_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_system(self): with self.test_client() as c: c.get( '/v3/role_assignments?scope.system=all', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_domain(self): with self.test_client() as c: c.get( '/v3/role_assignments?scope.domain.id=%s' % self.domain_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_other_project(self): project1 = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.get( '/v3/role_assignments?scope.project.id=%s' % project1, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_filter_role_assignments_by_other_project_user(self): assignments = self._setup_test_role_assignments() # This user doesn't have any role assignments on self.project_id, so # the project user of self.project_id should only see an empty list of # role assignments. user_id = assignments['user_id'] with self.test_client() as c: c.get( '/v3/role_assignments?user.id=%s' % user_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_role_assignments_by_other_project_group(self): assignments = self._setup_test_role_assignments() # This group doesn't have any role assignments on self.project_id, so # the project user of self.project_id should only see an empty list of # role assignments. group_id = assignments['group_id'] with self.test_client() as c: c.get( '/v3/role_assignments?group.id=%s' % group_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _ProjectReaderMemberTests: def test_user_cannot_list_assignments_for_subtree(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref( domain_id=self.domain_id, parent_id=self.project_id ), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( ( '/v3/role_assignments?scope.project.id=%s&include_subtree' % self.project_id ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = 
PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'system': 'all', 'role_id': self.bootstrapper.reader_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'system': 'all', 'role_id': self.bootstrapper.member_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id self.expected = [] auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _DomainUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'domain_id': self.domain_id, 'role_id': self.bootstrapper.reader_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=domain_reader['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll 
be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _DomainUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'domain_id': self.domain_id, 'role_id': self.bootstrapper.member_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _DomainUserTests, ): def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.role_assignment have been removed. This is # only here to make sure we test the new policies instead of the # deprecated ones. Oslo.policy will OR deprecated policies with new # policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. 
This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_role_assignments': ( rp.SYSTEM_READER_OR_DOMAIN_READER ), 'identity:list_role_assignments_for_tree': ( rp.SYSTEM_READER_OR_PROJECT_DOMAIN_READER_OR_PROJECT_ADMIN ), } f.write(jsonutils.dumps(overridden_policies)) def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'domain_id': self.domain_id, 'role_id': self.bootstrapper.admin_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _ProjectUserTests, _ProjectReaderMemberTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'project_id': self.project_id, 'role_id': self.bootstrapper.reader_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _ProjectUserTests, _ProjectReaderMemberTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_member = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'project_id': self.project_id, 'role_id': self.bootstrapper.member_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _AssignmentTestUtilities, _ProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] self.user_id = self.bootstrapper.admin_user_id project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, project_id=self.project_id, ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'project_id': self.project_id, 'role_id': self.bootstrapper.admin_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.role_assignment have been removed. This is # only here to make sure we test the new policies instead of the # deprecated ones. 
Oslo.policy will OR deprecated policies with new # policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_role_assignments': ( rp.SYSTEM_READER_OR_DOMAIN_READER ), 'identity:list_role_assignments_for_tree': ( rp.SYSTEM_READER_OR_PROJECT_DOMAIN_READER_OR_PROJECT_ADMIN ), } f.write(jsonutils.dumps(overridden_policies)) def test_user_can_list_assignments_for_subtree_on_own_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref( domain_id=self.domain_id, parent_id=self.project_id ), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) expected = copy.copy(self.expected) expected.append( { 'project_id': project['id'], 'user_id': user['id'], 'role_id': self.bootstrapper.reader_role_id, } ) with self.test_client() as c: r = c.get( ( '/v3/role_assignments?scope.project.id=%s&include_subtree' % self.project_id ), headers=self.headers, ) self.assertEqual(len(expected), len(r.json['role_assignments'])) actual = self._extract_role_assignments_from_response_body(r) for assignment in actual: self.assertIn(assignment, expected) def test_user_cannot_list_assignments_for_subtree_on_other_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( ( 
'/v3/role_assignments?scope.project.id=%s&include_subtree' % project['id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_consumer.py0000664000175000017500000001544400000000000024460 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserOauth1ConsumerTests: """Common default functionality for all system users.""" def test_user_can_get_consumer(self): ref = PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.get( '/v3/OS-OAUTH1/consumers/%s' % ref['id'], headers=self.headers ) def test_user_can_list_consumers(self): PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.get('/v3/OS-OAUTH1/consumers', headers=self.headers) class _SystemReaderAndMemberOauth1ConsumerTests: def test_user_cannot_create_consumer(self): with self.test_client() as c: c.post( '/v3/OS-OAUTH1/consumers', json={'consumer': {}}, expected_status_code=http.client.FORBIDDEN, 
headers=self.headers, ) def test_user_cannot_update_consumer(self): ref = PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.patch( '/v3/OS-OAUTH1/consumers/%s' % ref['id'], json={'consumer': {'description': uuid.uuid4().hex}}, expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) def test_user_cannot_delete_consumer(self): ref = PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.delete( '/v3/OS-OAUTH1/consumers/%s' % ref['id'], expected_status_code=http.client.FORBIDDEN, headers=self.headers, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserOauth1ConsumerTests, _SystemReaderAndMemberOauth1ConsumerTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserOauth1ConsumerTests, _SystemReaderAndMemberOauth1ConsumerTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserOauth1ConsumerTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_consumer(self): with self.test_client() as c: c.post( '/v3/OS-OAUTH1/consumers', json={'consumer': {}}, headers=self.headers, ) def test_user_can_update_consumer(self): ref = PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.patch( '/v3/OS-OAUTH1/consumers/%s' % ref['id'], json={'consumer': {'description': uuid.uuid4().hex}}, headers=self.headers, ) def test_user_can_delete_consumer(self): ref = PROVIDERS.oauth_api.create_consumer({'id': uuid.uuid4().hex}) with self.test_client() as c: c.delete( '/v3/OS-OAUTH1/consumers/%s' % ref['id'], headers=self.headers ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_credentials.py0000664000175000017500000013743000000000000025122 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import base as bp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _UserCredentialTests: """Test cases for anyone that has a valid user token.""" def test_user_can_create_credentials_for_themselves(self): create = { 'credential': { 'blob': uuid.uuid4().hex, 'user_id': self.user_id, 'type': uuid.uuid4().hex, } } with self.test_client() as c: c.post('/v3/credentials', json=create, headers=self.headers) def test_user_can_get_their_credentials(self): with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': self.user_id, } } r = c.post('/v3/credentials', json=create, headers=self.headers) credential_id = r.json['credential']['id'] path = '/v3/credentials/%s' % credential_id r = c.get(path, headers=self.headers) self.assertEqual(self.user_id, r.json['credential']['user_id']) def test_user_can_list_their_credentials(self): with self.test_client() as c: expected = [] for _ in range(2): create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': self.user_id, } } r = c.post( '/v3/credentials', json=create, headers=self.headers ) expected.append(r.json['credential']) r = c.get('/v3/credentials', headers=self.headers) for credential in expected: self.assertIn(credential, r.json['credentials']) def test_user_can_filter_their_credentials_by_type_and_user(self): with self.test_client() as c: credential_type = uuid.uuid4().hex create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': credential_type, 'user_id': self.user_id, } } r = c.post('/v3/credentials', json=create, 
headers=self.headers) expected_credential_id = r.json['credential']['id'] create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': self.user_id, } } r = c.post('/v3/credentials', json=create, headers=self.headers) path = '/v3/credentials?type=%s' % credential_type r = c.get(path, headers=self.headers) self.assertEqual( expected_credential_id, r.json['credentials'][0]['id'] ) path = '/v3/credentials?user=%s' % self.user_id r = c.get(path, headers=self.headers) self.assertEqual( expected_credential_id, r.json['credentials'][0]['id'] ) def test_user_can_update_their_credential(self): with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': self.user_id, } } r = c.post('/v3/credentials', json=create, headers=self.headers) credential_id = r.json['credential']['id'] updated_blob = uuid.uuid4().hex update = {'credential': {'blob': updated_blob}} path = '/v3/credentials/%s' % credential_id r = c.patch(path, json=update, headers=self.headers) self.assertEqual(updated_blob, r.json['credential']['blob']) def test_user_can_delete_their_credentials(self): with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': self.user_id, } } r = c.post('/v3/credentials', json=create, headers=self.headers) credential_id = r.json['credential']['id'] path = '/v3/credentials/%s' % credential_id c.delete(path, headers=self.headers) class _ProjectUsersTests: """Users who have project role authorization observe the same behavior.""" def test_user_cannot_get_credentials_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( 
self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: path = '/v3/credentials/%s' % credential_id c.get( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_non_existant_credential_forbidden(self): with self.test_client() as c: c.get( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_credentials_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post('/v3/credentials', json=create, headers=headers) with self.test_client() as c: path = '/v3/credentials?user_id=%s' % user['id'] r = c.get(path, headers=self.headers) self.assertEqual([], 
r.json['credentials']) def test_user_cannot_filter_credentials_by_type_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) credential_type = uuid.uuid4().hex with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': credential_type, 'user_id': user['id'], } } c.post('/v3/credentials', json=create, headers=headers) with self.test_client() as c: path = '/v3/credentials?type=%s' % credential_type r = c.get(path, headers=self.headers) self.assertEqual(0, len(r.json['credentials'])) def test_user_cannot_filter_credentials_by_user_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} expected_cred_ids = [] for _ in range(2): create = { 'credential': { 'blob': uuid.uuid4().hex, 
'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) expected_cred_ids.append(r.json['credential']['id']) with self.test_client() as c: path = '/v3/credentials?user_id=%s' % user['id'] r = c.get(path, headers=self.headers) self.assertEqual([], r.json['credentials']) def test_user_cannot_update_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} path = '/v3/credentials/%s' % credential_id c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existant_credential_forbidden(self): with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} c.patch( '/v3/credentials/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_credentials_for_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: create = { 
'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post( '/v3/credentials', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: path = '/v3/credentials/%s' % credential_id c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existant_credential_forbidden(self): with self.test_client() as c: c.delete( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _SystemUserCredentialTests: """Tests that are common across all system users.""" def test_user_can_list_credentials_for_other_users(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) 
PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: r = c.get('/v3/credentials', headers=self.headers) self.assertEqual(1, len(r.json['credentials'])) self.assertEqual(credential_id, r.json['credentials'][0]['id']) self.assertEqual(user['id'], r.json['credentials'][0]['user_id']) def test_user_cannot_get_non_existant_credential_not_found(self): with self.test_client() as c: c.get( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_filter_credentials_by_type_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) credential_type = uuid.uuid4().hex with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': credential_type, 'user_id': user['id'], } } r = c.post('/v3/credentials', 
json=create, headers=headers) credential_id = r.json['credential']['id'] create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post('/v3/credentials', json=create, headers=headers) with self.test_client() as c: path = '/v3/credentials?type=%s' % credential_type r = c.get(path, headers=self.headers) self.assertEqual(1, len(r.json['credentials'])) self.assertEqual(credential_id, r.json['credentials'][0]['id']) self.assertEqual(user['id'], r.json['credentials'][0]['user_id']) def test_user_can_filter_credentials_by_user_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} expected_cred_ids = [] for _ in range(2): create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) expected_cred_ids.append(r.json['credential']['id']) with self.test_client() as c: path = '/v3/credentials?user_id=%s' % user['id'] r = c.get(path, headers=self.headers) self.assertEqual(2, len(r.json['credentials'])) for credential in r.json['credentials']: self.assertIn(credential['id'], expected_cred_ids) self.assertEqual(user['id'], credential['user_id']) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _SystemUserCredentialTests, ): def 
setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_cannot_create_credentials_for_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post( '/v3/credentials', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 
'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} path = '/v3/credentials/%s' % credential_id c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existant_credential_forbidden(self): with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} c.patch( '/v3/credentials/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: path = '/v3/credentials/%s' % credential_id c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existant_credential_forbidden(self): with self.test_client() as c: c.delete( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _SystemUserCredentialTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_cannot_create_credentials_for_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post( '/v3/credentials', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, 
project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} path = '/v3/credentials/%s' % credential_id c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existant_credential_forbidden(self): with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} c.patch( '/v3/credentials/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: path = '/v3/credentials/%s' % credential_id c.delete( path, headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existant_credential_forbidden(self): with self.test_client() as c: c.delete( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _SystemUserCredentialTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_credentials_for_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } c.post('/v3/credentials', json=create, headers=self.headers) def test_user_can_update_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( 
user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] with self.test_client() as c: path = '/v3/credentials/%s' % credential_id updated_blob = uuid.uuid4().hex update = {'credential': {'blob': updated_blob}} r = c.patch(path, json=update, headers=self.headers) self.assertEqual(updated_blob, r.json['credential']['blob']) self.assertEqual(user['id'], r.json['credential']['user_id']) def test_user_cannot_update_non_existant_credential_not_found(self): with self.test_client() as c: update = {'credential': {'blob': uuid.uuid4().hex}} c.patch( '/v3/credentials/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_delete_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} create = { 'credential': { 'blob': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'user_id': user['id'], } } r = c.post('/v3/credentials', json=create, headers=headers) credential_id = r.json['credential']['id'] 
with self.test_client() as c: path = '/v3/credentials/%s' % credential_id c.delete(path, headers=self.headers) def test_user_cannot_delete_non_existant_credential_not_found(self): with self.test_client() as c: c.delete( '/v3/credentials/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.credentials have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_credential': bp.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER, 'identity:list_credentials': bp.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER, 'identity:create_credential': bp.ADMIN_OR_CRED_OWNER, 'identity:update_credential': bp.ADMIN_OR_CRED_OWNER, 'identity:delete_credential': bp.ADMIN_OR_CRED_OWNER, } f.write(jsonutils.dumps(overridden_policies)) class ProjectReaderTestsEnforceScopeFalse( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=False) project_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTestsEnforceScopeFalse( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _ProjectUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=False) project_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) self.project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTestsEnforceScopeFalse( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserCredentialTests, _SystemUserCredentialTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=False) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_domain_config.py0000664000175000017500000007163700000000000025427 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemDomainAndProjectUserDomainConfigTests: def test_user_can_get_security_compliance_domain_config(self): # Set the security compliance configuration options password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=password_regex ) self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' % CONF.identity.default_domain_id, headers=self.headers, ) def test_user_can_get_security_compliance_domain_config_option(self): password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' '/password_regex_description' % CONF.identity.default_domain_id, headers=self.headers, ) def test_can_get_security_compliance_config_with_user_from_other_domain( self, ): # noqa: E501 domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create a user in the new domain user = unit.create_user(PROVIDERS.identity_api, domain['id']) # Create a project in the new domain project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # Give the new user a non-admin role on the project PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], project['id'], role['id'] ) password_regex = 
uuid.uuid4().hex password_regex_description = uuid.uuid4().hex group = 'security_compliance' self.config_fixture.config(group=group, password_regex=password_regex) self.config_fixture.config( group=group, password_regex_description=password_regex_description ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' % CONF.identity.default_domain_id, headers=self.headers, ) class _SystemUserDomainConfigTests: def test_user_can_get_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get('/v3/domains/%s/config' % domain['id'], headers=self.headers) def test_user_can_get_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap' % domain['id'], headers=self.headers, ) def test_user_can_get_config_by_group_invalid_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) invalid_domain_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap' % invalid_domain_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_non_existent_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config' % domain['id'], headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_non_existent_config_group_invalid_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) config = {'ldap': {'url': 
uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(domain['id'], config) invalid_domain_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap' % invalid_domain_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap/url' % domain['id'], headers=self.headers, ) def test_user_can_get_non_existent_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(domain['id'], config) with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap/user_tree_dn' % domain['id'], headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_non_existent_config_option_invalid_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(domain['id'], config) invalid_domain_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap/user_tree_dn' % invalid_domain_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_security_compliance_domain_config(self): # Set the security compliance configuration options password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=password_regex ) self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' % 
CONF.identity.default_domain_id, headers=self.headers, ) def test_user_can_get_security_compliance_domain_config_option(self): password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' '/password_regex_description' % CONF.identity.default_domain_id, headers=self.headers, ) def test_can_get_security_compliance_config_with_user_from_other_domain( self, ): # noqa: E501 domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex group = 'security_compliance' self.config_fixture.config(group=group, password_regex=password_regex) self.config_fixture.config( group=group, password_regex_description=password_regex_description ) with self.test_client() as c: c.get( '/v3/domains/%s/config/security_compliance' % CONF.identity.default_domain_id, headers=self.headers, ) def test_user_can_get_domain_config_default(self): with self.test_client() as c: c.get('/v3/domains/config/default', headers=self.headers) def test_user_can_get_domain_group_config_default(self): with self.test_client() as c: c.get('/v3/domains/config/ldap/default', headers=self.headers) def test_user_can_get_domain_config_option_default(self): with self.test_client() as c: c.get('/v3/domains/config/ldap/url/default', headers=self.headers) class _SystemReaderMemberDomainAndProjectUserDomainConfigTests: def test_user_cannot_create_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/config' % domain['id'], json={'config': unit.new_domain_config_ref()}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_domain_config(self): domain = PROVIDERS.resource_api.create_domain( 
uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}, } with self.test_client() as c: c.patch( '/v3/domains/%s/config' % domain['id'], json={'config': new_config}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) new_config = { 'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex} } with self.test_client() as c: c.patch( '/v3/domains/%s/config/ldap' % domain['id'], json={'config': new_config}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) new_config = {'url': uuid.uuid4().hex} PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.patch( '/v3/domains/%s/config/ldap/url' % domain['id'], json={'config': new_config}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s/config' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( 
'/v3/domains/%s/config/ldap' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s/config/ldap/url' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserDomainConfigTests: def test_user_cannot_get_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_non_existant_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.get( '/v3/domains/%s/config/ldap/url' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_get_domain_config_default(self): with self.test_client() as c: c.get( '/v3/domains/config/default', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_group_config_default(self): with self.test_client() as c: c.get( '/v3/domains/config/ldap/default', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_config_option_default(self): with self.test_client() as c: c.get( '/v3/domains/config/ldap/url/default', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainConfigTests, _SystemReaderMemberDomainAndProjectUserDomainConfigTests, _SystemDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainConfigTests, _SystemReaderMemberDomainAndProjectUserDomainConfigTests, _SystemDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainConfigTests, _SystemDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/config' % domain['id'], json={'config': unit.new_domain_config_ref()}, headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_cannot_create_invalid_domain_config(self): invalid_domain_id = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/domains/%s/config' % invalid_domain_id, json={'config': unit.new_domain_config_ref()}, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_update_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.patch( '/v3/domains/%s/config' % domain['id'], json={'config': new_config}, headers=self.headers, ) def test_user_can_update_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) new_config = { 'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex} } PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.patch( '/v3/domains/%s/config/ldap' % domain['id'], json={'config': new_config}, headers=self.headers, ) def test_user_can_update_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) new_config = {'url': uuid.uuid4().hex} PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.patch( 
'/v3/domains/%s/config/ldap/url' % domain['id'], json={'config': new_config}, headers=self.headers, ) def test_user_can_delete_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s/config' % domain['id'], headers=self.headers ) def test_user_can_delete_domain_group_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s/config/ldap' % domain['id'], headers=self.headers, ) def test_user_can_delete_domain_config_option(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s/config/ldap/url' % domain['id'], headers=self.headers, ) def test_user_cannot_delete_invalid_domain_config(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.domain_config_api.create_config( domain['id'], unit.new_domain_config_ref() ) invalid_domain_id = uuid.uuid4().hex with self.test_client() as c: c.delete( '/v3/domains/%s/config' % invalid_domain_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemDomainAndProjectUserDomainConfigTests, _DomainAndProjectUserDomainConfigTests, _SystemReaderMemberDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( 
uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemDomainAndProjectUserDomainConfigTests, _DomainAndProjectUserDomainConfigTests, _SystemReaderMemberDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemDomainAndProjectUserDomainConfigTests, _DomainAndProjectUserDomainConfigTests, _SystemReaderMemberDomainAndProjectUserDomainConfigTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_domain_roles.py0000664000175000017500000003327300000000000025300 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserDomainRoleTests: """Common default functionality for all system users.""" def test_user_can_list_domain_roles(self): PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: r = c.get( '/v3/roles?domain_id=%s' % CONF.identity.default_domain_id, headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_user_can_get_a_domain_role(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: r = c.get('/v3/roles/%s' % role['id'], headers=self.headers) self.assertEqual(role['id'], r.json['role']['id']) class _SystemReaderAndMemberDomainRoleTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_domain_roles(self): create = { 'role': unit.new_role_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/roles', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_domain_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, 
unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_domain_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserDomainRoleTests: """Common functionality for all domain and project users.""" def test_user_cannot_list_domain_roles(self): PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.get( '/v3/roles', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_domain_role(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.get( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_domain_roles(self): create = { 'role': unit.new_role_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/roles', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_domain_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_domain_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, 
unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainRoleTests, _SystemReaderAndMemberDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainRoleTests, _SystemReaderAndMemberDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_roles(self): create = { 'role': unit.new_role_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post('/v3/roles', json=create, headers=self.headers) def test_user_can_update_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, ) def test_user_can_delete_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete('/v3/roles/%s' % role['id'], headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) 
self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserDomainRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_domains.py0000664000175000017500000005460600000000000024262 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import domain as dp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserDomainTests: def test_user_can_list_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: r = c.get('/v3/domains', headers=self.headers) domain_ids = [] for domain in r.json['domains']: domain_ids.append(domain['id']) self.assertIn(domain['id'], domain_ids) def test_user_can_filter_domains_by_name(self): domain_name = uuid.uuid4().hex domain = unit.new_domain_ref(name=domain_name) domain = PROVIDERS.resource_api.create_domain(domain['id'], domain) PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: r = c.get( '/v3/domains?name=%s' % domain_name, headers=self.headers ) self.assertEqual(1, len(r.json['domains'])) self.assertEqual(domain['id'], r.json['domains'][0]['id']) def test_user_can_filter_domains_by_enabled(self): enabled_domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) disabled_domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref(enabled=False) ) with self.test_client() as c: r = c.get('/v3/domains?enabled=true', headers=self.headers) enabled_domain_ids = [] for domain in r.json['domains']: enabled_domain_ids.append(domain['id']) self.assertIn(enabled_domain['id'], enabled_domain_ids) self.assertNotIn(disabled_domain['id'], enabled_domain_ids) r = c.get('/v3/domains?enabled=false', headers=self.headers) disabled_domain_ids = [] for domain in r.json['domains']: 
disabled_domain_ids.append(domain['id']) self.assertIn(disabled_domain['id'], disabled_domain_ids) self.assertNotIn(enabled_domain['id'], disabled_domain_ids) def test_user_can_get_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: r = c.get('/v3/domains/%s' % domain['id'], headers=self.headers) self.assertEqual(domain['id'], r.json['domain']['id']) class _SystemMemberAndReaderDomainTests: def test_user_cannot_create_a_domain(self): create = {'domain': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.post( '/v3/domains', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) update = {'domain': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/domains/%s' % domain['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.delete( '/v3/domains/%s' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainReaderDomainTests: def test_user_can_list_domains(self): # second domain, should be invisible to scoped reader second_domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) # user should only see their own domain with self.test_client() as c: r = c.get('/v3/domains', headers=self.headers) self.assertEqual(1, len(r.json['domains'])) self.assertNotIn( second_domain['id'], [d['id'] for d in r.json['domains']] ) self.assertEqual(self.domain_id, r.json['domains'][0]['id']) def test_user_can_filter_domains_by_name(self): # second domain, should be invisible to domain-scoped reader second_domain = 
PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: # filtering for own domain should succeed r = c.get( '/v3/domains?name=%s' % self.domain['name'], headers=self.headers, ) self.assertEqual(1, len(r.json['domains'])) self.assertNotIn( second_domain['id'], [d['id'] for d in r.json['domains']] ) self.assertEqual(self.domain['id'], r.json['domains'][0]['id']) # filtering for the second domain should yield no results r = c.get( '/v3/domains?name=%s' % second_domain['name'], headers=self.headers, ) self.assertEqual(0, len(r.json['domains'])) def test_user_can_filter_domains_by_enabled(self): # additional domains, neither should be visible to domain-scoped reader enabled_domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) disabled_domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref(enabled=False) ) # user should only see their own domain when filtering for enabled with self.test_client() as c: r = c.get('/v3/domains?enabled=true', headers=self.headers) enabled_domain_ids = [] for domain in r.json['domains']: enabled_domain_ids.append(domain['id']) self.assertEqual(1, len(r.json['domains'])) self.assertEqual(self.domain_id, r.json['domains'][0]['id']) self.assertNotIn(enabled_domain['id'], enabled_domain_ids) self.assertNotIn(disabled_domain['id'], enabled_domain_ids) # filtering for disabled should yield no results r = c.get('/v3/domains?enabled=false', headers=self.headers) self.assertEqual(0, len(r.json['domains'])) class _ProjectUserDomainTests: def test_user_can_get_a_domain(self): with self.test_client() as c: r = c.get('/v3/domains/%s' % self.domain_id, headers=self.headers) self.assertEqual(self.domain_id, r.json['domain']['id']) def test_user_cannot_get_a_domain_they_are_not_authorized_to_access(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.get( 
'/v3/domains/%s' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_domains(self): with self.test_client() as c: c.get( '/v3/domains', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_domains_by_name(self): domain_name = uuid.uuid4().hex domain = unit.new_domain_ref(name=domain_name) domain = PROVIDERS.resource_api.create_domain(domain['id'], domain) PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.get( '/v3/domains?name=%s' % domain_name, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_filter_domains_by_enabled(self): with self.test_client() as c: c.get( '/v3/domains?enabled=true', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) c.get( '/v3/domains?enabled=false', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) update = {'domain': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/domains/%s' % domain['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_a_domain(self): create = {'domain': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.post( '/v3/domains', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: update = {'domain': {'enabled': False}} path = '/v3/domains/%s' % domain['id'] c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_get_non_existant_domain_forbidden(self): with self.test_client() as c: c.get( '/v3/domains/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainTests, _SystemMemberAndReaderDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.system_reader_id = PROVIDERS.identity_api.create_user( system_reader )['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.system_reader_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.system_reader_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainTests, _SystemMemberAndReaderDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.system_member_id = PROVIDERS.identity_api.create_user( system_member )['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.system_member_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.system_member_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.system_admin_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.system_admin_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_update_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) update = {'domain': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/domains/%s' % domain['id'], json=update, headers=self.headers, ) def test_user_can_create_a_domain(self): create = {'domain': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.post('/v3/domains', json=create, headers=self.headers) def test_user_can_delete_a_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: update = {'domain': {'enabled': False}} path = '/v3/domains/%s' % domain['id'] c.patch(path, json=update, headers=self.headers) c.delete(path, headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainReaderDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = self.domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.domain_user_id = PROVIDERS.identity_api.create_user(domain_user)[ 'id' ] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.domain_user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.domain_user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) project_reader_id = PROVIDERS.identity_api.create_user(project_reader)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=project_reader_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_reader_id, password=project_reader['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_member = unit.new_user_ref(domain_id=self.domain_id) project_member_id = PROVIDERS.identity_api.create_user(project_member)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=project_member_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_member_id, password=project_member['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserDomainTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_admin = unit.new_user_ref(domain_id=self.domain_id) project_admin_id = PROVIDERS.identity_api.create_user(project_admin)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=project_admin_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_admin_id, password=project_admin['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.domains have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. 
This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_domain': ( dp.SYSTEM_USER_OR_DOMAIN_USER_OR_PROJECT_USER ) } f.write(jsonutils.dumps(overridden_policies)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_ec2_credential.py0000664000175000017500000004530700000000000025471 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client from oslo_serialization import jsonutils from keystone.common.policies import base as bp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _UserEC2CredentialTests: def test_user_can_get_their_ec2_credentials(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=project['id'], ) with self.test_client() as c: r = c.post( '/v3/users/%s/credentials/OS-EC2' % self.user_id, json={'tenant_id': project['id']}, headers=self.headers, ) credential_id = r.json['credential']['access'] path = '/v3/users/{}/credentials/OS-EC2/{}'.format( self.user_id, credential_id, ) r = c.get(path, headers=self.headers) self.assertEqual(self.user_id, r.json['credential']['user_id']) def test_user_can_list_their_ec2_credentials(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=project['id'], ) with self.test_client() as c: c.post( '/v3/users/%s/credentials/OS-EC2' % self.user_id, json={'tenant_id': project['id']}, headers=self.headers, ) path = '/v3/users/%s/credentials/OS-EC2' % self.user_id r = c.get(path, headers=self.headers) for credential in r.json['credentials']: self.assertEqual(self.user_id, credential['user_id']) def test_user_create_their_ec2_credentials(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = 
PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=project['id'], ) with self.test_client() as c: c.post( '/v3/users/%s/credentials/OS-EC2' % self.user_id, json={'tenant_id': project['id']}, headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_delete_their_ec2_credentials(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=project['id'], ) with self.test_client() as c: r = c.post( '/v3/users/%s/credentials/OS-EC2' % self.user_id, json={'tenant_id': project['id']}, headers=self.headers, ) credential_id = r.json['credential']['access'] c.delete( '/v3/users/%s/credentials/OS-EC2/%s' % (self.user_id, credential_id), headers=self.headers, ) def test_user_cannot_create_ec2_credentials_for_others(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_ec2_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) 
PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} r = c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=headers, ) credential_id = r.json['credential']['access'] c.delete( '/v3/users/%s/credentials/OS-EC2/%s' % (self.user_id, credential_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _SystemUserTests: def test_user_can_get_ec2_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} r = c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=headers, ) credential_id = r.json['credential']['access'] path = '/v3/users/{}/credentials/OS-EC2/{}'.format( self.user_id, credential_id, ) c.get( path, headers=self.headers, expected_status_code=http.client.OK ) class _SystemReaderAndMemberTests: def test_user_cannot_list_ec2_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = 
PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=headers, ) path = '/v3/users/%s/credentials/OS-EC2' % self.user_id r = c.get(path, headers=self.headers) self.assertEqual([], r.json['credentials']) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemReaderAndMemberTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemReaderAndMemberTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_list_ec2_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=headers, ) path = '/v3/users/%s/credentials/OS-EC2' % self.user_id r = c.get(path, headers=self.headers) self.assertEqual([], r.json['credentials']) def test_user_can_create_ec2_credentials_for_others(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=self.headers, ) def test_user_can_delete_ec2_credentials_for_others(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_password = user['password'] user = PROVIDERS.identity_api.create_user(user) project = unit.new_project_ref( 
domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) user_auth = self.build_authentication_request( user_id=user['id'], password=user_password, project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=user_auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} r = c.post( '/v3/users/%s/credentials/OS-EC2' % user['id'], json={'tenant_id': project['id']}, headers=headers, ) credential_id = r.json['credential']['access'] c.delete( '/v3/users/%s/credentials/OS-EC2/%s' % (self.user_id, credential_id), headers=self.headers, ) class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserEC2CredentialTests, _SystemReaderAndMemberTests, ): def _override_policy(self): # TODO(cmurphy): Remove this once the deprecated policies in # keystone.common.policies.ec2_credential have been removed. This is # only here to make sure we test the new policies instead of the # deprecated ones. Oslo.policy will OR deprecated policies with new # policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. 
reader_or_cred_owner = bp.ADMIN_OR_SYSTEM_READER_OR_CRED_OWNER reader_or_owner = bp.RULE_SYSTEM_READER_OR_OWNER admin_or_cred_owner = bp.ADMIN_OR_CRED_OWNER with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:ec2_get_credential': reader_or_cred_owner, 'identity:ec2_list_credentials': reader_or_owner, 'identity:ec2_create_credential': admin_or_cred_owner, 'identity:ec2_update_credential': admin_or_cred_owner, 'identity:ec2_delete_credential': admin_or_cred_owner, } f.write(jsonutils.dumps(overridden_policies)) def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_endpoint_group.py0000664000175000017500000006600100000000000025654 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserEndpointGroupsTests: """Common default functionality for all system users.""" def test_user_can_list_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: r = c.get('/v3/OS-EP-FILTER/endpoint_groups', headers=self.headers) endpoint_groups = [] for endpoint_group in r.json['endpoint_groups']: endpoint_groups.append(endpoint_group['id']) self.assertIn(endpoint_group['id'], endpoint_groups) def test_user_can_get_an_endpoint_group(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], headers=self.headers, ) def test_user_can_list_projects_associated_with_endpoint_groups(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = 
PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: r = c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects' % endpoint_group['id'], headers=self.headers, ) projects = [] for project in r.json['projects']: projects.append(project['id']) self.assertIn(project['id'], projects) def test_user_can_list_endpoints_associated_with_endpoint_groups(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: r = c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/endpoints' % endpoint_group['id'], headers=self.headers, ) endpoints = [] for endpoint in r.json['endpoints']: endpoints.append(endpoint['id']) self.assertIn(endpoint['id'], endpoints) def test_user_can_get_endpoints_associated_with_endpoint_groups(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, ) def test_user_can_list_endpoint_groups_with_their_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, 
unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: r = c.get( '/v3/OS-EP-FILTER/projects/%s/endpoint_groups' % project['id'], headers=self.headers, ) endpoint_groups = [] for endpoint_group in r.json['endpoint_groups']: endpoint_groups.append(endpoint_group['id']) class _SystemReaderAndMemberUserEndpointGroupsTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_endpoint_groups(self): create = { 'endpoint_group': { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'filters': {'interface': 'public'}, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post( '/v3/OS-EP-FILTER/endpoint_groups', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) update = {'endpoint_group': {'filters': {'interface': 'internal'}}} with self.test_client() as c: c.patch( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_add_endpoint_group_to_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.put( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_remove_endpoint_group_from_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserEndpointGroupTests: def test_user_cannot_list_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_an_endpoint_group(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_list_projects_associated_with_endpoint_groups(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects' % endpoint_group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_endpoints_associated_with_endpoint_groups(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/endpoints' % endpoint_group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_endpoints_associated_with_endpoint_groups(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_list_endpoint_groups_with_their_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/projects/%s/endpoint_groups' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_endpoint_groups(self): create = { 'endpoint_group': { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'filters': {'interface': 'public'}, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post( '/v3/OS-EP-FILTER/endpoint_groups', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) update = {'endpoint_group': {'filters': {'interface': 'internal'}}} with self.test_client() as c: c.patch( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_endpoint_groups(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_add_endpoint_group_to_project(self): project = PROVIDERS.resource_api.create_project( 
uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.put( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_remove_endpoint_group_from_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointGroupsTests, _SystemReaderAndMemberUserEndpointGroupsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointGroupsTests, _SystemReaderAndMemberUserEndpointGroupsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointGroupsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_endpoint_group(self): create = { 'endpoint_group': { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'filters': {'interface': 'public'}, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post( '/v3/OS-EP-FILTER/endpoint_groups', json=create, headers=self.headers, ) def test_user_can_update_endpoint_group(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) update = {'endpoint_group': {'filters': {'interface': 'internal'}}} with self.test_client() as c: c.patch( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], json=update, headers=self.headers, ) def test_user_can_delete_endpoint_group(self): endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s' % endpoint_group['id'], headers=self.headers, ) def test_user_add_endpoint_group_to_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) with self.test_client() as c: c.put( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, ) def test_remove_endpoint_group_from_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) endpoint_group = 
unit.new_endpoint_group_ref( filters={'interface': 'public'} ) endpoint_group = PROVIDERS.catalog_api.create_endpoint_group( endpoint_group['id'], endpoint_group ) PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group['id'], project['id'] ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/endpoint_groups/%s/projects/%s' % (endpoint_group['id'], project['id']), headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_endpoints.py0000664000175000017500000003702300000000000024625 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserEndpointTests: """Common default functionality for all system users.""" def test_user_can_list_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: r = c.get('/v3/endpoints', headers=self.headers) endpoints = [] for endpoint in r.json['endpoints']: endpoints.append(endpoint['id']) self.assertIn(endpoint['id'], endpoints) def test_user_can_get_an_endpoint(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.get('/v3/endpoints/%s' % endpoint['id'], headers=self.headers) class _SystemReaderAndMemberUserEndpointTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_endpoints(self): create = { 'endpoint': { 'interface': 'public', 'service_id': uuid.uuid4().hex, 'url': 'https://' + uuid.uuid4().hex + '.com', } } with self.test_client() as c: c.post( '/v3/endpoints', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) update = {'endpoint': 
{'interface': 'internal'}} with self.test_client() as c: c.patch( '/v3/endpoints/%s' % endpoint['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/endpoints/%s' % endpoint['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserEndpointTests: def test_user_cannot_create_endpoints(self): create = { 'endpoint': { 'interface': 'public', 'service_id': uuid.uuid4().hex, 'url': 'https://' + uuid.uuid4().hex + '.com', } } with self.test_client() as c: c.post( '/v3/endpoints', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_endpoints(self): # Domain and project users should access this information through the # token response they get when they authenticate for or validate a # token. 
service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.get( '/v3/endpoints', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_an_endpoint(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.get( '/v3/endpoints/%s' % endpoint['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) update = {'endpoint': {'interface': 'internal'}} with self.test_client() as c: c.patch( '/v3/endpoints/%s' % endpoint['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/endpoints/%s' % endpoint['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointTests, _SystemReaderAndMemberUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', 
enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointTests, _SystemReaderAndMemberUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) create = { 'endpoint': { 'interface': 'public', 'service_id': service['id'], 'url': 'https://' + uuid.uuid4().hex + '.com', } } with self.test_client() as c: c.post('/v3/endpoints', json=create, headers=self.headers) def test_user_can_update_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) update = {'endpoint': {'interface': 'internal'}} with self.test_client() as c: c.patch( '/v3/endpoints/%s' % endpoint['id'], json=update, headers=self.headers, ) def test_user_can_delete_endpoints(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = 
PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/endpoints/%s' % endpoint['id'], headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_grants.py0000664000175000017500000024115100000000000024117 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import grant as gp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserGrantTests: def test_can_list_grants_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/projects/{}/users/{}/roles'.format( project['id'], user['id'] ), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) with self.test_client() as c: r = c.get( '/v3/domains/{}/users/{}/roles'.format( domain['id'], user['id'] ), headers=self.headers, 
) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/projects/%s/groups/%s/roles' % (project['id'], group['id']), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain['id'], ) with self.test_client() as c: r = c.get( '/v3/domains/{}/groups/{}/roles'.format( domain['id'], group['id'] ), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_check_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = 
PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) with self.test_client() as c: c.get( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain['id'], ) with self.test_client() as c: c.get( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) class _SystemMemberAndReaderGrantTests: def test_cannot_create_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.put( 
'/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], 
project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) class _DomainUserTests: def test_can_list_grants_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/projects/{}/users/{}/roles'.format( project['id'], user['id'] ), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=self.domain_id, ) with self.test_client() as c: r = c.get( '/v3/domains/{}/users/{}/roles'.format( self.domain_id, user['id'] ), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/projects/%s/groups/%s/roles' % (project['id'], group['id']), headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_list_grants_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=self.domain_id, ) with self.test_client() as c: r = c.get( '/v3/domains/%s/groups/%s/roles' % (self.domain_id, group['id']), 
headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) def test_can_check_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=self.domain_id, ) with self.test_client() as c: c.get( '/v3/domains/%s/users/%s/roles/%s' % ( self.domain_id, user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_can_check_grant_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=self.domain_id, ) with 
self.test_client() as c: c.get( '/v3/domains/%s/groups/%s/roles/%s' % ( self.domain_id, group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_cannot_list_grants_for_user_other_domain_on_project_own_domain( self, ): # noqa: E501 user_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/{}/users/{}/roles'.format( project['id'], user['id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_user_own_domain_on_project_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/{}/users/{}/roles'.format( project['id'], user['id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_user_own_domain_on_other_domain(self): user_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( 
'/v3/domains/{}/users/{}/roles'.format(domain_id, user['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_user_other_domain_on_own_domain(self): user_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/{}/users/{}/roles'.format(domain_id, user['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_group_other_domain_on_project_own_domain( self, ): # noqa: E501 group_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles' % (project['id'], group['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_group_own_domain_on_project_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles' % (project['id'], group['id']), headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_group_own_domain_on_other_domain(self): group_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/{}/groups/{}/roles'.format( domain_id, group['id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_list_grants_for_group_other_domain_on_own_domain(self): group_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/{}/groups/{}/roles'.format( domain_id, group['id'] ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_other_domain_on_project_own_domain( self, ): # noqa: E501 user_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_own_domain_on_project_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = 
CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_own_domain_on_project_own_domain_with_role_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) # NOTE(cmurphy) the grant for a domain-specific role cannot be created # for a project in a different domain, so we don't try to create it, # but we still need to test that checking the role results in a 403 and # not a 404 with self.test_client() as c: c.get( '/v3/projects/%s/users/%s/roles/%s' % (project['id'], user['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_own_domain_on_other_domain(self): user_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], 
self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_other_domain_on_own_domain(self): user_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_user_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) # NOTE(cmurphy) the grant for a domain-specific role cannot be created # for a project in a different domain, so we don't try to create it, # but we still need to test that checking the role results in a 403 and # not a 404 with self.test_client() as c: c.get( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_other_domain_on_project_own_domain( self, ): # noqa: E501 group_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with 
self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_own_domain_on_project_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_own_domain_on_project_own_domain_with_role_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) # NOTE(cmurphy) the grant for a domain-specific role cannot be created # for a project in a different domain, so we don't try to create it, # but we still need to test that checking the role results in a 403 and # not a 404 with self.test_client() as c: c.get( '/v3/projects/%s/groups/%s/roles/%s' % (project['id'], group['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_own_domain_on_other_domain(self): group_domain_id = 
self.domain_id domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_other_domain_on_own_domain(self): group_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.get( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_check_grant_for_group_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) # NOTE(cmurphy) the grant for a domain-specific role cannot be created # for a project in a different domain, so we don't try to create it, # but we still need to test that checking the role results in a 403 and # not a 404 with self.test_client() as c: c.get( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_other_domain_on_project_own_domain( self, ): # noqa: E501 user_domain_id = CONF.identity.default_domain_id 
project_domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_own_domain_on_project_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_own_domain_on_project_own_domain_with_role_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % (project['id'], user['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_other_domain_on_own_domain(self): user_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( 
unit.new_user_ref(domain_id=user_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_own_domain_on_other_domain(self): user_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_other_domain_on_project_own_domain( self, ): # noqa: E501 group_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_own_domain_on_project_other_domain( self, ): # noqa: E501 group_domain_id = 
self.domain_id project_domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_own_domain_on_project_own_domain_with_role_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id project_domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % (project['id'], group['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_other_domain_on_own_domain(self): group_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_own_domain_on_other_domain(self): group_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % 
(domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_other_domain_on_project_own_domain( self, ): # noqa: E501 user_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_own_domain_on_project_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( 
'/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_other_domain_on_own_domain(self): user_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_own_domain_on_other_domain(self): user_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain_id, ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 user_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=user_domain_id) ) PROVIDERS.assignment_api.create_grant( role['id'], user_id=user['id'], domain_id=domain_id ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain_id, user['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_cannot_revoke_grant_from_group_other_domain_on_project_own_domain( self, ): # noqa: E501 group_domain_id = CONF.identity.default_domain_id project_domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_own_domain_on_project_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id project_domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=project_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_other_domain_on_own_domain(self): group_domain_id = CONF.identity.default_domain_id domain_id = self.domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_own_domain_on_other_domain(self): group_domain_id = self.domain_id domain_id = CONF.identity.default_domain_id group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain_id, ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_own_domain_on_own_domain_with_role_other_domain( self, ): # noqa: E501 group_domain_id = self.domain_id domain_id = self.domain_id role_domain_id = CONF.identity.default_domain_id role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref(domain_id=role_domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=group_domain_id) ) PROVIDERS.assignment_api.create_grant( role['id'], group_id=group['id'], domain_id=domain_id ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % (domain_id, group['id'], role['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserGrantTests, _SystemMemberAndReaderGrantTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # 
Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserGrantTests, _SystemMemberAndReaderGrantTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserGrantTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_can_create_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_create_grant_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, ) def test_can_create_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_create_grant_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_revoke_grant_from_user_on_project(self): user = 
PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_revoke_grant_from_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, ) def test_can_revoke_grant_from_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_revoke_grant_from_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], 
domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) class _DomainMemberAndReaderTests: def test_cannot_create_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_create_grant_for_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], 
self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_user_on_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/users/%s/roles/%s' % (domain['id'], user['id'], self.bootstrapper.reader_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_cannot_revoke_grant_from_group_on_domain(self): group = PROVIDERS.identity_api.create_group( 
unit.new_group_ref(domain_id=self.domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class DomainReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTests, _DomainMemberAndReaderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTests, _DomainMemberAndReaderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.grant have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_grants': gp.SYSTEM_READER_OR_DOMAIN_READER_LIST, 'identity:check_grant': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:create_grant': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:revoke_grant': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) def test_can_create_grant_for_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_create_grant_for_user_own_domain_on_own_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/users/%s/roles/%s' % ( self.domain_id, user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_create_grant_for_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_create_grant_for_group_own_domain_on_own_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/domains/%s/groups/%s/roles/%s' % ( self.domain_id, group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_revoke_grant_from_user_on_project(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) 
project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/users/%s/roles/%s' % ( project['id'], user['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_can_revoke_grant_from_group_on_project(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], project_id=project['id'], ) with self.test_client() as c: c.delete( '/v3/projects/%s/groups/%s/roles/%s' % ( project['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, ) def test_cannot_revoke_grant_from_group_on_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=CONF.identity.default_domain_id) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, group_id=group['id'], domain_id=domain['id'], ) with self.test_client() as c: c.delete( '/v3/domains/%s/groups/%s/roles/%s' % ( domain['id'], group['id'], self.bootstrapper.reader_role_id, ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_groups.py0000664000175000017500000013265000000000000024143 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import group as gp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserGroupTests: """Common default functionality for all system users.""" def test_user_can_list_groups(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: r = c.get('/v3/groups', headers=self.headers) self.assertEqual(1, len(r.json['groups'])) self.assertEqual(group['id'], r.json['groups'][0]['id']) def test_user_can_get_a_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: r = c.get('/v3/groups/%s' % group['id'], headers=self.headers) self.assertEqual(group['id'], r.json['group']['id']) def test_user_can_list_group_members(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( 
unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: r = c.get( '/v3/groups/%s/users' % group['id'], headers=self.headers ) self.assertEqual(1, len(r.json['users'])) self.assertEqual(user['id'], r.json['users'][0]['id']) def test_user_can_list_groups_for_other_users(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: r = c.get('/v3/users/%s/groups' % user['id'], headers=self.headers) self.assertEqual(1, len(r.json['groups'])) self.assertEqual(group['id'], r.json['groups'][0]['id']) def test_user_can_check_if_user_in_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_cannot_get_non_existent_group_not_found(self): with self.test_client() as c: c.get( '/v3/groups/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class _SystemAndDomainMemberAndReaderGroupTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'group': {'name': uuid.uuid4().hex, 'domain_id': domain['id']} } with self.test_client() as c: 
c.post( '/v3/groups', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) update = {'group': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/groups/%s' % group['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/groups/%s' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_add_users_to_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.put( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_remove_users_from_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.delete( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, 
common_auth.AuthTestMixin, _SystemUserGroupTests, _SystemAndDomainMemberAndReaderGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserGroupTests, _SystemAndDomainMemberAndReaderGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'group': {'name': uuid.uuid4().hex, 'domain_id': domain['id']} } with self.test_client() as c: c.post('/v3/groups', json=create, headers=self.headers) def test_user_can_update_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) update = {'group': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/groups/%s' % group['id'], json=update, headers=self.headers, ) def test_user_can_delete_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete('/v3/groups/%s' % group['id'], headers=self.headers) def test_user_can_add_users_to_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() 
) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.put( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, ) def test_user_can_remove_users_from_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.delete( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, ) class _DomainUserGroupTests: def test_user_can_list_groups_in_domain(self): # second domain domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) # one group in new domain group1 = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) # one group in user's domain group2 = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) # user should only see one group with self.test_client() as c: r = c.get('/v3/groups', headers=self.headers) self.assertEqual(1, len(r.json['groups'])) self.assertNotIn(group1['id'], [g['id'] for g in r.json['groups']]) self.assertEqual(group2['id'], r.json['groups'][0]['id']) def test_user_cannot_list_groups_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: r = c.get( '/v3/groups?domain_id=%s' % domain['id'], headers=self.headers ) self.assertEqual(0, len(r.json['groups'])) def test_user_can_get_group_in_domain(self): group = PROVIDERS.identity_api.create_group( 
unit.new_group_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/groups/%s' % group['id'], headers=self.headers) self.assertEqual(group['id'], r.json['group']['id']) def test_user_cannot_get_group_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/groups/%s' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_non_existent_group_forbidden(self): with self.test_client() as c: c.get( '/v3/groups/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_groups_in_domain_for_user_in_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: r = c.get('/v3/users/%s/groups' % user['id'], headers=self.headers) self.assertEqual(1, len(r.json['groups'])) self.assertEqual(group['id'], r.json['groups'][0]['id']) def test_user_cannot_list_groups_in_own_domain_user_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/users/%s/groups' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_groups_for_non_existent_user_forbidden(self): with self.test_client() as c: c.get( '/v3/users/%s/groups' % uuid.uuid4().hex, headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_groups_in_other_domain_user_in_own_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) # one group in other domain group1 = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) # one group in own domain group2 = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user['id'], group2['id']) with self.test_client() as c: r = c.get('/v3/users/%s/groups' % user['id'], headers=self.headers) # only one group should be visible self.assertEqual(1, len(r.json['groups'])) self.assertEqual(group2['id'], r.json['groups'][0]['id']) def test_user_can_list_users_in_own_domain_for_group_in_own_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: r = c.get( '/v3/groups/%s/users' % group['id'], headers=self.headers ) self.assertEqual(1, len(r.json['users'])) self.assertEqual(user['id'], r.json['users'][0]['id']) def test_user_cannot_list_users_in_other_domain_group_in_own_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) # one user in other domain user1 = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) # one user in own domain user2 = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user1['id'], group['id']) 
PROVIDERS.identity_api.add_user_to_group(user2['id'], group['id']) with self.test_client() as c: r = c.get( '/v3/groups/%s/users' % group['id'], headers=self.headers ) # only one user should be visible self.assertEqual(1, len(r.json['users'])) self.assertEqual(user2['id'], r.json['users'][0]['id']) def test_user_cannot_list_users_in_own_domain_group_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/groups/%s/users' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_users_in_non_existent_group_forbidden(self): with self.test_client() as c: c.get( '/v3/groups/%s/users' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_check_user_in_own_domain_group_in_own_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.head( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) c.get( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_cannot_check_user_in_other_domain_group_in_own_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) group = 
PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.head( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) c.get( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class DomainReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserGroupTests, _SystemAndDomainMemberAndReaderGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(cmurphy): Remove this once the deprecated policies in # keystone.common.policies.group have been removed. 
This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups_for_user': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER, 'identity:list_users_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:check_user_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER, } f.write(jsonutils.dumps(overridden_policies)) class DomainMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserGroupTests, _SystemAndDomainMemberAndReaderGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the 
tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(cmurphy): Remove this once the deprecated policies in # keystone.common.policies.group have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups_for_user': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER, 'identity:list_users_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:check_user_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER, } f.write(jsonutils.dumps(overridden_policies)) class DomainAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserGroupTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, 
user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(cmurphy): Remove this once the deprecated policies in # keystone.common.policies.group have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:list_groups_for_user': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_USER_OR_OWNER, 'identity:create_group': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_group': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_group': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:list_users_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER, 'identity:remove_user_from_group': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_FOR_TARGET_GROUP_USER, 'identity:check_user_in_group': gp.SYSTEM_READER_OR_DOMAIN_READER_FOR_TARGET_GROUP_USER, 'identity:add_user_to_group': gp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_FOR_TARGET_GROUP_USER, } f.write(jsonutils.dumps(overridden_policies)) def test_user_can_create_group_for_own_domain(self): create = { 'group': {'name': uuid.uuid4().hex, 'domain_id': self.domain_id} } with self.test_client() as c: 
c.post('/v3/groups', json=create, headers=self.headers) def test_user_cannot_create_group_for_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'group': {'name': uuid.uuid4().hex, 'domain_id': domain['id']} } with self.test_client() as c: c.post( '/v3/groups', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_update_group_in_own_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) update = {'group': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/groups/%s' % group['id'], json=update, headers=self.headers, ) def test_user_cannot_update_group_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) update = {'group': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/groups/%s' % group['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_delete_group_in_own_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete('/v3/groups/%s' % group['id'], headers=self.headers) def test_user_cannot_delete_group_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/groups/%s' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_remove_user_in_own_domain_from_group_in_own_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) user = PROVIDERS.identity_api.create_user( 
unit.new_user_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.delete( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, ) def test_user_cannot_remove_user_other_domain_from_group_own_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.delete( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_remove_user_own_domain_from_group_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.delete( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_remove_non_existent_user_from_group_forbidden(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': uuid.uuid4().hex}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_remove_user_from_non_existent_group_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with 
self.test_client() as c: c.delete( '/v3/groups/%(group)s/users/%(user)s' % {'group': uuid.uuid4().hex, 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_add_user_in_own_domain_to_group_in_own_domain(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, ) def test_user_cannot_add_user_other_domain_to_group_own_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.put( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_add_user_own_domain_to_group_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_add_non_existent_user_to_group_forbidden(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/groups/%(group)s/users/%(user)s' % {'group': group['id'], 'user': uuid.uuid4().hex}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) 
def test_user_cannot_add_user_from_non_existent_group_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.put( '/v3/groups/%(group)s/users/%(user)s' % {'group': uuid.uuid4().hex, 'user': user['id']}, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=project['id'], ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=project['id'], ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_get_list_their_own_groups(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=self.domain_id) ) PROVIDERS.identity_api.add_user_to_group(self.user_id, group['id']) with self.test_client() as c: r = c.get( '/v3/users/%s/groups' % self.user_id, headers=self.headers ) self.assertEqual(1, len(r.json['groups'])) self.assertEqual(group['id'], r.json['groups'][0]['id']) def test_user_cannot_list_groups_for_other_users(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/users/%s/groups' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_groups(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/groups', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/groups/%s' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_group_members(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( 
unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/groups/%s/users' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_if_user_in_group(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) group = PROVIDERS.identity_api.create_group( unit.new_group_ref(domain_id=domain['id']) ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) with self.test_client() as c: c.get( '/v3/groups/{}/users/{}'.format(group['id'], user['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_non_existent_group_forbidden(self): with self.test_client() as c: c.get( '/v3/groups/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_identity_providers.py0000664000175000017500000003363500000000000026555 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserIdentityProviderTests: """Common default functionality for all system users.""" def test_user_can_list_identity_providers(self): expected_idp_ids = [] idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) expected_idp_ids.append(idp['id']) with self.test_client() as c: r = c.get( '/v3/OS-FEDERATION/identity_providers', headers=self.headers ) for idp in r.json['identity_providers']: self.assertIn(idp['id'], expected_idp_ids) def test_user_can_get_an_identity_provider(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], headers=self.headers, ) class _SystemReaderAndMemberIdentityProviderTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_identity_providers(self): create = {'identity_provider': {'remote_ids': [uuid.uuid4().hex]}} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/identity_providers/%s' % uuid.uuid4().hex, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) update = {'identity_provider': {'enabled': False}} with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, 
unit.new_identity_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserIdentityProviderTests: """Common functionality for all domain and project users.""" def test_user_cannot_create_identity_providers(self): create = {'identity_provider': {'remote_ids': [uuid.uuid4().hex]}} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/identity_providers/%s' % uuid.uuid4().hex, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) update = {'identity_provider': {'enabled': False}} with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_identity_providers(self): PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/identity_providers', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_an_identity_provider(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, 
_SystemUserIdentityProviderTests, _SystemReaderAndMemberIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserIdentityProviderTests, _SystemReaderAndMemberIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_identity_providers(self): create = {'identity_provider': {'remote_ids': [uuid.uuid4().hex]}} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/identity_providers/%s' % uuid.uuid4().hex, json=create, headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) update = {'identity_provider': {'enabled': False}} with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], json=update, headers=self.headers, ) def test_user_can_delete_identity_providers(self): idp = PROVIDERS.federation_api.create_idp( uuid.uuid4().hex, unit.new_identity_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/identity_providers/%s' % idp['id'], headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, 
_DomainAndProjectUserIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserIdentityProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_implied_roles.py0000664000175000017500000001753400000000000025456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _ImpliedRolesSetupMixin: def _create_test_roles(self): ref = unit.new_role_ref() role = PROVIDERS.role_api.create_role(ref['id'], ref) self.prior_role_id = role['id'] ref = unit.new_role_ref() role = PROVIDERS.role_api.create_role(ref['id'], ref) self.implied_role_id = role['id'] class _SystemUserImpliedRoleTests: """Common default functionality for all system users.""" def test_user_can_list_implied_roles(self): PROVIDERS.role_api.create_implied_role( self.prior_role_id, self.implied_role_id ) with self.test_client() as c: r = c.get( '/v3/roles/%s/implies' % self.prior_role_id, headers=self.headers, ) self.assertEqual(1, len(r.json['role_inference']['implies'])) def test_user_can_get_an_implied_role(self): PROVIDERS.role_api.create_implied_role( self.prior_role_id, self.implied_role_id ) with self.test_client() as c: c.get( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, ) c.head( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_list_role_inference_rules(self): PROVIDERS.role_api.create_implied_role( self.prior_role_id, self.implied_role_id ) with self.test_client() as c: r = 
c.get('/v3/role_inferences', headers=self.headers) # There should be three role inferences: two from the defaults and # one from the test setup self.assertEqual(3, len(r.json['role_inferences'])) class _SystemReaderAndMemberImpliedRoleTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_implied_roles(self): with self.test_client() as c: c.put( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_implied_roles(self): PROVIDERS.role_api.create_implied_role( self.prior_role_id, self.implied_role_id ) with self.test_client() as c: c.delete( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ImpliedRolesSetupMixin, _SystemUserImpliedRoleTests, _SystemReaderAndMemberImpliedRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self._create_test_roles() system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ImpliedRolesSetupMixin, _SystemUserImpliedRoleTests, _SystemReaderAndMemberImpliedRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self._create_test_roles() system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ImpliedRolesSetupMixin, _SystemUserImpliedRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self._create_test_roles() # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_implied_roles(self): with self.test_client() as c: c.put( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_delete_implied_roles(self): PROVIDERS.role_api.create_implied_role( self.prior_role_id, self.implied_role_id ) with self.test_client() as c: c.delete( '/v3/roles/%s/implies/%s' % (self.prior_role_id, self.implied_role_id), headers=self.headers, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_limits.py0000664000175000017500000007035200000000000024125 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def _create_limits_and_dependencies(domain_id=None): """Create limits and its dependencies for testing.""" if not domain_id: domain_id = CONF.identity.default_domain_id service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) registered_limit = registered_limits[0] domain_limit = unit.new_limit_ref( domain_id=domain_id, service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=10, id=uuid.uuid4().hex, ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain_id) ) project_limit = unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, id=uuid.uuid4().hex, ) limits = PROVIDERS.unified_limit_api.create_limits( [domain_limit, project_limit] ) project_limit_id = None domain_limit_id = None for limit in limits: if limit.get('domain_id'): domain_limit_id = limit['id'] else: project_limit_id = limit['id'] return (project_limit_id, domain_limit_id) class _UserLimitTests: """Common default functionality for all users except system admins.""" def test_user_can_get_limit_model(self): with self.test_client() as c: c.get('/v3/limits/model', headers=self.headers) def test_user_can_get_a_limit(self): limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits/%s' % limit_id, headers=self.headers) self.assertEqual(limit_id, 
r.json['limit']['id']) def test_user_can_list_limits(self): project_limit_id, domain_limit_id = _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits', headers=self.headers) self.assertTrue(len(r.json['limits']) == 2) result = [] for limit in r.json['limits']: result.append(limit['id']) self.assertIn(project_limit_id, result) self.assertIn(domain_limit_id, result) def test_user_cannot_create_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) create = { 'limits': [ unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits(self): limit_id, _ = _create_limits_and_dependencies() update = {'limits': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/limits/%s' % limit_id, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits(self): limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.delete( '/v3/limits/%s' % limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', 
enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_get_a_limit(self): limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits/%s' % limit_id, headers=self.headers) self.assertEqual(limit_id, r.json['limit']['id']) def test_user_can_list_limits(self): project_limit_id, domain_limit_id = _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits', headers=self.headers) self.assertTrue(len(r.json['limits']) == 2) result = [] for limit in r.json['limits']: result.append(limit['id']) self.assertIn(project_limit_id, result) self.assertIn(domain_limit_id, result) def test_user_can_create_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, 
unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) create = { 'limits': [ unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post('/v3/limits', json=create, headers=self.headers) def test_user_can_update_limits(self): limit_id, _ = _create_limits_and_dependencies() update = {'limits': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/limits/%s' % limit_id, json=update, headers=self.headers ) def test_user_can_delete_limits(self): limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.delete('/v3/limits/%s' % limit_id, headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_get_project_limits_within_domain(self): project_limit_id, _ = _create_limits_and_dependencies( domain_id=self.domain_id ) with self.test_client() as c: c.get('/v3/limits/%s' % project_limit_id, headers=self.headers) def test_user_can_get_domain_limits(self): _, domain_limit_id = _create_limits_and_dependencies( domain_id=self.domain_id ) with self.test_client() as c: r = c.get('/v3/limits/%s' % domain_limit_id, headers=self.headers) self.assertEqual(self.domain_id, r.json['limit']['domain_id']) def test_user_cannot_get_project_limit_outside_domain(self): project_limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.get( '/v3/limits/%s' % project_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_limits_for_other_domain(self): _, domain_limit_id = _create_limits_and_dependencies() with self.test_client() as c: c.get( '/v3/limits/%s' % domain_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_limits_within_domain(self): project_limit_id, domain_limit_id = _create_limits_and_dependencies( domain_id=self.domain_id ) with self.test_client() as c: r = c.get('/v3/limits', headers=self.headers) result = [] for limit in r.json['limits']: result.append(limit['id']) self.assertEqual(2, len(r.json['limits'])) self.assertIn(project_limit_id, result) self.assertIn(domain_limit_id, result) def test_user_cannot_list_limits_outside_domain(self): _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits', headers=self.headers) self.assertEqual(0, len(r.json['limits'])) def test_user_cannot_create_limits_for_domain(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = 
unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] create = { 'limits': [ unit.new_limit_ref( domain_id=self.domain_id, service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_limits_for_other_domain(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] create = { 'limits': [ unit.new_limit_ref( domain_id=CONF.identity.default_domain_id, service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_limits_for_projects_in_domain(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) create = { 'limits': [ unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_limits_for_projects_outside_domain(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) create = { 'limits': [ unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits_for_domain(self): _, domain_limit_id = _create_limits_and_dependencies( domain_id=self.domain_id ) update = {'limit': {'resource_limit': 1}} with self.test_client() as c: c.patch( '/v3/limits/%s' % domain_limit_id, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits_for_other_domain(self): _, domain_limit_id = _create_limits_and_dependencies() update = {'limit': {'resource_limit': 1}} with self.test_client() as c: c.patch( '/v3/limits/%s' % domain_limit_id, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits_for_projects_in_domain(self): project_limit_id, _ = _create_limits_and_dependencies( domain_id=self.domain_id ) update = {'limit': {'resource_limit': 1}} with self.test_client() as c: c.patch( '/v3/limits/%s' % project_limit_id, headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits_for_projects_outside_domain(self): project_limit_id, _ = _create_limits_and_dependencies() update 
= {'limit': {'resource_limit': 1}} with self.test_client() as c: c.patch( '/v3/limits/%s' % project_limit_id, headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits_for_domain(self): _, domain_limit_id = _create_limits_and_dependencies( domain_id=self.domain_id ) with self.test_client() as c: c.delete( '/v3/limits/%s' % domain_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits_for_other_domain(self): _, domain_limit_id = _create_limits_and_dependencies() with self.test_client() as c: c.delete( '/v3/limits/%s' % domain_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits_for_projects_in_domain(self): project_limit_id, _ = _create_limits_and_dependencies( domain_id=self.domain_id ) with self.test_client() as c: c.delete( '/v3/limits/%s' % project_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits_for_projects_outside_domain(self): project_limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.delete( '/v3/limits/%s' % project_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_get_project_limit(self): project_limit_id, _ = _create_limits_and_dependencies() limit = PROVIDERS.unified_limit_api.get_limit(project_limit_id) # NOTE(lbragstad): Project users are only allowed to list limits for a # project if they actually have a role assignment on the project and # call the API with a project-scoped token. PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=limit['project_id'], ) auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=limit['project_id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} with self.test_client() as c: r = c.get('/v3/limits/%s' % project_limit_id, headers=headers) def test_user_cannot_get_project_limit_without_role_assignment(self): project_limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.get( '/v3/limits/%s' % project_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_domain_limit(self): _, domain_limit_id = _create_limits_and_dependencies() with self.test_client() as c: c.get( '/v3/limits/%s' % domain_limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_limits(self): project_limit_id, _ = _create_limits_and_dependencies() limit = PROVIDERS.unified_limit_api.get_limit(project_limit_id) # NOTE(lbragstad): Project users are only allowed to list limits for a # project if they actually have a role assignment on the project and # call the API with a project-scoped token. 
PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=limit['project_id'], ) auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=limit['project_id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) token_id = r.headers['X-Subject-Token'] headers = {'X-Auth-Token': token_id} with self.test_client() as c: r = c.get('/v3/limits', headers=headers) self.assertTrue(len(r.json['limits']) == 1) self.assertEqual(project_limit_id, r.json['limits'][0]['id']) def test_user_cannot_list_limits_without_project_role_assignment(self): _create_limits_and_dependencies() with self.test_client() as c: r = c.get('/v3/limits', headers=self.headers) self.assertEqual(0, len(r.json['limits'])) def test_user_can_get_limit_model(self): with self.test_client() as c: c.get('/v3/limits/model', headers=self.headers) def test_user_cannot_create_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) registered_limits = ( PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) ) registered_limit = registered_limits[0] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) create = { 'limits': [ unit.new_limit_ref( project_id=project['id'], service_id=service['id'], resource_name=registered_limit['resource_name'], resource_limit=5, ) ] } with self.test_client() as c: c.post( '/v3/limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_limits(self): limit_id, _ = _create_limits_and_dependencies() update = {'limits': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/limits/%s' % limit_id, json=update, headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_limits(self): limit_id, _ = _create_limits_and_dependencies() with self.test_client() as c: c.delete( '/v3/limits/%s' % limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectUserTestsWithoutEnforceScope(ProjectUserTests): def setUp(self): super().setUp() self.config_fixture.config(group='oslo_policy', enforce_scope=False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_mappings.py0000664000175000017500000004165700000000000024450 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserMappingTests: """Common default functionality for all system users.""" def test_user_can_list_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: r = c.get('/v3/OS-FEDERATION/mappings', headers=self.headers) self.assertEqual(1, len(r.json['mappings'])) self.assertEqual(mapping['id'], r.json['mappings'][0]['id']) def test_user_can_get_a_mapping(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], headers=self.headers, ) class _SystemReaderAndMemberUserMappingTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_mappings(self): create = { 'mapping': { 'id': uuid.uuid4().hex, 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ], } } mapping_id = create['mapping']['id'] with self.test_client() as c: c.put( '/v3/OS-FEDERATION/mappings/%s' % mapping_id, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) update = { 'mapping': { 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ] } } with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_cannot_delete_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserMappingTests: def test_user_cannot_create_mappings(self): create = { 'mapping': { 'id': uuid.uuid4().hex, 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ], } } mapping_id = create['mapping']['id'] with self.test_client() as c: c.put( '/v3/OS-FEDERATION/mappings/%s' % mapping_id, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/mappings', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_mapping(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) update = { 'mapping': { 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ] } } with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/mappings/%s' % 
mapping['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserMappingTests, _SystemReaderAndMemberUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_cannot_create_mappings(self): create = { 'mapping': { 'id': uuid.uuid4().hex, 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ], } } mapping_id = create['mapping']['id'] with self.test_client() as c: c.put( '/v3/OS-FEDERATION/mappings/%s' % mapping_id, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) update = { 'mapping': { 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ] } } with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_mappings(self): mapping = unit.new_mapping_ref() mapping = 
PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserMappingTests, _SystemReaderAndMemberUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_mappings(self): create = { 'mapping': { 'id': uuid.uuid4().hex, 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ], } } mapping_id = create['mapping']['id'] with self.test_client() as c: c.put( '/v3/OS-FEDERATION/mappings/%s' % mapping_id, json=create, headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) update = { 'mapping': { 'rules': [ { 'local': [{'user': {'name': '{0}'}}], 'remote': [{'type': 'UserName'}], } ] } } with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], json=update, headers=self.headers, ) def test_user_can_delete_mappings(self): mapping = unit.new_mapping_ref() mapping = PROVIDERS.federation_api.create_mapping( mapping['id'], mapping ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/mappings/%s' % mapping['id'], headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab 
a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserMappingTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicitly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_policy.py0000664000175000017500000003401600000000000024120 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import json import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserPoliciesTests: """Common default functionality for all system users.""" def test_user_can_list_policies(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: r = c.get('/v3/policies', headers=self.headers) policies = [] for policy in r.json['policies']: policies.append(policy['id']) self.assertIn(policy['id'], policies) def test_user_can_get_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.get('/v3/policies/%s' % policy['id'], headers=self.headers) class _SystemReaderAndMemberPoliciesTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_policy(self): create = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, # Store serialized JSON data as the blob to mimic real world usage. 
'blob': json.dumps( { 'data': uuid.uuid4().hex, } ), 'type': uuid.uuid4().hex, } with self.test_client() as c: c.post( '/v3/policies', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) update = {'policy': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/policies/%s' % policy['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.delete( '/v3/policies/%s' % policy['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserPolicyTests: def test_user_cannot_list_policies(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.get( '/v3/policies', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.get( '/v3/policies/%s' % policy['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy(self): create = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, # Store serialized JSON data as the blob to mimic real world usage. 
'blob': json.dumps( { 'data': uuid.uuid4().hex, } ), 'type': uuid.uuid4().hex, } with self.test_client() as c: c.post( '/v3/policies', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) update = {'policy': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/policies/%s' % policy['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.delete( '/v3/policies/%s' % policy['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesTests, _SystemReaderAndMemberPoliciesTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesTests, _SystemReaderAndMemberPoliciesTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_policy(self): create = { 'policy': { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, # Store serialized JSON data as the blob to mimic real world # usage. 'blob': json.dumps( { 'data': uuid.uuid4().hex, } ), 'type': uuid.uuid4().hex, } } with self.test_client() as c: c.post('/v3/policies', json=create, headers=self.headers) def test_user_can_update_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) update = {'policy': {'name': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/policies/%s' % policy['id'], json=update, headers=self.headers, ) def test_user_can_delete_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) with self.test_client() as c: c.delete('/v3/policies/%s' % policy['id'], headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_policy_association.py0000664000175000017500000007063100000000000026517 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserPoliciesAssociationTests: """Common default functionality for all system users.""" def test_user_can_check_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_check_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], service_id=service['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_check_policy_association_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) 
PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], service_id=service['id'], region_id=region['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_get_policy_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: c.get( '/v3/endpoints/%s/OS-ENDPOINT-POLICY/policy' % (endpoint['id']), headers=self.headers, ) def test_user_list_endpoints_for_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: r = c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints' % (policy['id']), headers=self.headers, ) for endpoint_itr in r.json['endpoints']: self.assertIn(endpoint['id'], endpoint_itr['id']) class _SystemReaderAndMemberPoliciesAssociationTests: def test_user_cannot_create_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = 
PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy_assoc_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = 
PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_assoc_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserPolicyAssociationsTests: def test_user_cannot_check_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], service_id=service['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], 
service['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_policy_association_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], service_id=service['id'], region_id=region['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_policy_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: c.get( '/v3/endpoints/%s/OS-ENDPOINT-POLICY/policy' % (endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_endpoints_for_policy(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.endpoint_policy_api.create_policy_association( policy['id'], endpoint['id'] ) with self.test_client() as c: c.get( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints' % (policy['id']), headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) with self.test_client() as c: c.delete( 
'/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_policy_assoc_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_policy_assoc_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesAssociationTests, _SystemReaderAndMemberPoliciesAssociationTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and 
prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesAssociationTests, _SystemReaderAndMemberPoliciesAssociationTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserPoliciesAssociationTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_delete_policy_association_for_endpoint(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/endpoints/%s' % (policy['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_create_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_delete_policy_association_for_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, 
unit.new_service_ref() ) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s' % (policy['id'], service['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_create_policy_association_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.put( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_delete_policy_association_for_region_and_service(self): policy = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy(policy['id'], policy) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete( '/v3/policies/%s/OS-ENDPOINT-POLICY/services/%s/regions/%s' % (policy['id'], service['id'], region['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyAssociationsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = 
self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyAssociationsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserPolicyAssociationsTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_project_endpoint.py0000664000175000017500000004503000000000000026165 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import base as bp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserProjectEndpointTests: """Common default functionality for all system users.""" def test_user_can_list_projects_for_endpoint(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: r = c.get( '/v3/OS-EP-FILTER/endpoints/%s/projects' % endpoint['id'], headers=self.headers, ) for project_itr in r.json['projects']: self.assertIn(project['id'], project_itr['id']) def test_user_can_check_endpoint_in_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def 
test_user_can_list_endpoints_for_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: r = c.get( '/v3/OS-EP-FILTER/projects/%s/endpoints' % project['id'], headers=self.headers, ) for endpoint_itr in r.json['endpoints']: self.assertIn(endpoint['id'], endpoint_itr['id']) class _SystemReaderAndMemberProjectEndpointTests: def test_user_cannot_add_endpoint_to_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.put( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_remove_endpoint_from_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class 
_DomainAndProjectUserProjectEndpointTests: def test_user_cannot_list_projects_for_endpoint(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/endpoints/%s/projects' % endpoint['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_endpoint_in_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_endpoints_for_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: c.get( '/v3/OS-EP-FILTER/projects/%s/endpoints' % project['id'], 
headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserProjectEndpointTests, _SystemReaderAndMemberProjectEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserProjectEndpointTests, _SystemReaderAndMemberProjectEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserProjectEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_add_endpoint_to_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], endpoint ) with self.test_client() as c: c.put( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_remove_endpoint_from_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) endpoint = unit.new_endpoint_ref(service['id'], region_id=None) endpoint = PROVIDERS.catalog_api.create_endpoint( endpoint['id'], 
endpoint ) PROVIDERS.catalog_api.add_endpoint_to_project( endpoint['id'], project['id'] ) with self.test_client() as c: c.delete( '/v3/OS-EP-FILTER/projects/%s/endpoints/%s' % (project['id'], endpoint['id']), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserProjectEndpointTests, _SystemReaderAndMemberProjectEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserProjectEndpointTests, _SystemReaderAndMemberProjectEndpointTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserProjectEndpointTests, _SystemReaderAndMemberProjectEndpointTests, ): def _override_policy(self): # TODO(cmurphy): Remove this once the deprecated policies in # keystone.common.policies.project_endpoint have been removed. This is # only here to make sure we test the new policies instead of the # deprecated ones. Oslo.policy will OR deprecated policies with new # policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_projects_for_endpoint': bp.SYSTEM_READER, 'identity:add_endpoint_to_project': bp.SYSTEM_ADMIN, 'identity:check_endpoint_in_project': bp.SYSTEM_READER, 'identity:list_endpoints_for_project': bp.SYSTEM_READER, 'identity:remove_endpoint_from_project': bp.SYSTEM_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() # Explicity set enforce_scope to False to make sure we maintain # backwards compatibility with project users. self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_project_tags.py0000664000175000017500000010443300000000000025306 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import project as pp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def _override_policy(policy_file): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.project have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(policy_file, 'w') as f: overridden_policies = { 'identity:get_project_tag': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER ), 'identity:list_project_tags': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER ), 'identity:create_project_tag': ( pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN ), 'identity:update_project_tags': ( pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN ), 'identity:delete_project_tag': ( pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN ), 'identity:delete_project_tags': ( pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN_OR_PROJECT_ADMIN ), } f.write(jsonutils.dumps(overridden_policies)) class _SystemUserTests: def test_user_can_get_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_list_project_tags(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: r = c.get( '/v3/projects/%s/tags' % project['id'], headers=self.headers ) self.assertTrue(len(r.json['tags']) == 1) self.assertEqual(tag, r.json['tags'][0]) class _SystemMemberAndReaderTagTests: def test_user_cannot_create_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_project_tag(self): project = 
PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserTagTests: def test_user_cannot_create_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: 
c.delete( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.OK, ) def test_user_can_delete_project_tag(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( 
'/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, ) class _DomainUserTagTests: def test_user_can_get_tag_for_project_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_list_tags_for_project_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: r = c.get( '/v3/projects/%s/tags' % project['id'], headers=self.headers ) self.assertTrue(len(r.json['tags']) == 1) self.assertEqual(tag, r.json['tags'][0]) def test_user_cannot_create_project_tag_outside_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_project_tag_outside_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) new_tag = uuid.uuid4().hex update = {"tags": [new_tag]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_project_tag_outside_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, 
unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_tag_for_project_outside_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_tags_for_project_outside_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/%s/tags' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainMemberAndReaderTagTests: def test_user_cannot_create_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) new_tag = uuid.uuid4().hex update = {"tags": [new_tag]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, 
json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class DomainAdminUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTagTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) new_tag = uuid.uuid4().hex update = {"tags": [new_tag]} with self.test_client() as c: r = c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.OK, ) self.assertTrue(len(r.json['tags']) == 1) self.assertEqual(new_tag, r.json['tags'][0]) def test_user_can_delete_project_tag_in_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, ) class DomainMemberUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTagTests, _DomainMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = 
PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainReaderUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUserTagTests, _DomainMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class _ProjectUserTagTests: def test_user_can_get_tag_for_project(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) with self.test_client() as c: c.get( f'/v3/projects/{self.project_id}/tags/{tag}', headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_list_tags_for_project(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) with self.test_client() as c: r = c.get( '/v3/projects/%s/tags' % self.project_id, headers=self.headers ) self.assertTrue(len(r.json['tags']) == 1) self.assertEqual(tag, r.json['tags'][0]) def test_user_cannot_create_tag_for_other_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex with self.test_client() as c: c.put( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_tag_for_other_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % project['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_tag_for_other_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.delete( 
'/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_tag_for_other_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/{}/tags/{}'.format(project['id'], tag), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_tags_for_other_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) with self.test_client() as c: c.get( '/v3/projects/%s/tags' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _ProjectMemberAndReaderTagTests: def test_user_cannot_create_project_tag(self): tag = uuid.uuid4().hex with self.test_client() as c: c.put( f'/v3/projects/{self.project_id}/tags/{tag}', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_project_tag(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % self.project_id, headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_project_tag(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) with self.test_client() as c: c.delete( f'/v3/projects/{self.project_id}/tags/{tag}', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserTagTests, ): def setUp(self): super().setUp() 
self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, project_id=self.bootstrapper.project_id, ) self.project_id = self.bootstrapper.project_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_project_tag(self): tag = uuid.uuid4().hex with self.test_client() as c: c.put( f'/v3/projects/{self.project_id}/tags/{tag}', headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_project_tag(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) update = {"tags": [uuid.uuid4().hex]} with self.test_client() as c: c.put( '/v3/projects/%s/tags' % self.project_id, headers=self.headers, json=update, expected_status_code=http.client.OK, ) def test_user_can_delete_project_tag(self): tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(self.project_id, tag) with self.test_client() as c: c.delete( f'/v3/projects/{self.project_id}/tags/{tag}', headers=self.headers, ) class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserTagTests, _ProjectMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = 
self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) self.project_id = project['id'] self.user_id = self.bootstrapper.admin_user_id PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _ProjectUserTagTests, _ProjectMemberAndReaderTagTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) _override_policy(self.policy_file_name) self.config_fixture.config(group='oslo_policy', enforce_scope=True) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) self.project_id = project['id'] self.user_id = self.bootstrapper.admin_user_id PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, 
password=self.bootstrapper.admin_password, project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_projects.py0000664000175000017500000010504000000000000024446 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import project as pp from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserTests: """Common default functionality for all system users.""" def test_user_can_list_projects(self): PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: r = c.get('/v3/projects', headers=self.headers) self.assertEqual(2, len(r.json['projects'])) def test_user_can_list_projects_for_other_users(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref( CONF.identity.default_domain_id, id=uuid.uuid4().hex ) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/users/%s/projects' % user['id'], headers=self.headers, ) self.assertEqual(1, len(r.json['projects'])) self.assertEqual(project['id'], r.json['projects'][0]['id']) def test_user_can_get_a_project(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: r = c.get('/v3/projects/%s' % project['id'], headers=self.headers) self.assertEqual(project['id'], r.json['project']['id']) def test_user_cannot_get_non_existent_project_not_found(self): with self.test_client() as c: c.get( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, 
expected_status_code=http.client.NOT_FOUND, ) class _SystemMemberAndReaderProjectTests: """Common default functionality for system members and system readers.""" def test_user_cannot_create_projects(self): create = { 'project': unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/projects', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_project_forbidden(self): update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_project_forbidden(self): with self.test_client() as c: c.delete( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainUsersTests: """Common default functionality for all domain users.""" def test_user_can_list_projects_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/projects', headers=self.headers) self.assertEqual(1, 
len(r.json['projects'])) self.assertEqual(project['id'], r.json['projects'][0]['id']) def test_user_cannot_list_projects_in_other_domain(self): PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: r = c.get('/v3/projects', headers=self.headers) self.assertEqual(0, len(r.json['projects'])) def test_user_can_get_a_project_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/projects/%s' % project['id'], headers=self.headers) self.assertEqual(project['id'], r.json['project']['id']) def test_user_cannot_get_a_project_in_other_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.get( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_projects_for_user_in_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(self.domain_id, id=uuid.uuid4().hex) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: r = c.get( '/v3/users/%s/projects' % user['id'], headers=self.headers ) self.assertEqual(1, len(r.json['projects'])) self.assertEqual(project['id'], r.json['projects'][0]['id']) def test_user_cannot_list_projects_for_user_in_other_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref( CONF.identity.default_domain_id, id=uuid.uuid4().hex ) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) 
PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/users/%s/projects' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainMemberAndReaderProjectTests: """Common default functionality for domain member and domain readers.""" def test_user_cannot_create_projects_within_domain(self): create = {'project': unit.new_project_ref(domain_id=self.domain_id)} with self.test_client() as c: c.post( '/v3/projects', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_projects_in_other_domains(self): create = { 'project': unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/projects', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_projects_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_projects_in_other_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_project_forbidden(self): update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) 
def test_user_cannot_delete_projects_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_projects_in_other_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_projects_forbidden(self): with self.test_client() as c: c.delete( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemMemberAndReaderProjectTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, _SystemMemberAndReaderProjectTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_projects(self): create = { 'project': unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post('/v3/projects', json=create, headers=self.headers) def test_user_can_update_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, ) def test_user_can_update_non_existent_project_not_found(self): update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_delete_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete('/v3/projects/%s' % project['id'], headers=self.headers) def test_user_can_delete_non_existent_project_not_found(self): with self.test_client() as c: c.delete( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_list_their_projects(self): other_project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user_project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=user_project['id'], ) with self.test_client() as c: r = c.get( '/v3/users/%s/projects' % 
self.user_id, headers=self.headers, ) self.assertEqual(2, len(r.json['projects'])) project_ids = [] for project in r.json['projects']: project_ids.append(project['id']) self.assertIn(user_project['id'], project_ids) self.assertIn(self.bootstrapper.project_id, project_ids) self.assertNotIn(other_project['id'], project_ids) class DomainReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUsersTests, _DomainMemberAndReaderProjectTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUsersTests, _DomainMemberAndReaderProjectTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainUsersTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.project have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_project': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER ), 'identity:list_user_projects': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER ), 'identity:list_projects': (pp.SYSTEM_READER_OR_DOMAIN_READER), 'identity:create_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) def test_user_can_create_projects_within_domain(self): create = {'project': unit.new_project_ref(domain_id=self.domain_id)} with self.test_client() as c: c.post('/v3/projects', json=create, headers=self.headers) def test_user_cannot_create_projects_in_other_domains(self): create = { 'project': unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/projects', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_update_projects_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, ) def test_user_cannot_update_projects_in_other_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_project_forbidden(self): # Because domain users operate outside of system scope, we can't # confidently return a Not Found here because they aren't system users. 
# The best we can do is return a Forbidden because we need the # project's domain in order to resolve the policy check, and the # project doesn't exist. This errors on the side of opacity and returns # a 403 instead of a 404. update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_delete_projects_within_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete('/v3/projects/%s' % project['id'], headers=self.headers) def test_user_cannot_delete_projects_in_other_domain(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_projects_forbidden(self): # Because domain users operate outside of system scope, we can't # confidently return a Not Found here because they aren't system users. # The best we can do is return a Forbidden because we need the # project's domain in order to resolve the policy check, and the # project doesn't exist. This errors on the side of opacity and returns # a 403 instead of a 404. 
with self.test_client() as c: c.delete( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.bootstrapper.project_id, ) self.project_id = self.bootstrapper.project_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.project have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_project': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_PROJECT_USER ), 'identity:list_user_projects': ( pp.SYSTEM_READER_OR_DOMAIN_READER_OR_OWNER ), 'identity:list_projects': (pp.SYSTEM_READER_OR_DOMAIN_READER), 'identity:create_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_project': pp.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) def test_user_cannot_list_projects(self): # This test is assuming the user calling the API has a role assignment # on the project created by ``keystone-manage bootstrap``. with self.test_client() as c: c.get( '/v3/projects', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_projects_for_others(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref( CONF.identity.default_domain_id, id=uuid.uuid4().hex ) ) project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) with self.test_client() as c: c.get( '/v3/users/%s/projects' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_their_projects(self): # Users can get this information from the GET /v3/auth/projects API or # the GET /v3/users/{user_id}/projects API. The GET /v3/projects API is # administrative, reserved for system and domain users. 
with self.test_client() as c: r = c.get( '/v3/users/%s/projects' % self.user_id, headers=self.headers, ) self.assertEqual(1, len(r.json['projects'])) self.assertEqual(self.project_id, r.json['projects'][0]['id']) def test_user_can_get_their_project(self): with self.test_client() as c: c.get('/v3/projects/%s' % self.project_id, headers=self.headers) def test_user_cannot_get_other_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.get( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_projects(self): create = { 'project': unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) } with self.test_client() as c: c.post( '/v3/projects', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % project['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_project_forbidden(self): update = {'project': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/projects/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_projects(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) with self.test_client() as c: c.delete( '/v3/projects/%s' % project['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_project_forbidden(self): 
with self.test_client() as c: c.delete( '/v3/projects/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_protocols.py0000664000175000017500000004172300000000000024650 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _CommonUtilities: def _create_protocol_and_deps(self): identity_provider = unit.new_identity_provider_ref() identity_provider = PROVIDERS.federation_api.create_idp( identity_provider['id'], identity_provider ) mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) protocol = unit.new_protocol_ref(mapping_id=mapping['id']) protocol = PROVIDERS.federation_api.create_protocol( identity_provider['id'], protocol['id'], protocol ) return (protocol, mapping, identity_provider) class _SystemUserProtocolTests: """Common default functionality for all system users.""" def test_user_can_list_protocols(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with 
self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/%s/protocols' % identity_provider['id'] ) r = c.get(path, headers=self.headers) self.assertEqual(1, len(r.json['protocols'])) self.assertEqual(protocol['id'], r.json['protocols'][0]['id']) def test_user_can_get_a_protocol(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.get(path, headers=self.headers) class _SystemReaderAndMemberProtocolTests: def test_user_cannot_create_protocols(self): identity_provider = unit.new_identity_provider_ref() identity_provider = PROVIDERS.federation_api.create_idp( identity_provider['id'], identity_provider ) mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) protocol_id = 'saml2' create = {'protocol': {'mapping_id': mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol_id, ) ) c.put( path, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_protocols(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() new_mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) update = {'protocol': {'mapping_id': new_mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_protocol(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.delete( path, 
headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserProtocolTests: def test_user_cannot_create_protocols(self): identity_provider = unit.new_identity_provider_ref() identity_provider = PROVIDERS.federation_api.create_idp( identity_provider['id'], identity_provider ) mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) protocol_id = 'saml2' create = {'protocol': {'mapping_id': mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol_id, ) ) c.put( path, json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_protocols(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() new_mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) update = {'protocol': {'mapping_id': new_mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.patch( path, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_protocol(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.delete( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_protocols(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/%s/protocols' % identity_provider['id'] ) c.get( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_protocol(self): protocol, mapping, identity_provider = 
self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.get( path, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _SystemUserProtocolTests, _SystemReaderAndMemberProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _SystemUserProtocolTests, _SystemReaderAndMemberProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _SystemUserProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_protocols(self): identity_provider = unit.new_identity_provider_ref() identity_provider = PROVIDERS.federation_api.create_idp( identity_provider['id'], identity_provider ) mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) protocol_id = 'saml2' create = {'protocol': {'mapping_id': mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol_id, ) ) c.put( path, json=create, headers=self.headers, expected_status_code=http.client.CREATED, ) def test_user_can_update_protocols(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() new_mapping = PROVIDERS.federation_api.create_mapping( uuid.uuid4().hex, unit.new_mapping_ref() ) update = {'protocol': {'mapping_id': new_mapping['id']}} with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.patch(path, json=update, headers=self.headers) def test_user_can_delete_protocol(self): protocol, mapping, identity_provider = self._create_protocol_and_deps() with self.test_client() as c: path = ( '/v3/OS-FEDERATION/identity_providers/{}/protocols/{}'.format( identity_provider['id'], protocol['id'], ) ) c.delete(path, headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _DomainAndProjectUserProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = 
unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _DomainAndProjectUserProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUtilities, _DomainAndProjectUserProtocolTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_regions.py0000664000175000017500000003017300000000000024267 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _UserRegionTests: """Common default functionality for all users.""" def test_user_can_get_a_region(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.get('/v3/regions/%s' % region['id'], headers=self.headers) def test_user_can_list_regions(self): expected_regions = [] for _ in range(2): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) expected_regions.append(region['id']) with self.test_client() as c: r = c.get('/v3/regions', headers=self.headers) for region in r.json['regions']: self.assertIn(region['id'], expected_regions) class _SystemReaderAndMemberUserRegionTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_regions(self): create = {'region': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.post( '/v3/regions', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: update = {'region': {'description': uuid.uuid4().hex}} c.patch( '/v3/regions/%s' % region['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete( '/v3/regions/%s' % region['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserRegionTests: """Common default functionality for all domain and project users.""" def 
test_user_cannot_create_regions(self): create = {'region': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.post( '/v3/regions', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: update = {'region': {'description': uuid.uuid4().hex}} c.patch( '/v3/regions/%s' % region['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete( '/v3/regions/%s' % region['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, _SystemReaderAndMemberUserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, _SystemReaderAndMemberUserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_regions(self): create = {'region': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.post('/v3/regions', json=create, headers=self.headers) def test_user_can_update_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: update = {'region': {'description': uuid.uuid4().hex}} c.patch( '/v3/regions/%s' % region['id'], json=update, headers=self.headers, ) def test_user_can_delete_regions(self): region = PROVIDERS.catalog_api.create_region(unit.new_region_ref()) with self.test_client() as c: c.delete('/v3/regions/%s' % region['id'], headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, _DomainAndProjectUserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, _DomainAndProjectUserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegionTests, _DomainAndProjectUserRegionTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_registered_limits.py0000664000175000017500000003466500000000000026351 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _UserRegisteredLimitTests: """Common default functionality for all users except system admins.""" def test_user_can_get_a_registered_limit(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: r = c.get( '/v3/registered_limits/%s' % limit_id, headers=self.headers ) self.assertEqual(limit_id, r.json['registered_limit']['id']) def test_user_can_list_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: r = c.get('/v3/registered_limits', headers=self.headers) self.assertTrue(len(r.json['registered_limits']) == 1) self.assertEqual(limit_id, r.json['registered_limits'][0]['id']) def test_user_cannot_create_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) create = { 'registered_limits': [ unit.new_registered_limit_ref(service_id=service['id']) ] } with self.test_client() as c: c.post( '/v3/registered_limits', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_registered_limits(self): service = PROVIDERS.catalog_api.create_service( 
uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: update = {'registered_limit': {'default_limit': 5}} c.patch( '/v3/registered_limits/%s' % limit_id, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: c.delete( '/v3/registered_limits/%s' % limit_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegisteredLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegisteredLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_get_a_registered_limit(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: r = c.get( '/v3/registered_limits/%s' % limit_id, headers=self.headers ) self.assertEqual(limit_id, r.json['registered_limit']['id']) def test_user_can_list_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: r = c.get('/v3/registered_limits', headers=self.headers) self.assertTrue(len(r.json['registered_limits']) == 1) self.assertEqual(limit_id, r.json['registered_limits'][0]['id']) def test_user_can_create_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) create = { 'registered_limits': [ unit.new_registered_limit_ref(service_id=service['id']) ] } with self.test_client() as c: c.post('/v3/registered_limits', json=create, headers=self.headers) def test_user_can_update_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: update = {'registered_limit': {'default_limit': 5}} 
c.patch( '/v3/registered_limits/%s' % limit_id, json=update, headers=self.headers, ) def test_user_can_delete_registered_limits(self): service = PROVIDERS.catalog_api.create_service( uuid.uuid4().hex, unit.new_service_ref() ) registered_limit = unit.new_registered_limit_ref( service_id=service['id'], id=uuid.uuid4().hex ) limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) limit_id = limits[0]['id'] with self.test_client() as c: c.delete( '/v3/registered_limits/%s' % limit_id, headers=self.headers ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegisteredLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegisteredLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _UserRegisteredLimitTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_roles.py0000664000175000017500000003151700000000000023750 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserRoleTests: """Common default functionality for all system users.""" def test_user_can_list_roles(self): PROVIDERS.role_api.create_role(uuid.uuid4().hex, unit.new_role_ref()) with self.test_client() as c: r = c.get('/v3/roles', headers=self.headers) # With bootstrap setup and the role we just created, there should # be five roles present in the deployment. Bootstrap creates # ``service``, ``admin``, ``member``, and ``reader``. self.assertEqual(5, len(r.json['roles'])) def test_user_can_get_a_role(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) with self.test_client() as c: r = c.get('/v3/roles/%s' % role['id'], headers=self.headers) self.assertEqual(role['id'], r.json['role']['id']) class _SystemReaderAndMemberRoleTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_roles(self): create = {'role': unit.new_role_ref()} with self.test_client() as c: c.post( '/v3/roles', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) with self.test_client() as c: c.delete( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class 
_DomainAndProjectUserRoleTests: """Common functionality for all domain and project users.""" def test_user_cannot_list_roles(self): PROVIDERS.role_api.create_role(uuid.uuid4().hex, unit.new_role_ref()) with self.test_client() as c: c.get( '/v3/roles', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_role(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) with self.test_client() as c: c.get( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_roles(self): create = {'role': unit.new_role_ref()} with self.test_client() as c: c.post( '/v3/roles', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) with self.test_client() as c: c.delete( '/v3/roles/%s' % role['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserRoleTests, _SystemReaderAndMemberRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, 
password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserRoleTests, _SystemReaderAndMemberRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_roles(self): create = {'role': unit.new_role_ref()} with self.test_client() as c: c.post('/v3/roles', json=create, headers=self.headers) def test_user_can_update_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) update = {'role': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/roles/%s' % role['id'], json=update, headers=self.headers, ) def test_user_can_delete_roles(self): role = PROVIDERS.role_api.create_role( uuid.uuid4().hex, unit.new_role_ref() ) with self.test_client() as c: c.delete('/v3/roles/%s' % role['id'], headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserRoleTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_service_providers.py0000664000175000017500000003552200000000000026361 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserServiceProviderTests: """Common default functionality for all system users.""" def test_user_can_list_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: r = c.get( '/v3/OS-FEDERATION/service_providers', headers=self.headers ) self.assertEqual(1, len(r.json['service_providers'])) self.assertEqual( service_provider['id'], r.json['service_providers'][0]['id'] ) def test_user_can_get_a_service_provider(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: r = c.get( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, ) self.assertEqual( service_provider['id'], r.json['service_provider']['id'] ) class _SystemReaderAndMemberUserServiceProviderTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) service_provider = unit.new_service_provider_ref() create = {'service_provider': service_provider} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex, headers=self.headers, json=create, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) update = {'service_provider': {'enabled': False}} with self.test_client() as c: c.patch( 
'/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserServiceProviderTests: """Common functionality for all domain and project users.""" def test_user_cannot_create_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) service_provider = unit.new_service_provider_ref() create = {'service_provider': service_provider} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex, headers=self.headers, json=create, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) update = {'service_provider': {'enabled': False}} with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, json=update, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_service_providers(self): PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/service_providers', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_service_provider(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: c.get( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceProviderTests, _SystemReaderAndMemberUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceProviderTests, _SystemReaderAndMemberUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) service_provider = unit.new_service_provider_ref() create = {'service_provider': service_provider} with self.test_client() as c: c.put( '/v3/OS-FEDERATION/service_providers/%s' % uuid.uuid4().hex, headers=self.headers, json=create, expected_status_code=http.client.CREATED, ) def test_user_can_update_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) update = {'service_provider': {'enabled': False}} with self.test_client() as c: c.patch( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, json=update, ) def test_user_can_delete_service_providers(self): service_provider = PROVIDERS.federation_api.create_sp( uuid.uuid4().hex, unit.new_service_provider_ref() ) with self.test_client() as c: c.delete( '/v3/OS-FEDERATION/service_providers/%s' % service_provider['id'], headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, 
password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceProviderTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_services.py0000664000175000017500000003277000000000000024451 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserServiceTests: """Common default functionality for all system users.""" def test_user_can_list_services(self): expected_service_ids = [] for _ in range(2): s = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(s['id'], s) expected_service_ids.append(service['id']) with self.test_client() as c: r = c.get('/v3/services', headers=self.headers) actual_service_ids = [] for service in r.json['services']: actual_service_ids.append(service['id']) for service_id in expected_service_ids: self.assertIn(service_id, actual_service_ids) def test_user_can_get_a_service(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: r = c.get('/v3/services/%s' % service['id'], headers=self.headers) self.assertEqual(r.json['service']['id'], service['id']) class _SystemReaderAndMemberUserServiceTests: """Common default functionality for system readers and system members.""" def test_user_cannot_create_services(self): create = { 'service': { 'type': uuid.uuid4().hex, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post( '/v3/services', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_services(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) update = {'service': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/services/%s' % service['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_services(self): service = 
unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: c.delete( '/v3/services/%s' % service['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserServiceTests: def test_user_cannot_create_services(self): create = { 'service': { 'type': uuid.uuid4().hex, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post( '/v3/services', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_services(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: c.get( '/v3/services', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_a_service(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: c.get( '/v3/services/%s' % service['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_services(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) update = {'service': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/services/%s' % service['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_services(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: c.delete( '/v3/services/%s' % service['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceTests, _SystemReaderAndMemberUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) 
self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceTests, _SystemReaderAndMemberUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) # Reuse the system administrator account created during # ``keystone-manage bootstrap`` self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_services(self): create = { 'service': { 'type': uuid.uuid4().hex, 'name': uuid.uuid4().hex, } } with self.test_client() as c: c.post('/v3/services', json=create, headers=self.headers) def test_user_can_update_services(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) update = {'service': {'description': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/services/%s' % service['id'], json=update, headers=self.headers, ) def test_user_can_delete_services(self): service = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service(service['id'], service) with self.test_client() as c: c.delete('/v3/services/%s' % service['id'], headers=self.headers) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) 
self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTestsWithoutEnforceScope( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserServiceTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) # Explicityly set enforce_scope to False to make sure we maintain # backwards compatibility with project users. 
self.config_fixture.config(group='oslo_policy', enforce_scope=False) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) self.user_id = PROVIDERS.identity_api.create_user(user)['id'] self.project_id = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=domain['id']) )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=user['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_system_assignments.py0000664000175000017500000005541300000000000026564 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import base from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserSystemAssignmentTests: def test_user_can_list_user_system_role_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: r = c.get( '/v3/system/users/%s/roles' % user['id'], headers=self.headers ) self.assertEqual(1, len(r.json['roles'])) self.assertEqual( self.bootstrapper.member_role_id, r.json['roles'][0]['id'] ) def test_user_can_check_user_system_role_assignment(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.get( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_user_can_list_group_system_role_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: r = c.get( '/v3/system/groups/%s/roles' % group['id'], headers=self.headers, ) self.assertEqual(1, len(r.json['roles'])) self.assertEqual( self.bootstrapper.member_role_id, r.json['roles'][0]['id'] ) def test_user_can_check_group_system_role_assignments(self): group 
= PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.get( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) class _SystemMemberAndReaderSystemAssignmentTests: def test_user_cannot_grant_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_grant_group_system_assignment(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_group_system_assignment(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, 
expected_status_code=http.client.FORBIDDEN, ) class _DomainAndProjectUserSystemAssignmentTests: def test_user_cannot_list_system_role_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.get( '/v3/system/users/%s/roles' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_user_system_role_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.get( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_grant_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_group_system_role_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with 
self.test_client() as c: c.get( '/v3/system/groups/%s/roles' % group['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_check_group_system_role_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.get( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_grant_group_system_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_group_system_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserSystemAssignmentTests, _SystemMemberAndReaderSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth 
= self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserSystemAssignmentTests, _SystemMemberAndReaderSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) self.expected = [ # assignment of the user running the test case { 'user_id': self.user_id, 'system': 'all', 'role_id': self.bootstrapper.member_role_id, } ] auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id self.expected = [] auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_grant_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, ) def test_user_can_revoke_system_assignments(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/users/%s/roles/%s' % (user['id'], self.bootstrapper.member_role_id), headers=self.headers, ) def test_user_can_grant_group_system_assignments(self): group = PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) with self.test_client() as c: c.put( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, ) def test_user_can_revoke_group_system_assignments(self): group = 
PROVIDERS.identity_api.create_group( unit.new_group_ref(CONF.identity.default_domain_id) ) PROVIDERS.assignment_api.create_system_grant_for_group( group['id'], self.bootstrapper.member_role_id ) with self.test_client() as c: c.delete( '/v3/system/groups/%s/roles/%s' % (group['id'], self.bootstrapper.member_role_id), headers=self.headers, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.domain_user_id = PROVIDERS.identity_api.create_user(domain_user)[ 'id' ] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.domain_user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.domain_user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) project_reader_id = PROVIDERS.identity_api.create_user(project_reader)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=project_reader_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_reader_id, password=project_reader['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_member = unit.new_user_ref(domain_id=self.domain_id) project_member_id = PROVIDERS.identity_api.create_user(project_member)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=project_member_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_member_id, password=project_member['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserSystemAssignmentTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_admin = unit.new_user_ref(domain_id=self.domain_id) project_admin_id = PROVIDERS.identity_api.create_user(project_admin)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=project_admin_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_admin_id, password=project_admin['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.grants have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. 
This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:check_system_grant_for_user': base.SYSTEM_READER, 'identity:list_system_grants_for_user': base.SYSTEM_READER, 'identity:create_system_grant_for_user': base.SYSTEM_ADMIN, 'identity:revoke_system_grant_for_user': base.SYSTEM_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_tokens.py0000664000175000017500000005414700000000000024133 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _SystemUserTokenTests: def test_user_can_validate_system_scoped_token(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.reader_role_id ) system_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], system=True ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=system_auth) system_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = system_token c.get('/v3/auth/tokens', headers=self.headers) def test_user_can_validate_domain_scoped_token(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) domain_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=domain_auth) domain_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = domain_token c.get('/v3/auth/tokens', headers=self.headers) def test_user_can_validate_project_scoped_token(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) 
user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=project_auth) project_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = project_token c.get('/v3/auth/tokens', headers=self.headers) class _SystemMemberAndReaderTokenTests: def test_user_cannot_revoke_a_system_scoped_token(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.reader_role_id ) system_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], system=True ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=system_auth) system_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = system_token c.delete( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_a_domain_scoped_token(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) domain_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=domain_auth) domain_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = domain_token c.delete( 
'/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_a_project_scoped_token(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=project_auth) project_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = project_token c.delete( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTokenTests, _SystemMemberAndReaderTokenTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTokenTests, _SystemMemberAndReaderTokenTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _SystemUserTokenTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_revoke_a_system_scoped_token(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.reader_role_id ) system_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], system=True ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=system_auth) system_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = system_token c.delete('/v3/auth/tokens', headers=self.headers) def test_user_can_revoke_a_domain_scoped_token(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) domain_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=domain_auth) domain_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = domain_token c.delete('/v3/auth/tokens', headers=self.headers) def test_user_can_revoke_a_project_scoped_token(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) project_auth 
= self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=project_auth) project_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = project_token c.delete('/v3/auth/tokens', headers=self.headers) class _DomainAndProjectUserTests: def test_user_can_validate_their_own_tokens(self): with self.test_client() as c: self.headers['X-Subject-Token'] = self.token_id c.get('/v3/auth/tokens', headers=self.headers) def test_user_can_revoke_their_own_tokens(self): with self.test_client() as c: self.headers['X-Subject-Token'] = self.token_id c.delete('/v3/auth/tokens', headers=self.headers) def test_user_cannot_validate_system_scoped_token(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.reader_role_id ) system_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], system=True ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=system_auth) system_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = system_token c.get( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_system_scoped_token(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], self.bootstrapper.reader_role_id ) system_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], system=True ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=system_auth) system_token = r.headers['X-Subject-Token'] with self.test_client() as c: 
self.headers['X-Subject-Token'] = system_token c.delete( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_validate_domain_scoped_token(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) domain_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=domain_auth) domain_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = domain_token c.get( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_domain_scoped_token(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = unit.new_user_ref(domain_id=domain['id']) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], domain_id=domain['id'], ) domain_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=domain_auth) domain_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = domain_token c.delete( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_validate_project_scoped_token(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = 
PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=project_auth) project_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = project_token c.get( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_revoke_project_scoped_token(self): project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=CONF.identity.default_domain_id), ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user['id'] = PROVIDERS.identity_api.create_user(user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=user['id'], project_id=project['id'], ) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) with self.test_client() as c: r = c.post('/v3/auth/tokens', json=project_auth) project_token = r.headers['X-Subject-Token'] with self.test_client() as c: self.headers['X-Subject-Token'] = project_token c.delete( '/v3/auth/tokens', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class DomainUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.domain_user_id = PROVIDERS.identity_api.create_user(domain_user)[ 'id' ] 
PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.domain_user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.domain_user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectUserTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _DomainAndProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) project_reader_id = PROVIDERS.identity_api.create_user(project_reader)[ 'id' ] project = unit.new_project_ref(domain_id=self.domain_id) project_id = PROVIDERS.resource_api.create_project( project['id'], project )['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=project_reader_id, project_id=project_id, ) auth = self.build_authentication_request( user_id=project_reader_id, password=project_reader['password'], project_id=project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_trusts.py0000664000175000017500000011361000000000000024163 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_serialization import jsonutils from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TrustTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin ): """Common functionality for all trust tests. Sets up trustor and trustee users and trust. 
""" def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] trustor_user = unit.new_user_ref(domain_id=self.domain_id) self.trustor_user_id = PROVIDERS.identity_api.create_user( trustor_user )['id'] trustee_user = unit.new_user_ref(domain_id=self.domain_id) self.trustee_user_id = PROVIDERS.identity_api.create_user( trustee_user )['id'] project = PROVIDERS.resource_api.create_project( uuid.uuid4().hex, unit.new_project_ref(domain_id=self.domain_id) ) self.project_id = project['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.trustor_user_id, project_id=self.project_id, ) PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.trustee_user_id, project_id=project['id'], ) self.trust_id = uuid.uuid4().hex self.trust_data = { 'trust': { 'trustor_user_id': self.trustor_user_id, 'trustee_user_id': self.trustee_user_id, 'project_id': self.project_id, 'impersonation': False, }, 'roles': [{"id": self.bootstrapper.member_role_id}], } auth = self.build_authentication_request( user_id=self.trustor_user_id, password=trustor_user['password'], project_id=project['id'], ) # Grab a token using the trustor persona we're testing and prepare # headers for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.trustor_headers = {'X-Auth-Token': self.token_id} auth = self.build_authentication_request( user_id=self.trustee_user_id, password=trustee_user['password'], project_id=project['id'], ) # Grab a token using the trustee persona we're testing and prepare # headers for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.trustee_headers = {'X-Auth-Token': self.token_id} def _override_policy_old_defaults(self): # TODO(cmurphy): This is to simulate what would happen if the operator # had generated a sample policy config, or had never removed their old # policy files since we adopted policy in code, and had explicitly # retained the old "" policy check strings. Remove this once the # hardcoded enforcement is removed from the trusts API. with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_trusts': '', 'identity:delete_trust': '', 'identity:get_trust': '', 'identity:list_roles_for_trust': '', 'identity:get_role_for_trust': '', } f.write(jsonutils.dumps(overridden_policies)) class _AdminTestsMixin: """Tests for all admin users. This exercises both the is_admin user and users granted the admin role on the system scope. """ def test_admin_cannot_create_trust_for_other_user(self): json = {'trust': self.trust_data['trust']} json['trust']['roles'] = self.trust_data['roles'] with self.test_client() as c: c.post( '/v3/OS-TRUST/trusts', json=json, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_list_all_trusts(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get('/v3/OS-TRUST/trusts', headers=self.headers) self.assertEqual(1, len(r.json['trusts'])) class AdminTokenTests(TrustTests, _AdminTestsMixin): """Tests for the is_admin user. 
The Trusts API has hardcoded is_admin checks that we need to ensure are preserved through the system-scope transition. """ def setUp(self): super().setUp() self.config_fixture.config(admin_token='ADMIN') self.headers = {'X-Auth-Token': 'ADMIN'} def test_admin_can_delete_trust_for_other_user(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers, expected_status_code=http.client.NO_CONTENT, ) def test_admin_can_get_non_existent_trust_not_found(self): trust_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % trust_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_admin_cannot_get_trust_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % self.trust_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_cannot_list_trust_roles_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_cannot_get_trust_role_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _SystemUserTests: """Tests for system admin, member, and reader.""" def test_user_can_get_non_existent_trust(self): trust_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % trust_id, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_trust_for_other_user(self): 
PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s' % self.trust_id, headers=self.headers ) self.assertEqual(r.json['trust']['id'], self.trust_id) def test_user_can_list_trusts_for_trustee(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.headers, ) def test_user_can_list_trusts_for_trustor(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.headers, ) def test_user_can_list_trust_roles_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.headers, ) self.assertEqual( r.json['roles'][0]['id'], self.bootstrapper.member_role_id ) def test_user_can_get_trust_role_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.headers, ) class _SystemReaderMemberTests(_SystemUserTests): """Tests for system readers and members.""" def test_user_cannot_create_trust(self): json = {'trust': self.trust_data['trust']} json['trust']['roles'] = self.trust_data['roles'] with self.test_client() as c: c.post( '/v3/OS-TRUST/trusts', json=json, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests(TrustTests, 
_SystemReaderMemberTests): """Tests for system reader users.""" def setUp(self): super().setUp() self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests(TrustTests, _SystemReaderMemberTests): """Tests for system member users.""" def setUp(self): super().setUp() self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests(TrustTests, _AdminTestsMixin, _SystemUserTests): """Tests for system admin users.""" def setUp(self): super().setUp() self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_admin_can_delete_trust_for_other_user(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers ) def test_admin_cannot_delete_trust_for_user_overridden_defaults(self): # only the is_admin admin can do this self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_cannot_get_trust_for_other_user_overridden_defaults(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % self.trust_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_cannot_list_roles_for_other_user_overridden_defaults(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s/roles' % 
self.trust_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_admin_cannot_get_trust_role_for_other_user_overridden(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_list_all_trusts_overridden_defaults(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get('/v3/OS-TRUST/trusts', headers=self.headers) self.assertEqual(1, len(r.json['trusts'])) class ProjectUserTests(TrustTests): """Tests for all project users.""" def setUp(self): super().setUp() other_user = unit.new_user_ref(domain_id=self.domain_id) self.other_user_id = PROVIDERS.identity_api.create_user(other_user)[ 'id' ] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.other_user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.other_user_id, password=other_user['password'], project_id=self.project_id, ) # Grab a token using another persona who has no trusts associated with # them with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.other_headers = {'X-Auth-Token': self.token_id} def test_user_can_list_trusts_of_whom_they_are_the_trustor(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.trustor_headers, ) self.assertEqual(1, len(r.json['trusts'])) self.assertEqual(self.trust_id, r.json['trusts'][0]['id']) def test_user_can_list_trusts_delegated_to_them(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with 
self.test_client() as c: r = c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.trustee_headers, ) self.assertEqual(1, len(r.json['trusts'])) self.assertEqual(self.trust_id, r.json['trusts'][0]['id']) def test_trustor_cannot_list_trusts_for_trustee(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.trustor_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustee_cannot_list_trusts_for_trustor(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_trusts_for_other_trustor(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_trusts_for_other_trustee(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_all_trusts(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts', headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_another_users_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def 
test_user_can_get_non_existent_trust_not_found(self): trust_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % trust_id, headers=self.other_headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_get_trust_of_whom_they_are_the_trustor(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustor_headers, ) def test_user_can_get_trust_delegated_to_them(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustee_headers, ) self.assertEqual(r.json['trust']['id'], self.trust_id) def test_trustor_can_create_trust(self): json = {'trust': self.trust_data['trust']} json['trust']['roles'] = self.trust_data['roles'] with self.test_client() as c: c.post( '/v3/OS-TRUST/trusts', json=json, headers=self.trustor_headers ) def test_trustee_cannot_create_trust(self): json = {'trust': self.trust_data['trust']} json['trust']['roles'] = self.trust_data['roles'] with self.test_client() as c: c.post( '/v3/OS-TRUST/trusts', json=json, headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_can_delete_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustor_headers, ) def test_trustee_cannot_delete_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_trust_for_other_user(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], 
headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_can_list_trust_roles(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.trustor_headers, ) self.assertEqual( r.json['roles'][0]['id'], self.bootstrapper.member_role_id ) def test_trustee_can_list_trust_roles(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.trustee_headers, ) self.assertEqual( r.json['roles'][0]['id'], self.bootstrapper.member_role_id ) def test_user_cannot_list_trust_roles_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_can_get_trust_role(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.trustor_headers, ) def test_trustee_can_get_trust_role(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.trustee_headers, ) def test_user_cannot_get_trust_role_for_other_user(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_cannot_list_trusts_for_trustee_overridden_default(self): self._override_policy_old_defaults() 
PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.trustor_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustee_cannot_list_trusts_for_trustor_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_trusts_for_other_trustor_overridden(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_trusts_for_trustee_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_all_trusts_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts', headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_can_delete_trust_overridden_default(self): self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustor_headers, ) def test_trustee_cannot_delete_trust_overridden_default(self): 
self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustee_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_trust_for_other_user_overridden_default(self): self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_get_trust_of_whom_they_are_the_trustor_overridden(self): self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustor_headers, ) def test_user_can_get_trust_delegated_to_them_overridden_default(self): self._override_policy_old_defaults() ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.trustee_headers, ) self.assertEqual(r.json['trust']['id'], self.trust_id) def test_trustor_can_list_trust_roles_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.trustor_headers, ) self.assertEqual( r.json['roles'][0]['id'], self.bootstrapper.member_role_id ) def test_trustee_can_list_trust_roles_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: r = c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.trustee_headers, ) self.assertEqual( r.json['roles'][0]['id'], self.bootstrapper.member_role_id ) def 
test_user_cannot_list_trust_roles_other_user_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustor_can_get_trust_role_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.trustor_headers, ) def test_trustee_can_get_trust_role_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.trustee_headers, ) def test_user_cannot_get_trust_role_other_user_overridden_default(self): self._override_policy_old_defaults() PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.other_headers, expected_status_code=http.client.FORBIDDEN, ) class DomainUserTests(TrustTests): """Tests for all domain users. Domain users should not be able to interact with trusts at all. 
""" def setUp(self): super().setUp() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using another persona who has no trusts associated with # them with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_trustor_cannot_list_trusts_for_trustee(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_trustee_cannot_list_trusts_for_trustor(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( ( '/v3/OS-TRUST/trusts?trustor_user_id=%s' % self.trustor_user_id ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_all_trusts(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts', headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_get_non_existent_trust_not_found(self): trust_id = uuid.uuid4().hex with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s' % trust_id, headers=self.headers, 
expected_status_code=http.client.NOT_FOUND, ) def test_user_cannot_create_trust(self): trust_data = self.trust_data['trust'] trust_data['trustor_user_id'] = self.user_id json = {'trust': trust_data} json['trust']['roles'] = self.trust_data['roles'] with self.test_client() as c: c.post( '/v3/OS-TRUST/trusts', json=json, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_trust(self): ref = PROVIDERS.trust_api.create_trust( self.trust_id, **self.trust_data ) with self.test_client() as c: c.delete( '/v3/OS-TRUST/trusts/%s' % ref['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_trust_roles(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.get( '/v3/OS-TRUST/trusts/%s/roles' % self.trust_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_trust_role(self): PROVIDERS.trust_api.create_trust(self.trust_id, **self.trust_data) with self.test_client() as c: c.head( ( '/v3/OS-TRUST/trusts/%s/roles/%s' % (self.trust_id, self.bootstrapper.member_role_id) ), headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/protection/v3/test_users.py0000664000175000017500000011076000000000000023763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_serialization import jsonutils from keystone.common.policies import user as up from keystone.common import provider_api import keystone.conf from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import base_classes from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class _CommonUserTests: """Common default functionality for all users.""" def test_user_can_get_their_own_user_reference(self): with self.test_client() as c: r = c.get('/v3/users/%s' % self.user_id, headers=self.headers) self.assertEqual(self.user_id, r.json['user']['id']) class _SystemUserTests: """Common default functionality for all system users.""" def test_user_can_get_other_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: r = c.get('/v3/users/%s' % user['id'], headers=self.headers) self.assertEqual(user['id'], r.json['user']['id']) def test_user_cannot_get_non_existent_user_not_found(self): with self.test_client() as c: c.get( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_list_users(self): expected_user_ids = [] for _ in range(3): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) expected_user_ids.append(user['id']) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) returned_user_ids = [] for user in r.json['users']: returned_user_ids.append(user['id']) for user_id in expected_user_ids: self.assertIn(user_id, returned_user_ids) class _SystemMemberAndReaderUserTests: """Common functionality for system readers and system members.""" def 
test_user_cannot_create_users(self): create = { 'user': { 'name': uuid.uuid4().hex, 'domain': CONF.identity.default_domain_id, } } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: update = {'user': {'email': uuid.uuid4().hex}} c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_user_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _DomainUserTests: """Commont default functionality for all domain users.""" def test_user_can_get_user_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/users/%s' % user['id'], headers=self.headers) self.assertEqual(user['id'], r.json['user']['id']) def test_user_cannot_get_user_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = 
PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_list_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) self.assertEqual(2, len(r.json['users'])) user_ids = [] for user in r.json['users']: user_ids.append(user['id']) self.assertIn(self.user_id, user_ids) self.assertIn(user['id'], user_ids) def test_user_cannot_list_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: r = c.get('/v3/users', headers=self.headers) user_ids = [] for u in r.json['users']: user_ids.append(u['id']) self.assertNotIn(user['id'], user_ids) class _DomainMemberAndReaderUserTests: """Functionality for all domain members and domain readers.""" def test_user_cannot_create_users_within_domain(self): create = { 'user': {'domain_id': self.domain_id, 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': {'domain_id': domain['id'], 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % 
user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_user_forbidden(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class _ProjectUserTests: """Common tests cases for all project users.""" def test_user_cannot_get_users_within_their_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.get( '/v3/users/%s' 
% user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_get_non_existent_user_forbidden(self): with self.test_client() as c: c.get( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_users_within_domain(self): with self.test_client() as c: c.get( '/v3/users?domain_id=%s' % self.domain_id, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_list_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.get( '/v3/users?domain_id=%s' % domain['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_users_within_domain(self): create = { 'user': {'domain_id': self.domain_id, 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': {'domain_id': domain['id'], 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': 
{'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_user_forbidden(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_users_in_other_domains(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class SystemReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests, _SystemMemberAndReaderUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) 
self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_reader = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_reader)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.reader_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_reader['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests, _SystemMemberAndReaderUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) system_member = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.user_id = PROVIDERS.identity_api.create_user(system_member)['id'] PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.bootstrapper.member_role_id ) auth = self.build_authentication_request( user_id=self.user_id, password=system_member['password'], system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class SystemAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _SystemUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, system=True, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def test_user_can_create_users(self): create = { 'user': { 'name': uuid.uuid4().hex, 'domain': CONF.identity.default_domain_id, } } with self.test_client() as c: c.post('/v3/users', json=create, headers=self.headers) def test_user_can_update_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) def test_user_cannot_update_non_existent_user_not_found(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) def test_user_can_delete_users(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=CONF.identity.default_domain_id) ) with self.test_client() as c: c.delete('/v3/users/%s' % user['id'], headers=self.headers) def test_user_cannot_delete_non_existent_user_not_found(self): with 
self.test_client() as c: c.delete( '/v3/users/%s' % uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.NOT_FOUND, ) class DomainReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests, _DomainMemberAndReaderUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_reader['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests, _DomainMemberAndReaderUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_user = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_user)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_user['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class DomainAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _DomainUserTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] domain_admin = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(domain_admin)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.admin_role_id, user_id=self.user_id, domain_id=self.domain_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=domain_admin['password'], domain_id=self.domain_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.users have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will apply a logical OR to deprecated policies with # new policies to maintain compatibility and give operators a chance to # update permissions or update policies without breaking users. This # will cause these specific tests to fail since we're trying to correct # this broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER, 'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER, 'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) def test_user_can_create_users_within_domain(self): create = { 'user': {'domain_id': self.domain_id, 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post('/v3/users', json=create, headers=self.headers) def test_user_cannot_create_users_within_domain_hyphened_domain_id(self): # Finally, show that we can create a new user without any surprises. # But if we specify a 'domain-id' instead of a 'domain_id', we get a # Forbidden response because we fail a policy check before # normalization occurs. domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': {'domain-id': domain['id'], 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_create_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) create = { 'user': {'domain_id': domain['id'], 'name': uuid.uuid4().hex} } with self.test_client() as c: c.post( '/v3/users', json=create, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_update_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) def test_user_can_update_users_within_domain_hyphened_domain_id(self): # If we try updating the user's 'domain_id' by specifying a # 
'domain-id', then it'll be stored into extras rather than normalized, # and the user's actual 'domain_id' is not affected. domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) update = {'user': {'domain-id': domain['id']}} with self.test_client() as c: r = c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers ) self.assertEqual(domain['id'], r.json['user']['domain-id']) self.assertEqual(self.domain_id, r.json['user']['domain_id']) def test_user_cannot_update_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % user['id'], json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_update_non_existent_user_forbidden(self): update = {'user': {'email': uuid.uuid4().hex}} with self.test_client() as c: c.patch( '/v3/users/%s' % uuid.uuid4().hex, json=update, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_can_delete_users_within_domain(self): user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=self.domain_id) ) with self.test_client() as c: c.delete('/v3/users/%s' % user['id'], headers=self.headers) def test_user_cannot_delete_users_in_other_domain(self): domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) user = PROVIDERS.identity_api.create_user( unit.new_user_ref(domain_id=domain['id']) ) with self.test_client() as c: c.delete( '/v3/users/%s' % user['id'], headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) def test_user_cannot_delete_non_existent_user_forbidden(self): with self.test_client() as c: c.delete( '/v3/users/%s' % 
uuid.uuid4().hex, headers=self.headers, expected_status_code=http.client.FORBIDDEN, ) class ProjectReaderTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_reader = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_reader)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.reader_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_reader['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectMemberTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.useFixture(ksfixtures.Policy(self.config_fixture)) self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] project = unit.new_project_ref(domain_id=self.domain_id) project = PROVIDERS.resource_api.create_project(project['id'], project) self.project_id = project['id'] project_member = unit.new_user_ref(domain_id=self.domain_id) self.user_id = PROVIDERS.identity_api.create_user(project_member)['id'] PROVIDERS.assignment_api.create_grant( self.bootstrapper.member_role_id, user_id=self.user_id, project_id=self.project_id, ) auth = self.build_authentication_request( user_id=self.user_id, password=project_member['password'], project_id=self.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. 
with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} class ProjectAdminTests( base_classes.TestCaseWithBootstrap, common_auth.AuthTestMixin, _CommonUserTests, _ProjectUserTests, ): def setUp(self): super().setUp() self.loadapp() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( ksfixtures.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self._override_policy() self.config_fixture.config(group='oslo_policy', enforce_scope=True) domain = PROVIDERS.resource_api.create_domain( uuid.uuid4().hex, unit.new_domain_ref() ) self.domain_id = domain['id'] self.user_id = self.bootstrapper.admin_user_id auth = self.build_authentication_request( user_id=self.user_id, password=self.bootstrapper.admin_password, project_id=self.bootstrapper.project_id, ) # Grab a token using the persona we're testing and prepare headers # for requests we'll be making in the tests. with self.test_client() as c: r = c.post('/v3/auth/tokens', json=auth) self.token_id = r.headers['X-Subject-Token'] self.headers = {'X-Auth-Token': self.token_id} def _override_policy(self): # TODO(lbragstad): Remove this once the deprecated policies in # keystone.common.policies.users have been removed. This is only # here to make sure we test the new policies instead of the deprecated # ones. Oslo.policy will OR deprecated policies with new policies to # maintain compatibility and give operators a chance to update # permissions or update policies without breaking users. This will # cause these specific tests to fail since we're trying to correct this # broken behavior with better scope checking. 
with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:get_user': up.SYSTEM_READER_OR_DOMAIN_READER_OR_USER, 'identity:list_users': up.SYSTEM_READER_OR_DOMAIN_READER, 'identity:create_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:update_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, 'identity:delete_user': up.SYSTEM_ADMIN_OR_DOMAIN_ADMIN, } f.write(jsonutils.dumps(overridden_policies)) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5541131 keystone-26.0.0/keystone/tests/unit/0000775000175000017500000000000000000000000017445 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/__init__.py0000664000175000017500000000122400000000000021555 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.tests.unit.core import * # noqa ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5541131 keystone-26.0.0/keystone/tests/unit/application_credential/0000775000175000017500000000000000000000000024142 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/application_credential/__init__.py0000664000175000017500000000000000000000000026241 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.5541131 keystone-26.0.0/keystone/tests/unit/application_credential/backends/0000775000175000017500000000000000000000000025714 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/application_credential/backends/__init__.py0000664000175000017500000000000000000000000030013 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/application_credential/backends/test_sql.py0000664000175000017500000000522400000000000030127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.application_credential.backends import sql as sql_driver from keystone.common import provider_api from keystone.common import sql from keystone.tests.unit.application_credential import test_backends from keystone.tests.unit.backend import core_sql from keystone.tests.unit.ksfixtures import database PROVIDERS = provider_api.ProviderAPIs class SQLModelTestCase(core_sql.BaseBackendSqlModels): def test_application_credential_model(self): cols = ( ('internal_id', sql.Integer, None), ('id', sql.String, 64), ('name', sql.String, 255), ('secret_hash', sql.String, 255), ('description', sql.Text, None), ('user_id', sql.String, 64), ('project_id', sql.String, 64), ('system', sql.String, 64), ('expires_at', sql.DateTimeInt, None), ) self.assertExpectedSchema('application_credential', cols) def test_application_credential_role_model(self): cols = ( ('application_credential_id', sql.Integer, None), ('role_id', sql.String, 64), ) self.assertExpectedSchema('application_credential_role', cols) def test_access_rule_model(self): cols = ( ('id', sql.Integer, None), ('external_id', sql.String, 64), ('user_id', sql.String, 64), ('service', sql.String, 64), ('path', sql.String, 128), ('method', sql.String, 16), ) self.assertExpectedSchema('access_rule', cols) def test_application_credential_access_rule_model(self): cols = ( ('application_credential_id', sql.Integer, None), ('access_rule_id', sql.Integer, None), ) self.assertExpectedSchema('application_credential_access_rule', cols) class SQLDriverTestCase( core_sql.BaseBackendSqlTests, test_backends.ApplicationCredentialTests ): def setUp(self): self.useFixture(database.Database()) self.driver = sql_driver.ApplicationCredential() super().setUp() self.app_cred_api = PROVIDERS.application_credential_api ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/application_credential/test_backends.py0000664000175000017500000004211500000000000027330 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_config import fixture as config_fixture from oslo_utils import timeutils from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class ApplicationCredentialTests: def _new_app_cred_data( self, user_id, project_id=None, name=None, expires=None, system=None ): if not name: name = uuid.uuid4().hex if not expires: expires = timeutils.utcnow() + datetime.timedelta(days=365) if not system: system = uuid.uuid4().hex if not project_id: project_id = uuid.uuid4().hex app_cred_data = { 'id': uuid.uuid4().hex, 'name': name, 'description': uuid.uuid4().hex, 'user_id': user_id, 'project_id': project_id, 'system': system, 'expires_at': expires, 'roles': [ {'id': self.role__member_['id']}, ], 'secret': uuid.uuid4().hex, 'unrestricted': False, } return app_cred_data def test_create_application_credential(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) resp = self.app_cred_api.create_application_credential(app_cred) resp_roles = resp.pop('roles') orig_roles = app_cred.pop('roles') self.assertDictEqual(app_cred, resp) self.assertEqual(orig_roles[0]['id'], 
resp_roles[0]['id']) def test_create_duplicate_application_credential_fails(self): # Ensure a user can't create two application credentials with the same # name app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) name = app_cred['name'] self.app_cred_api.create_application_credential(app_cred) app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name=name ) self.assertRaises( exception.Conflict, self.app_cred_api.create_application_credential, app_cred, ) def test_create_application_credential_require_role_assignments(self): # Ensure a user can't create an application credential for a project # they don't have a role assignment on app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_baz['id'] ) self.assertRaises( exception.RoleAssignmentNotFound, self.app_cred_api.create_application_credential, app_cred, ) def test_application_credential_allow_recursion(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) app_cred['unrestricted'] = True resp = self.app_cred_api.create_application_credential(app_cred) resp.pop('roles') app_cred.pop('roles') self.assertDictEqual(app_cred, resp) def test_application_credential_limits(self): config_fixture_ = self.user = self.useFixture(config_fixture.Config()) config_fixture_.config(group='application_credential', user_limit=2) app_cred = self._new_app_cred_data( self.user_foo['id'], self.project_bar['id'] ) self.app_cred_api.create_application_credential(app_cred) app_cred['name'] = 'two' self.app_cred_api.create_application_credential(app_cred) app_cred['name'] = 'three' self.assertRaises( exception.ApplicationCredentialLimitExceeded, self.app_cred_api.create_application_credential, app_cred, ) def test_create_application_credential_with_access_rules(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) app_cred['access_rules'] = [ { 
'id': uuid.uuid4().hex, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ] resp = self.app_cred_api.create_application_credential(app_cred) resp.pop('roles') resp_access_rules = resp.pop('access_rules') app_cred.pop('roles') orig_access_rules = app_cred.pop('access_rules') self.assertDictEqual(app_cred, resp) for i, ar in enumerate(resp_access_rules): self.assertDictEqual(orig_access_rules[i], ar) def test_create_application_credential_with_preexisting_access_rules(self): app_cred_1 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) app_cred_1['access_rules'] = [ { 'id': uuid.uuid4().hex, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ] resp = self.app_cred_api.create_application_credential(app_cred_1) resp_access_rules_1 = resp.pop('access_rules') app_cred_2 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) app_cred_2['access_rules'] = [{'id': resp_access_rules_1[0]['id']}] resp = self.app_cred_api.create_application_credential(app_cred_2) resp_access_rules_2 = resp.pop('access_rules') self.assertDictEqual(resp_access_rules_1[0], resp_access_rules_2[0]) def test_get_application_credential(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) create_resp = self.app_cred_api.create_application_credential(app_cred) app_cred_id = create_resp['id'] get_resp = self.app_cred_api.get_application_credential(app_cred_id) create_resp.pop('secret') self.assertDictEqual(create_resp, get_resp) def test_get_application_credential_not_found(self): self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, uuid.uuid4().hex, ) def test_list_application_credentials(self): app_cred_1 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name='app1' ) app_cred_2 = self._new_app_cred_data( self.user_foo['id'], 
project_id=self.project_bar['id'], name='app2' ) app_cred_3 = self._new_app_cred_data( self.user_two['id'], project_id=self.project_baz['id'], name='app3' ) resp1 = self.app_cred_api.create_application_credential(app_cred_1) resp2 = self.app_cred_api.create_application_credential(app_cred_2) resp3 = self.app_cred_api.create_application_credential(app_cred_3) hints = driver_hints.Hints() resp = self.app_cred_api.list_application_credentials( self.user_foo['id'], hints ) resp_ids = [ac['id'] for ac in resp] self.assertIn(resp1['id'], resp_ids) self.assertIn(resp2['id'], resp_ids) self.assertNotIn(resp3['id'], resp_ids) for ac in resp: self.assertNotIn('secret_hash', ac) def _list_ids(self, user): hints = driver_hints.Hints() resp = self.app_cred_api.list_application_credentials( user['id'], hints ) return [ac['id'] for ac in resp] def test_delete_application_credential(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) self.app_cred_api.create_application_credential(app_cred) # cache the information self.app_cred_api.get_application_credential(app_cred['id']) self.assertIn(app_cred['id'], self._list_ids(self.user_foo)) self.app_cred_api.delete_application_credential(app_cred['id']) self.assertNotIn(app_cred['id'], self._list_ids(self.user_foo)) # the cache information has been invalidated. 
self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, app_cred['id'], ) def test_delete_application_credential_not_found(self): self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.delete_application_credential, uuid.uuid4().hex, ) def test_deleting_a_user_deletes_application_credentials(self): app_cred_1 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name='app1' ) app_cred_2 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name='app2' ) self.app_cred_api.create_application_credential(app_cred_1) self.app_cred_api.create_application_credential(app_cred_2) self.assertIn(app_cred_1['id'], self._list_ids(self.user_foo)) self.assertIn(app_cred_2['id'], self._list_ids(self.user_foo)) # cache the information self.app_cred_api.get_application_credential(app_cred_1['id']) self.app_cred_api.get_application_credential(app_cred_2['id']) # This should trigger a notification which should invoke a callback in # the application credential Manager to cleanup user_foo's application # credentials. PROVIDERS.identity_api.delete_user(self.user_foo['id']) hints = driver_hints.Hints() self.assertListEqual( [], self.app_cred_api.list_application_credentials( self.user_foo['id'], hints ), ) # the cache information has been invalidated. 
self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, app_cred_1['id'], ) self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, app_cred_2['id'], ) def test_removing_user_from_project_deletes_application_credentials(self): app_cred_proj_A_1 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name='app1' ) app_cred_proj_A_2 = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], name='app2' ) app_cred_proj_B = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_baz['id'], name='app3' ) PROVIDERS.assignment_api.add_role_to_user_and_project( project_id=self.project_baz['id'], user_id=self.user_foo['id'], role_id=self.role__member_['id'], ) self.app_cred_api.create_application_credential(app_cred_proj_A_1) self.app_cred_api.create_application_credential(app_cred_proj_A_2) self.app_cred_api.create_application_credential(app_cred_proj_B) self.assertIn(app_cred_proj_A_1['id'], self._list_ids(self.user_foo)) self.assertIn(app_cred_proj_A_2['id'], self._list_ids(self.user_foo)) self.assertIn(app_cred_proj_B['id'], self._list_ids(self.user_foo)) # cache the information self.app_cred_api.get_application_credential(app_cred_proj_A_1['id']) self.app_cred_api.get_application_credential(app_cred_proj_A_2['id']) self.app_cred_api.get_application_credential(app_cred_proj_B['id']) # This should trigger a notification which should invoke a callback in # the application credential Manager to cleanup all of user_foo's # application credentials on project bar. 
PROVIDERS.assignment_api.remove_role_from_user_and_project( user_id=self.user_foo['id'], project_id=self.project_bar['id'], role_id=self.role__member_['id'], ) self.assertNotIn( app_cred_proj_A_1['id'], self._list_ids(self.user_foo) ) self.assertNotIn( app_cred_proj_A_2['id'], self._list_ids(self.user_foo) ) self.assertIn(app_cred_proj_B['id'], self._list_ids(self.user_foo)) # the cache information has been invalidated only for the deleted # application credential. self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, app_cred_proj_A_1['id'], ) self.assertRaises( exception.ApplicationCredentialNotFound, self.app_cred_api.get_application_credential, app_cred_proj_A_2['id'], ) self.assertEqual( app_cred_proj_B['id'], self.app_cred_api.get_application_credential( app_cred_proj_B['id'] )['id'], ) def test_authenticate(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) resp = self.app_cred_api.create_application_credential(app_cred) self.app_cred_api.authenticate(resp['id'], resp['secret']) def test_authenticate_not_found(self): self.assertRaises( AssertionError, self.app_cred_api.authenticate, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_authenticate_expired(self): yesterday = timeutils.utcnow() - datetime.timedelta(days=1) app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'], expires=yesterday, ) resp = self.app_cred_api.create_application_credential(app_cred) self.assertRaises( AssertionError, self.app_cred_api.authenticate, resp['id'], resp['secret'], ) def test_authenticate_bad_secret(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) resp = self.app_cred_api.create_application_credential(app_cred) badpass = 'badpass' self.assertNotEqual(badpass, resp['secret']) self.assertRaises( AssertionError, self.app_cred_api.authenticate, resp['id'], badpass ) def 
test_get_delete_access_rules(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) access_rule_id = uuid.uuid4().hex app_cred['access_rules'] = [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ] self.app_cred_api.create_application_credential(app_cred) self.assertDictEqual( app_cred['access_rules'][0], self.app_cred_api.get_access_rule(access_rule_id), ) self.app_cred_api.delete_application_credential(app_cred['id']) self.app_cred_api.delete_access_rule(access_rule_id) self.assertRaises( exception.AccessRuleNotFound, self.app_cred_api.get_access_rule, access_rule_id, ) def test_list_delete_access_rule_for_user(self): app_cred = self._new_app_cred_data( self.user_foo['id'], project_id=self.project_bar['id'] ) access_rule_id = uuid.uuid4().hex app_cred['access_rules'] = [ { 'id': access_rule_id, 'service': uuid.uuid4().hex, 'path': uuid.uuid4().hex, 'method': uuid.uuid4().hex[16:], } ] self.app_cred_api.create_application_credential(app_cred) self.assertEqual( 1, len( self.app_cred_api.list_access_rules_for_user( self.user_foo['id'] ) ), ) self.app_cred_api.delete_application_credential(app_cred['id']) # access rule should still exist self.assertEqual( 1, len( self.app_cred_api.list_access_rules_for_user( self.user_foo['id'] ) ), ) self.app_cred_api.delete_access_rules_for_user(self.user_foo['id']) self.assertEqual( 0, len( self.app_cred_api.list_access_rules_for_user( self.user_foo['id'] ) ), ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/assignment/0000775000175000017500000000000000000000000021615 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/assignment/__init__.py0000664000175000017500000000000000000000000023714 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/assignment/role_backends/0000775000175000017500000000000000000000000024410 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/assignment/role_backends/__init__.py0000664000175000017500000000000000000000000026507 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/assignment/role_backends/test_sql.py0000664000175000017500000001134400000000000026623 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import provider_api from keystone.common import sql from keystone import exception from keystone.tests import unit from keystone.tests.unit.assignment import test_core from keystone.tests.unit.backend import core_sql PROVIDERS = provider_api.ProviderAPIs class SqlRoleModels(core_sql.BaseBackendSqlModels): def test_role_model(self): cols = ( ('id', sql.String, 64), ('name', sql.String, 255), ('domain_id', sql.String, 64), ) self.assertExpectedSchema('role', cols) class SqlRole(core_sql.BaseBackendSqlTests, test_core.RoleTests): def test_create_null_role_name(self): role = unit.new_role_ref(name=None) self.assertRaises( exception.UnexpectedError, PROVIDERS.role_api.create_role, role['id'], role, ) self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, role['id'] ) def test_create_duplicate_role_domain_specific_name_fails(self): domain = unit.new_domain_ref() role1 = unit.new_role_ref(domain_id=domain['id']) PROVIDERS.role_api.create_role(role1['id'], role1) role2 = unit.new_role_ref(name=role1['name'], domain_id=domain['id']) self.assertRaises( exception.Conflict, PROVIDERS.role_api.create_role, role2['id'], role2, ) def test_update_domain_id_of_role_fails(self): # Create a global role role1 = unit.new_role_ref() role1 = PROVIDERS.role_api.create_role(role1['id'], role1) # Try and update it to be domain specific domainA = unit.new_domain_ref() role1['domain_id'] = domainA['id'] self.assertRaises( exception.ValidationError, PROVIDERS.role_api.update_role, role1['id'], role1, ) # Create a domain specific role from scratch role2 = unit.new_role_ref(domain_id=domainA['id']) PROVIDERS.role_api.create_role(role2['id'], role2) # Try to "move" it to another domain domainB = unit.new_domain_ref() role2['domain_id'] = domainB['id'] self.assertRaises( exception.ValidationError, PROVIDERS.role_api.update_role, role2['id'], role2, ) # Now try to make it global role2['domain_id'] = None self.assertRaises( exception.ValidationError, 
PROVIDERS.role_api.update_role, role2['id'], role2, ) def test_domain_specific_separation(self): domain1 = unit.new_domain_ref() role1 = unit.new_role_ref(domain_id=domain1['id']) role_ref1 = PROVIDERS.role_api.create_role(role1['id'], role1.copy()) self.assertDictEqual(role1, role_ref1) # Check we can have the same named role in a different domain domain2 = unit.new_domain_ref() role2 = unit.new_role_ref(name=role1['name'], domain_id=domain2['id']) role_ref2 = PROVIDERS.role_api.create_role(role2['id'], role2) self.assertDictEqual(role2, role_ref2) # ...and in fact that you can have the same named role as a global role role3 = unit.new_role_ref(name=role1['name']) role_ref3 = PROVIDERS.role_api.create_role(role3['id'], role3) self.assertDictEqual(role3, role_ref3) # Check that updating one doesn't change the others role1['name'] = uuid.uuid4().hex PROVIDERS.role_api.update_role(role1['id'], role1) role_ref1 = PROVIDERS.role_api.get_role(role1['id']) self.assertDictEqual(role1, role_ref1) role_ref2 = PROVIDERS.role_api.get_role(role2['id']) self.assertDictEqual(role2, role_ref2) role_ref3 = PROVIDERS.role_api.get_role(role3['id']) self.assertDictEqual(role3, role_ref3) # Check that deleting one of these, doesn't affect the others PROVIDERS.role_api.delete_role(role1['id']) self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, role1['id'] ) PROVIDERS.role_api.get_role(role2['id']) PROVIDERS.role_api.get_role(role3['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/assignment/test_backends.py0000664000175000017500000062164500000000000025016 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class AssignmentTestHelperMixin: """Mixin class to aid testing of assignments. This class supports data driven test plans that enable: - Creation of initial entities, such as domains, users, groups, projects and roles - Creation of assignments referencing the above entities - A set of input parameters and expected outputs to list_role_assignments based on the above test data A test plan is a dict of the form: test_plan = { entities: details and number of entities, group_memberships: group-user entity memberships, assignments: list of assignments to create, tests: list of pairs of input params and expected outputs} An example test plan: test_plan = { # First, create the entities required. Entities are specified by # a dict with the key being the entity type and the value an # entity specification which can be one of: # # - a simple number, e.g. {'users': 3} creates 3 users # - a dict where more information regarding the contents of the entity # is required, e.g. {'domains' : {'users : 3}} creates a domain # with three users # - a list of entity specifications if multiple are required # # The following creates a domain that contains a single user, group and # project, as well as creating three roles. 
'entities': {'domains': {'users': 1, 'groups': 1, 'projects': 1}, 'roles': 3}, # If it is required that an existing domain be used for the new # entities, then the id of that domain can be included in the # domain dict. For example, if alternatively we wanted to add 3 users # to the default domain, add a second domain containing 3 projects as # well as 5 additional empty domains, the entities would be defined as: # # 'entities': {'domains': [{'id': DEFAULT_DOMAIN, 'users': 3}, # {'projects': 3}, 5]}, # # A project hierarchy can be specified within the 'projects' section by # nesting the 'project' key, for example to create a project with three # sub-projects you would use: 'projects': {'project': 3} # A more complex hierarchy can also be defined, for example the # following would define three projects each containing a # sub-project, each of which contain a further three sub-projects. 'projects': [{'project': {'project': 3}}, {'project': {'project': 3}}, {'project': {'project': 3}}] # If the 'roles' entity count is defined as top level key in 'entities' # dict then these are global roles. If it is placed within the # 'domain' dict, then they will be domain specific roles. A mix of # domain specific and global roles are allowed, with the role index # being calculated in the order they are defined in the 'entities' # dict. # A set of implied role specifications. In this case, prior role # index 0 implies role index 1, and role 1 implies roles 2 and 3. 'roles': [{'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}] # A list of groups and their members. In this case make users with # index 0 and 1 members of group with index 0. Users and Groups are # indexed in the order they appear in the 'entities' key above. 'group_memberships': [{'group': 0, 'users': [0, 1]}] # Next, create assignments between the entities, referencing the # entities by index, i.e. 'user': 0 refers to user[0]. 
Entities are # indexed in the order they appear in the 'entities' key above within # their entity type. 'assignments': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}], # Finally, define an array of tests where list_role_assignment() is # called with the given input parameters and the results are then # confirmed to be as given in 'results'. Again, all entities are # referenced by index. 'tests': [ {'params': {}, 'results': [{'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, {'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}]}, {'params': {'role': 2}, 'results': [{'group': 0, 'role': 2, 'domain': 0}, {'user': 0, 'role': 2, 'project': 0}]}] # The 'params' key also supports the 'effective', # 'inherited_to_projects' and 'source_from_group_ids' options to # list_role_assignments.} """ def _handle_project_spec( self, test_data, domain_id, project_spec, parent_id=None ): """Handle the creation of a project or hierarchy of projects. project_spec may either be a count of the number of projects to create, or it may be a list of the form: [{'project': project_spec}, {'project': project_spec}, ...] This method is called recursively to handle the creation of a hierarchy of projects. 
""" def _create_project(domain_id, parent_id): new_project = unit.new_project_ref( domain_id=domain_id, parent_id=parent_id ) new_project = PROVIDERS.resource_api.create_project( new_project['id'], new_project ) return new_project if isinstance(project_spec, list): for this_spec in project_spec: self._handle_project_spec( test_data, domain_id, this_spec, parent_id=parent_id ) elif isinstance(project_spec, dict): new_proj = _create_project(domain_id, parent_id) test_data['projects'].append(new_proj) self._handle_project_spec( test_data, domain_id, project_spec['project'], parent_id=new_proj['id'], ) else: for _ in range(project_spec): test_data['projects'].append( _create_project(domain_id, parent_id) ) def _create_role(self, domain_id=None): new_role = unit.new_role_ref(domain_id=domain_id) return PROVIDERS.role_api.create_role(new_role['id'], new_role) def _handle_domain_spec(self, test_data, domain_spec): """Handle the creation of domains and their contents. domain_spec may either be a count of the number of empty domains to create, a dict describing the domain contents, or a list of domain_specs. In the case when a list is provided, this method calls itself recursively to handle the list elements. 
This method will insert any entities created into test_data """ def _create_domain(domain_id=None): if domain_id is None: new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain( new_domain['id'], new_domain ) return new_domain else: # The test plan specified an existing domain to use return PROVIDERS.resource_api.get_domain(domain_id) def _create_entity_in_domain(entity_type, domain_id): """Create a user or group entity in the domain.""" if entity_type == 'users': new_entity = unit.new_user_ref(domain_id=domain_id) new_entity = PROVIDERS.identity_api.create_user(new_entity) elif entity_type == 'groups': new_entity = unit.new_group_ref(domain_id=domain_id) new_entity = PROVIDERS.identity_api.create_group(new_entity) elif entity_type == 'roles': new_entity = self._create_role(domain_id=domain_id) else: # Must be a bad test plan raise exception.NotImplemented() return new_entity if isinstance(domain_spec, list): for x in domain_spec: self._handle_domain_spec(test_data, x) elif isinstance(domain_spec, dict): # If there is a domain ID specified, then use it the_domain = _create_domain(domain_spec.get('id')) test_data['domains'].append(the_domain) for entity_type, value in domain_spec.items(): if entity_type == 'id': # We already used this above to determine whether to # use and existing domain continue if entity_type == 'projects': # If it's projects, we need to handle the potential # specification of a project hierarchy self._handle_project_spec( test_data, the_domain['id'], value ) else: # It's a count of number of entities for _ in range(value): test_data[entity_type].append( _create_entity_in_domain( entity_type, the_domain['id'] ) ) else: for _ in range(domain_spec): test_data['domains'].append(_create_domain()) def create_entities(self, entity_pattern): """Create the entities specified in the test plan. Process the 'entities' key in the test plan, creating the requested entities. 
Each created entity will be added to the array of entities stored in
        the returned test_data object, e.g.:

        test_data['users'] = [user[0], user[1]....]

        """
        test_data = {}
        for entity in ['users', 'groups', 'domains', 'projects', 'roles']:
            test_data[entity] = []

        # Create any domains requested and, if specified, any entities within
        # those domains
        if 'domains' in entity_pattern:
            self._handle_domain_spec(test_data, entity_pattern['domains'])

        # Create any roles requested
        if 'roles' in entity_pattern:
            for _ in range(entity_pattern['roles']):
                test_data['roles'].append(self._create_role())

        return test_data

    def _convert_entity_shorthand(self, key, shorthand_data, reference_data):
        """Convert a shorthand entity description into a full ID reference.

        In test plan definitions, we allow a shorthand for referencing to an
        entity of the form:

        'user': 0

        which is actually shorthand for:

        'user_id': reference_data['users'][0]['id']

        This method converts the shorthand version into the full reference.

        """
        expanded_key = '%s_id' % key
        # Pluralize the key to index the entity arrays in reference_data,
        # e.g. 'user' -> 'users'.
        reference_index = '%ss' % key
        index_value = reference_data[reference_index][shorthand_data[key]][
            'id'
        ]
        return expanded_key, index_value

    def create_implied_roles(self, implied_pattern, test_data):
        """Create the implied roles specified in the test plan."""
        for implied_spec in implied_pattern:
            # Each implied role specification is a dict of the form:
            #
            # {'role': 0, 'implied_roles': list of roles}
            prior_role = test_data['roles'][implied_spec['role']]['id']
            if isinstance(implied_spec['implied_roles'], list):
                for this_role in implied_spec['implied_roles']:
                    implied_role = test_data['roles'][this_role]['id']
                    PROVIDERS.role_api.create_implied_role(
                        prior_role, implied_role
                    )
            else:
                # A single implied role index rather than a list of them.
                implied_role = test_data['roles'][
                    implied_spec['implied_roles']
                ]['id']
                PROVIDERS.role_api.create_implied_role(
                    prior_role, implied_role
                )

    def create_group_memberships(self, group_pattern, test_data):
        """Create the group memberships specified in the test plan."""
        for group_spec in group_pattern:
            # Each membership specification is a dict of the form:
            #
            # {'group': 0, 'users': [list of user indexes]}
            #
            # Add all users in the list to the specified group, first
            # converting from index to full entity ID.
            group_value = test_data['groups'][group_spec['group']]['id']
            for user_index in group_spec['users']:
                user_value = test_data['users'][user_index]['id']
                PROVIDERS.identity_api.add_user_to_group(
                    user_value, group_value
                )
        return test_data

    def create_assignments(self, assignment_pattern, test_data):
        """Create the assignments specified in the test plan."""
        # First store how many assignments are already in the system,
        # so during the tests we can check the number of new assignments
        # created.
        test_data['initial_assignment_count'] = len(
            PROVIDERS.assignment_api.list_role_assignments()
        )

        # Now create the new assignments in the test plan
        for assignment in assignment_pattern:
            # Each assignment is a dict of the form:
            #
            # { 'user': 0, 'project':1, 'role': 6}
            #
            # where the value of each item is the index into the array of
            # entities created earlier.
            #
            # We process the assignment dict to create the args required to
            # make the create_grant() call.
            args = {}
            for param in assignment:
                if param == 'inherited_to_projects':
                    args[param] = assignment[param]
                else:
                    # Turn 'entity : 0' into 'entity_id = ac6736ba873d'
                    # where entity in user, group, project or domain
                    key, value = self._convert_entity_shorthand(
                        param, assignment, test_data
                    )
                    args[key] = value
            PROVIDERS.assignment_api.create_grant(**args)
        return test_data

    def execute_assignment_cases(self, test_plan, test_data):
        """Execute the test plan, based on the created test_data."""

        def check_results(expected, actual, param_arg_count):
            # Assert that 'actual' assignments match the 'expected'
            # (shorthand) assignments from the test plan.
            if param_arg_count == 0:
                # It was an unfiltered call, so default fixture assignments
                # might be polluting our answer - so we take into account
                # how many assignments there were before the test.
                self.assertEqual(
                    len(expected) + test_data['initial_assignment_count'],
                    len(actual),
                )
            else:
                self.assertThat(actual, matchers.HasLength(len(expected)))

            for each_expected in expected:
                expected_assignment = {}
                for param in each_expected:
                    if param == 'inherited_to_projects':
                        expected_assignment[param] = each_expected[param]
                    elif param == 'indirect':
                        # We're expecting the result to contain an indirect
                        # dict with the details how the role came to be placed
                        # on this entity - so convert the key/value pairs of
                        # that dict into real entity references.
                        indirect_term = {}
                        for indirect_param in each_expected[param]:
                            key, value = self._convert_entity_shorthand(
                                indirect_param, each_expected[param], test_data
                            )
                            indirect_term[key] = value
                        expected_assignment[param] = indirect_term
                    else:
                        # Convert a simple shorthand entry into a full
                        # entity reference
                        key, value = self._convert_entity_shorthand(
                            param, each_expected, test_data
                        )
                        expected_assignment[key] = value
                self.assertIn(expected_assignment, actual)

        def convert_group_ids_sourced_from_list(index_list, reference_data):
            # Map a list of group indexes to their full group IDs.
            value_list = []
            for group_index in index_list:
                value_list.append(reference_data['groups'][group_index]['id'])
            return value_list

        # Go through each test in the array, processing the input params, which
        # we build into an args dict, and then call list_role_assignments. Then
        # check the results against those specified in the test plan.
        for test in test_plan.get('tests', []):
            args = {}
            for param in test['params']:
                if param in ['effective', 'inherited', 'include_subtree']:
                    # Just pass the value into the args
                    args[param] = test['params'][param]
                elif param == 'source_from_group_ids':
                    # Convert the list of indexes into a list of IDs
                    args[param] = convert_group_ids_sourced_from_list(
                        test['params']['source_from_group_ids'], test_data
                    )
                else:
                    # Turn 'entity : 0' into 'entity_id = ac6736ba873d'
                    # where entity in user, group, project or domain
                    key, value = self._convert_entity_shorthand(
                        param, test['params'], test_data
                    )
                    args[key] = value
            results = PROVIDERS.assignment_api.list_role_assignments(**args)
            check_results(test['results'], results, len(args))

    def execute_assignment_plan(self, test_plan):
        """Create entities, assignments and execute the test plan.

        The standard method to call to create entities and assignments and
        execute the tests as specified in the test_plan. The test_data dict is
        returned so that, if required, the caller can execute additional manual
        tests with the entities and assignments created.

        """
        test_data = self.create_entities(test_plan['entities'])
        if 'implied_roles' in test_plan:
            self.create_implied_roles(test_plan['implied_roles'], test_data)
        if 'group_memberships' in test_plan:
            self.create_group_memberships(
                test_plan['group_memberships'], test_data
            )
        if 'assignments' in test_plan:
            test_data = self.create_assignments(
                test_plan['assignments'], test_data
            )
        self.execute_assignment_cases(test_plan, test_data)
        return test_data


class AssignmentTests(AssignmentTestHelperMixin):
    """Tests for the assignment API (grants and role assignments)."""

    def _get_domain_fixture(self):
        """Create and return a fresh domain for use by a test."""
        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        return domain

    def test_project_add_and_remove_user_role(self):
        """Adding/removing a role changes the user's project membership."""
        user_ids = PROVIDERS.assignment_api.list_user_ids_for_project(
            self.project_bar['id']
        )
        self.assertNotIn(self.user_two['id'], user_ids)

        PROVIDERS.assignment_api.add_role_to_user_and_project(
            project_id=self.project_bar['id'],
            user_id=self.user_two['id'],
            role_id=self.role_other['id'],
        )
        user_ids = PROVIDERS.assignment_api.list_user_ids_for_project(
            self.project_bar['id']
        )
        self.assertIn(self.user_two['id'], user_ids)

        PROVIDERS.assignment_api.remove_role_from_user_and_project(
            project_id=self.project_bar['id'],
            user_id=self.user_two['id'],
            role_id=self.role_other['id'],
        )
        user_ids = PROVIDERS.assignment_api.list_user_ids_for_project(
            self.project_bar['id']
        )
        self.assertNotIn(self.user_two['id'], user_ids)

    def test_remove_user_role_not_assigned(self):
        # Expect failure if attempt to remove a role that was never assigned to
        # the user.
        self.assertRaises(
            exception.RoleNotFound,
            PROVIDERS.assignment_api.remove_role_from_user_and_project,
            project_id=self.project_bar['id'],
            user_id=self.user_two['id'],
            role_id=self.role_other['id'],
        )

    def test_list_user_ids_for_project(self):
        """Users with fixture assignments on a project are listed."""
        user_ids = PROVIDERS.assignment_api.list_user_ids_for_project(
            self.project_baz['id']
        )
        self.assertEqual(2, len(user_ids))
        self.assertIn(self.user_two['id'], user_ids)
        self.assertIn(self.user_badguy['id'], user_ids)

    def test_list_user_ids_for_project_no_duplicates(self):
        """A user with multiple roles on a project is listed only once."""
        # Create user
        user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user_ref = PROVIDERS.identity_api.create_user(user_ref)
        # Create project
        project_ref = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(project_ref['id'], project_ref)
        # Create 2 roles and give user each role in project
        for i in range(2):
            role_ref = unit.new_role_ref()
            PROVIDERS.role_api.create_role(role_ref['id'], role_ref)
            PROVIDERS.assignment_api.add_role_to_user_and_project(
                user_id=user_ref['id'],
                project_id=project_ref['id'],
                role_id=role_ref['id'],
            )
        # Get the list of user_ids in project
        user_ids = PROVIDERS.assignment_api.list_user_ids_for_project(
            project_ref['id']
        )
        # Ensure the user is only returned once
        self.assertEqual(1, len(user_ids))

    def test_get_project_user_ids_returns_not_found(self):
        """Listing users for a non-existent project raises ProjectNotFound."""
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.assignment_api.list_user_ids_for_project,
            uuid.uuid4().hex,
        )

    def test_list_role_assignments_unfiltered(self):
        """Test unfiltered listing of role assignments."""
        test_plan = {
            # Create a domain, with a user, group & project
            'entities': {
                'domains': {'users': 1, 'groups': 1, 'projects': 1},
                'roles': 3,
            },
            # Create a grant of each type (user/group on project/domain)
            'assignments': [
                {'user': 0, 'role': 0, 'domain': 0},
                {'user': 0, 'role': 1, 'project': 0},
                {'group': 0, 'role': 2, 'domain': 0},
                {'group': 0, 'role': 2, 'project': 0},
            ],
            'tests': [
                # Check that we get back the 4 assignments
                {
                    'params': {},
                    'results': [
                        {'user': 0, 'role': 0, 'domain': 0},
                        {'user': 0, 'role': 1, 'project': 0},
                        {'group': 0, 'role': 2, 'domain': 0},
                        {'group': 0, 'role': 2, 'project': 0},
                    ],
                }
            ],
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignments_filtered_by_role(self):
        """Test listing of role assignments filtered by role ID."""
        test_plan = {
            # Create a user, group & project in the default domain
            'entities': {
                'domains': {
                    'id': CONF.identity.default_domain_id,
                    'users': 1,
                    'groups': 1,
                    'projects': 1,
                },
                'roles': 3,
            },
            # Create a grant of each type (user/group on project/domain)
            'assignments': [
                {'user': 0, 'role': 0, 'domain': 0},
                {'user': 0, 'role': 1, 'project': 0},
                {'group': 0, 'role': 2, 'domain': 0},
                {'group': 0, 'role': 2, 'project': 0},
            ],
            'tests': [
                # Check that when filtering by role, we only get back those
                # that match
                {
                    'params': {'role': 2},
                    'results': [
                        {'group': 0, 'role': 2, 'domain': 0},
                        {'group': 0, 'role': 2, 'project': 0},
                    ],
                }
            ],
        }
        self.execute_assignment_plan(test_plan)

    def test_list_group_role_assignment(self):
        # When a group role assignment is created and the role assignments are
        # listed then the group role assignment is included in the list.
        test_plan = {
            'entities': {
                'domains': {
                    'id': CONF.identity.default_domain_id,
                    'groups': 1,
                    'projects': 1,
                },
                'roles': 1,
            },
            'assignments': [{'group': 0, 'role': 0, 'project': 0}],
            'tests': [
                {
                    'params': {},
                    'results': [{'group': 0, 'role': 0, 'project': 0}],
                }
            ],
        }
        self.execute_assignment_plan(test_plan)

    def test_list_role_assignments_bad_role(self):
        """Filtering by an unknown role ID yields an empty list."""
        assignment_list = PROVIDERS.assignment_api.list_role_assignments(
            role_id=uuid.uuid4().hex
        )
        self.assertEqual([], assignment_list)

    def test_list_role_assignments_user_not_found(self):
        def _user_not_found(value):
            raise exception.UserNotFound(user_id=value)

        # Note(knikolla): Patch get_user to return UserNotFound
        # this simulates the possibility of a user being deleted
        # directly in the backend and still having lingering role
        # assignments.
        with mock.patch.object(
            PROVIDERS.identity_api, 'get_user', _user_not_found
        ):
            assignment_list = PROVIDERS.assignment_api.list_role_assignments(
                include_names=True
            )
        self.assertNotEqual([], assignment_list)
        for assignment in assignment_list:
            if 'user_name' in assignment:
                # Note(knikolla): In the case of a not found user we
                # populate the values with empty strings.
                self.assertEqual('', assignment['user_name'])
                self.assertEqual('', assignment['user_domain_id'])
                self.assertEqual('', assignment['user_domain_name'])

    def test_list_role_assignments_group_not_found(self):
        def _group_not_found(value):
            raise exception.GroupNotFound(group_id=value)

        # Setup
        # 1) Remove any pre-existing assignments so we control what's there
        for a in PROVIDERS.assignment_api.list_role_assignments():
            PROVIDERS.assignment_api.delete_grant(**a)
        # 2) create a group and 2 users in that group
        domain_id = CONF.identity.default_domain_id
        group = PROVIDERS.identity_api.create_group(
            unit.new_group_ref(domain_id=domain_id)
        )
        user1 = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain_id)
        )
        user2 = PROVIDERS.identity_api.create_user(
            unit.new_user_ref(domain_id=domain_id)
        )
        PROVIDERS.identity_api.add_user_to_group(user1['id'], group['id'])
        PROVIDERS.identity_api.add_user_to_group(user2['id'], group['id'])
        # 3) create a role assignment for the group
        PROVIDERS.assignment_api.create_grant(
            group_id=group['id'],
            domain_id=domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

        num_assignments = len(PROVIDERS.assignment_api.list_role_assignments())
        self.assertEqual(1, num_assignments)

        # Patch get_group to return GroupNotFound, allowing us to confirm
        # that the exception is handled properly when include_names processing
        # attempts to lookup a group that has been deleted in the backend
        with mock.patch.object(
            PROVIDERS.identity_api, 'get_group', _group_not_found
        ):
            # Mocking a dependent function makes the cache invalid
            keystone.assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
            assignment_list = PROVIDERS.assignment_api.list_role_assignments(
                include_names=True
            )
        self.assertEqual(num_assignments, len(assignment_list))
        for assignment in assignment_list:
            includes_group_assignments = False
            if 'group_name' in assignment:
                includes_group_assignments = True
                # Note(knikolla): In the case of a not-found group we
                # populate the values with empty strings.
                self.assertEqual('', assignment['group_name'])
                self.assertEqual('', assignment['group_domain_id'])
                self.assertEqual('', assignment['group_domain_name'])
        self.assertTrue(includes_group_assignments)

        num_effective = len(
            PROVIDERS.assignment_api.list_role_assignments(effective=True)
        )
        self.assertGreater(num_effective, len(assignment_list))

        # Patch list_users_in_group to return GroupNotFound allowing us to
        # confirm that the exception is handled properly when effective
        # processing attempts to lookup users for a group that has been deleted
        # in the backend
        with mock.patch.object(
            PROVIDERS.identity_api, 'list_users_in_group', _group_not_found
        ):
            # Mocking a dependent function makes the cache invalid
            keystone.assignment.COMPUTED_ASSIGNMENTS_REGION.invalidate()
            assignment_list = PROVIDERS.assignment_api.list_role_assignments(
                effective=True
            )
        self.assertGreater(num_effective, len(assignment_list))

        # cleanup
        PROVIDERS.assignment_api.delete_grant(
            group_id=group['id'],
            domain_id=domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        # TODO(edmondsw) should cleanup users/groups as well, but that raises
        # LDAP read-only issues

    def test_add_duplicate_role_grant(self):
        """Granting the same role twice on a project raises Conflict."""
        roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.project_bar['id']
        )
        self.assertNotIn(self.role_admin['id'], roles_ref)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], self.project_bar['id'], self.role_admin['id']
        )
        self.assertRaises(
            exception.Conflict,
            PROVIDERS.assignment_api.add_role_to_user_and_project,
            self.user_foo['id'],
            self.project_bar['id'],
            self.role_admin['id'],
        )

    def test_get_role_by_user_and_project_with_user_in_group(self):
        """Test for get role by user and project, user was added into a group.

        Test Plan:

        - Create a user, a project & a group, add this user to group
        - Create roles and grant them to user and project
        - Check the role list get by the user and project was as expected

        """
        user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user_ref = PROVIDERS.identity_api.create_user(user_ref)

        project_ref = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(project_ref['id'], project_ref)

        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group_id = PROVIDERS.identity_api.create_group(group)['id']
        PROVIDERS.identity_api.add_user_to_group(user_ref['id'], group_id)

        role_ref_list = []
        for i in range(2):
            role_ref = unit.new_role_ref()
            PROVIDERS.role_api.create_role(role_ref['id'], role_ref)
            role_ref_list.append(role_ref)

            PROVIDERS.assignment_api.add_role_to_user_and_project(
                user_id=user_ref['id'],
                project_id=project_ref['id'],
                role_id=role_ref['id'],
            )

        role_list = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            user_ref['id'], project_ref['id']
        )
        self.assertEqual({r['id'] for r in role_ref_list}, set(role_list))

    def test_get_role_by_user_and_project(self):
        """Roles granted on a project appear in get_roles_for_user_and_project."""
        roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.project_bar['id']
        )
        self.assertNotIn(self.role_admin['id'], roles_ref)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'], self.project_bar['id'], self.role_admin['id']
        )
        roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.project_bar['id']
        )
        self.assertIn(self.role_admin['id'], roles_ref)
        self.assertNotIn(default_fixtures.MEMBER_ROLE_ID, roles_ref)

        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.project_bar['id'],
            default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.project_bar['id']
        )
        self.assertIn(self.role_admin['id'], roles_ref)
        self.assertIn(default_fixtures.MEMBER_ROLE_ID, roles_ref)

    def test_get_role_by_trustor_and_project(self):
        """Direct and inherited grants are returned for a trustor on a project."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        new_project = unit.new_project_ref(domain_id=new_domain['id'])
        PROVIDERS.resource_api.create_project(new_project['id'], new_project)
        role = self._create_role(domain_id=new_domain['id'])

        # Now create the grants (roles are defined in default_fixtures)
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user['id'],
            project_id=new_project['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user['id'],
            domain_id=new_domain['id'],
            role_id=role['id'],
            inherited_to_projects=True,
        )

        roles_ids = PROVIDERS.assignment_api.get_roles_for_trustor_and_project(
            new_user['id'], new_project['id']
        )
        self.assertEqual(2, len(roles_ids))
        self.assertIn(self.role_member['id'], roles_ids)
        self.assertIn(role['id'], roles_ids)

    def test_get_roles_for_user_and_domain(self):
        """Test for getting roles for user on a domain.

        Test Plan:

        - Create a domain, with 2 users
        - Check no roles yet exit
        - Give user1 two roles on the domain, user2 one role
        - Get roles on user1 and the domain - maybe sure we only get back the
          2 roles on user1
        - Delete both roles from user1
        - Check we get no roles back for user1 on domain

        """
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user1 = PROVIDERS.identity_api.create_user(new_user1)
        new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user2 = PROVIDERS.identity_api.create_user(new_user2)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=new_user1['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        # Now create the grants (roles are defined in default_fixtures)
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user1['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user1['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.OTHER_ROLE_ID,
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user2['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.ADMIN_ROLE_ID,
        )
        # Read back the roles for user1 on domain
        roles_ids = PROVIDERS.assignment_api.get_roles_for_user_and_domain(
            new_user1['id'], new_domain['id']
        )
        self.assertEqual(2, len(roles_ids))
        self.assertIn(self.role_member['id'], roles_ids)
        self.assertIn(self.role_other['id'], roles_ids)

        # Now delete both grants for user1
        PROVIDERS.assignment_api.delete_grant(
            user_id=new_user1['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        PROVIDERS.assignment_api.delete_grant(
            user_id=new_user1['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.OTHER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=new_user1['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))

    def test_get_roles_for_user_and_domain_returns_not_found(self):
        """Test errors raised when getting roles for user on a domain.

        Test Plan:

        - Check non-existing user gives UserNotFound
        - Check non-existing domain gives DomainNotFound

        """
        new_domain = self._get_domain_fixture()
        new_user1 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user1 = PROVIDERS.identity_api.create_user(new_user1)

        self.assertRaises(
            exception.UserNotFound,
            PROVIDERS.assignment_api.get_roles_for_user_and_domain,
            uuid.uuid4().hex,
            new_domain['id'],
        )

        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.assignment_api.get_roles_for_user_and_domain,
            new_user1['id'],
            uuid.uuid4().hex,
        )

    def test_get_roles_for_user_and_project_returns_not_found(self):
        """Unknown user or project raises the matching NotFound error."""
        self.assertRaises(
            exception.UserNotFound,
            PROVIDERS.assignment_api.get_roles_for_user_and_project,
            uuid.uuid4().hex,
            self.project_bar['id'],
        )

        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.assignment_api.get_roles_for_user_and_project,
            self.user_foo['id'],
            uuid.uuid4().hex,
        )

    def test_add_role_to_user_and_project_returns_not_found(self):
        """Unknown project or role raises the matching NotFound error."""
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.assignment_api.add_role_to_user_and_project,
            self.user_foo['id'],
            uuid.uuid4().hex,
            self.role_admin['id'],
        )

        self.assertRaises(
            exception.RoleNotFound,
            PROVIDERS.assignment_api.add_role_to_user_and_project,
            self.user_foo['id'],
            self.project_bar['id'],
            uuid.uuid4().hex,
        )

    def test_add_role_to_user_and_project_no_user(self):
        # If add_role_to_user_and_project and the user doesn't exist, then
        # no error.
        user_id_not_exist = uuid.uuid4().hex
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            user_id_not_exist, self.project_bar['id'], self.role_admin['id']
        )

    def test_remove_role_from_user_and_project(self):
        """Removing a granted role succeeds; removing it again raises NotFound."""
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user_foo['id'],
            self.project_bar['id'],
            default_fixtures.MEMBER_ROLE_ID,
        )
        PROVIDERS.assignment_api.remove_role_from_user_and_project(
            self.user_foo['id'],
            self.project_bar['id'],
            default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project(
            self.user_foo['id'], self.project_bar['id']
        )
        self.assertNotIn(default_fixtures.MEMBER_ROLE_ID, roles_ref)
        self.assertRaises(
            exception.NotFound,
            PROVIDERS.assignment_api.remove_role_from_user_and_project,
            self.user_foo['id'],
            self.project_bar['id'],
            default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_role_grant_by_user_and_project(self):
        """Grants created on a project are returned by list_grants."""
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=self.user_foo['id'], project_id=self.project_bar['id']
        )
        self.assertEqual(1, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            user_id=self.user_foo['id'],
            project_id=self.project_bar['id'],
            role_id=self.role_admin['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=self.user_foo['id'], project_id=self.project_bar['id']
        )
        self.assertIn(
            self.role_admin['id'], [role_ref['id'] for role_ref in roles_ref]
        )

        PROVIDERS.assignment_api.create_grant(
            user_id=self.user_foo['id'],
            project_id=self.project_bar['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=self.user_foo['id'], project_id=self.project_bar['id']
        )

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(self.role_admin['id'], roles_ref_ids)
        self.assertIn(default_fixtures.MEMBER_ROLE_ID, roles_ref_ids)

    def test_remove_role_grant_from_user_and_project(self):
        """Deleting a grant removes it; deleting again raises RoleAssignmentNotFound."""
        PROVIDERS.assignment_api.create_grant(
            user_id=self.user_foo['id'],
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=self.user_foo['id'], project_id=self.project_baz['id']
        )
        self.assertDictEqual(self.role_member, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            user_id=self.user_foo['id'],
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=self.user_foo['id'], project_id=self.project_baz['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            user_id=self.user_foo['id'],
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_role_assignment_by_project_not_found(self):
        """check_grant_role_id raises for non-existent project grants."""
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.check_grant_role_id,
            user_id=self.user_foo['id'],
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.check_grant_role_id,
            group_id=uuid.uuid4().hex,
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_role_assignment_by_domain_not_found(self):
        """check_grant_role_id raises for non-existent domain grants."""
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.check_grant_role_id,
            user_id=self.user_foo['id'],
            domain_id=CONF.identity.default_domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.check_grant_role_id,
            group_id=uuid.uuid4().hex,
            domain_id=CONF.identity.default_domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_del_role_assignment_by_project_not_found(self):
        """delete_grant raises for non-existent project grants."""
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            user_id=self.user_foo['id'],
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=uuid.uuid4().hex,
            project_id=self.project_baz['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_del_role_assignment_by_domain_not_found(self):
        """delete_grant raises for non-existent domain grants."""
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            user_id=self.user_foo['id'],
            domain_id=CONF.identity.default_domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=uuid.uuid4().hex,
            domain_id=CONF.identity.default_domain_id,
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_and_remove_role_grant_by_group_and_project(self):
        """Group grant on a project can be created, listed and deleted."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        PROVIDERS.identity_api.add_user_to_group(
            new_user['id'], new_group['id']
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], project_id=self.project_bar['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            group_id=new_group['id'],
            project_id=self.project_bar['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], project_id=self.project_bar['id']
        )
        self.assertDictEqual(self.role_member, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            group_id=new_group['id'],
            project_id=self.project_bar['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], project_id=self.project_bar['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=new_group['id'],
            project_id=self.project_bar['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_and_remove_role_grant_by_group_and_domain(self):
        """Group grant on a domain can be created, listed and deleted."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        PROVIDERS.identity_api.add_user_to_group(
            new_user['id'], new_group['id']
        )

        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))

        PROVIDERS.assignment_api.create_grant(
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertDictEqual(self.role_member, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_and_remove_correct_role_grant_from_a_mix(self):
        """Only the targeted grant is returned/removed among several others."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_project = unit.new_project_ref(domain_id=new_domain['id'])
        PROVIDERS.resource_api.create_project(new_project['id'], new_project)
        new_group = unit.new_group_ref(domain_id=new_domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group)
        new_group2 = unit.new_group_ref(domain_id=new_domain['id'])
        new_group2 = PROVIDERS.identity_api.create_group(new_group2)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        new_user2 = unit.new_user_ref(domain_id=new_domain['id'])
        new_user2 = PROVIDERS.identity_api.create_user(new_user2)
        PROVIDERS.identity_api.add_user_to_group(
            new_user['id'], new_group['id']
        )
        # First check we have no grants
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        # Now add the grant we are going to test for, and some others as
        # well just to make sure we get back the right one
        PROVIDERS.assignment_api.create_grant(
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        PROVIDERS.assignment_api.create_grant(
            group_id=new_group2['id'],
            domain_id=new_domain['id'],
            role_id=self.role_admin['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user2['id'],
            domain_id=new_domain['id'],
            role_id=self.role_admin['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            group_id=new_group['id'],
            project_id=new_project['id'],
            role_id=self.role_admin['id'],
        )

        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertDictEqual(self.role_member, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=new_group['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=new_group['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_and_remove_role_grant_by_user_and_domain(self):
        """User grant on a domain can be created, listed and deleted."""
        new_domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain)
        new_user = unit.new_user_ref(domain_id=new_domain['id'])
        new_user = PROVIDERS.identity_api.create_user(new_user)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=new_user['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            user_id=new_user['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=new_user['id'], domain_id=new_domain['id']
        )
        self.assertDictEqual(self.role_member, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            user_id=new_user['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=new_user['id'], domain_id=new_domain['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            user_id=new_user['id'],
            domain_id=new_domain['id'],
            role_id=default_fixtures.MEMBER_ROLE_ID,
        )

    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
        """Group grants on two different domains are kept separate."""
        group1_domain1_role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(
            group1_domain1_role['id'], group1_domain1_role
        )
        group1_domain2_role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(
            group1_domain2_role['id'], group1_domain2_role
        )
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = PROVIDERS.identity_api.create_group(group1)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], domain_id=domain1['id']
        )
        self.assertEqual(0, len(roles_ref))
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], domain_id=domain2['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'],
            domain_id=domain1['id'],
            role_id=group1_domain1_role['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'],
            domain_id=domain2['id'],
            role_id=group1_domain2_role['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], domain_id=domain1['id']
        )
        self.assertDictEqual(group1_domain1_role, roles_ref[0])
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], domain_id=domain2['id']
        )
        self.assertDictEqual(group1_domain2_role, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            group_id=group1['id'],
            domain_id=domain2['id'],
            role_id=group1_domain2_role['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], domain_id=domain2['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            group_id=group1['id'],
            domain_id=domain2['id'],
            role_id=group1_domain2_role['id'],
        )

    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
        """User grants on two different domains are kept separate."""
        user1_domain1_role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(
            user1_domain1_role['id'], user1_domain1_role
        )
        user1_domain2_role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(
            user1_domain2_role['id'], user1_domain2_role
        )
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = PROVIDERS.identity_api.create_user(user1)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], domain_id=domain1['id']
        )
        self.assertEqual(0, len(roles_ref))
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], domain_id=domain2['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'],
            domain_id=domain1['id'],
            role_id=user1_domain1_role['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'],
            domain_id=domain2['id'],
            role_id=user1_domain2_role['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], domain_id=domain1['id']
        )
        self.assertDictEqual(user1_domain1_role, roles_ref[0])
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], domain_id=domain2['id']
        )
        self.assertDictEqual(user1_domain2_role, roles_ref[0])

        PROVIDERS.assignment_api.delete_grant(
            user_id=user1['id'],
            domain_id=domain2['id'],
            role_id=user1_domain2_role['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], domain_id=domain2['id']
        )
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(
            exception.RoleAssignmentNotFound,
            PROVIDERS.assignment_api.delete_grant,
            user_id=user1['id'],
            domain_id=domain2['id'],
            role_id=user1_domain2_role['id'],
        )

    def test_role_grant_by_group_and_cross_domain_project(self):
        """Group grants on a project in a different domain behave correctly."""
        role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role1['id'], role1)
        role2 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role2['id'], role2)
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        group1 = unit.new_group_ref(domain_id=domain1['id'])
        group1 = PROVIDERS.identity_api.create_group(group1)
        project1 = unit.new_project_ref(domain_id=domain2['id'])
        PROVIDERS.resource_api.create_project(project1['id'], project1)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], project_id=project1['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'],
            project_id=project1['id'],
            role_id=role1['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            group_id=group1['id'],
            project_id=project1['id'],
            role_id=role2['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], project_id=project1['id']
        )

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(role1['id'], roles_ref_ids)
        self.assertIn(role2['id'], roles_ref_ids)

        PROVIDERS.assignment_api.delete_grant(
            group_id=group1['id'],
            project_id=project1['id'],
            role_id=role1['id'],
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            group_id=group1['id'], project_id=project1['id']
        )
        self.assertEqual(1, len(roles_ref))
        self.assertDictEqual(role2, roles_ref[0])

    def test_role_grant_by_user_and_cross_domain_project(self):
        """User grants on a project in a different domain behave correctly."""
        role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role1['id'], role1)
        role2 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role2['id'], role2)
        domain1 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        user1 = unit.new_user_ref(domain_id=domain1['id'])
        user1 = PROVIDERS.identity_api.create_user(user1)
        project1 = unit.new_project_ref(domain_id=domain2['id'])
        PROVIDERS.resource_api.create_project(project1['id'], project1)
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], project_id=project1['id']
        )
        self.assertEqual(0, len(roles_ref))
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'], project_id=project1['id'], role_id=role1['id']
        )
        PROVIDERS.assignment_api.create_grant(
            user_id=user1['id'], project_id=project1['id'], role_id=role2['id']
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], project_id=project1['id']
        )

        roles_ref_ids = []
        for ref in roles_ref:
            roles_ref_ids.append(ref['id'])
        self.assertIn(role1['id'], roles_ref_ids)
        self.assertIn(role2['id'], roles_ref_ids)

        PROVIDERS.assignment_api.delete_grant(
            user_id=user1['id'], project_id=project1['id'], role_id=role1['id']
        )
        roles_ref = PROVIDERS.assignment_api.list_grants(
            user_id=user1['id'], project_id=project1['id']
        )
        self.assertEqual(1, len(roles_ref))
        self.assertDictEqual(role2, roles_ref[0])

    def test_delete_user_grant_no_user(self):
        # Can delete a grant where the user doesn't exist.
role = unit.new_role_ref() role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) user_id = uuid.uuid4().hex PROVIDERS.assignment_api.create_grant( role_id, user_id=user_id, project_id=self.project_bar['id'] ) PROVIDERS.assignment_api.delete_grant( role_id, user_id=user_id, project_id=self.project_bar['id'] ) def test_delete_group_grant_no_group(self): # Can delete a grant where the group doesn't exist. role = unit.new_role_ref() role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) group_id = uuid.uuid4().hex PROVIDERS.assignment_api.create_grant( role_id, group_id=group_id, project_id=self.project_bar['id'] ) PROVIDERS.assignment_api.delete_grant( role_id, group_id=group_id, project_id=self.project_bar['id'] ) def test_grant_crud_throws_exception_if_invalid_role(self): """Ensure RoleNotFound thrown if role does not exist.""" def assert_role_not_found_exception(f, **kwargs): self.assertRaises( exception.RoleNotFound, f, role_id=uuid.uuid4().hex, **kwargs ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_resp = PROVIDERS.identity_api.create_user(user) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_resp = PROVIDERS.identity_api.create_group(group) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_resp = PROVIDERS.resource_api.create_project( project['id'], project ) for manager_call in [ PROVIDERS.assignment_api.create_grant, PROVIDERS.assignment_api.get_grant, ]: assert_role_not_found_exception( manager_call, user_id=user_resp['id'], project_id=project_resp['id'], ) assert_role_not_found_exception( manager_call, group_id=group_resp['id'], project_id=project_resp['id'], ) assert_role_not_found_exception( manager_call, user_id=user_resp['id'], domain_id=CONF.identity.default_domain_id, ) assert_role_not_found_exception( manager_call, group_id=group_resp['id'], domain_id=CONF.identity.default_domain_id, ) assert_role_not_found_exception( 
PROVIDERS.assignment_api.delete_grant, user_id=user_resp['id'], project_id=project_resp['id'], ) assert_role_not_found_exception( PROVIDERS.assignment_api.delete_grant, group_id=group_resp['id'], project_id=project_resp['id'], ) assert_role_not_found_exception( PROVIDERS.assignment_api.delete_grant, user_id=user_resp['id'], domain_id=CONF.identity.default_domain_id, ) assert_role_not_found_exception( PROVIDERS.assignment_api.delete_grant, group_id=group_resp['id'], domain_id=CONF.identity.default_domain_id, ) def test_multi_role_grant_by_user_group_on_project_domain(self): role_list = [] for _ in range(10): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain1['id']) group2 = PROVIDERS.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], group2['id']) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role_list[3]['id'], ) 
PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role_list[4]['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role_list[5]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=role_list[6]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=role_list[7]['id'], ) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], domain_id=domain1['id'] ) self.assertEqual(2, len(roles_ref)) self.assertIn(role_list[0], roles_ref) self.assertIn(role_list[1], roles_ref) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], domain_id=domain1['id'] ) self.assertEqual(2, len(roles_ref)) self.assertIn(role_list[2], roles_ref) self.assertIn(role_list[3], roles_ref) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(2, len(roles_ref)) self.assertIn(role_list[4], roles_ref) self.assertIn(role_list[5], roles_ref) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], project_id=project1['id'] ) self.assertEqual(2, len(roles_ref)) self.assertIn(role_list[6], roles_ref) self.assertIn(role_list[7], roles_ref) # Now test the alternate way of getting back lists of grants, # where user and group roles are combined. These should match # the above results. 
combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(4, len(combined_list)) self.assertIn(role_list[4]['id'], combined_list) self.assertIn(role_list[5]['id'], combined_list) self.assertIn(role_list[6]['id'], combined_list) self.assertIn(role_list[7]['id'], combined_list) combined_role_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_domain( user1['id'], domain1['id'] ) ) self.assertEqual(4, len(combined_role_list)) self.assertIn(role_list[0]['id'], combined_role_list) self.assertIn(role_list[1]['id'], combined_role_list) self.assertIn(role_list[2]['id'], combined_role_list) self.assertIn(role_list[3]['id'], combined_role_list) def test_multi_group_grants_on_project_domain(self): """Test multiple group roles for user on project and domain. Test Plan: - Create 6 roles - Create a domain, with a project, user and two groups - Make the user a member of both groups - Check no roles yet exit - Assign a role to each user and both groups on both the project and domain - Get a list of effective roles for the user on both the project and domain, checking we get back the correct three roles """ role_list = [] for _ in range(6): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain1['id']) group2 = PROVIDERS.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], group2['id']) roles_ref = 
PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role_list[3]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=role_list[4]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], project_id=project1['id'], role_id=role_list[5]['id'], ) # Read by the roles, ensuring we get the correct 3 roles for # both project and domain combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(3, len(combined_list)) self.assertIn(role_list[3]['id'], combined_list) self.assertIn(role_list[4]['id'], combined_list) self.assertIn(role_list[5]['id'], combined_list) combined_role_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_domain( user1['id'], domain1['id'] ) ) self.assertEqual(3, len(combined_role_list)) self.assertIn(role_list[0]['id'], combined_role_list) self.assertIn(role_list[1]['id'], combined_role_list) self.assertIn(role_list[2]['id'], combined_role_list) def test_delete_role_with_user_and_group_grants(self): role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = 
unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role1['id'] ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role1['id'] ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=role1['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role1['id'] ) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(1, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], project_id=project1['id'] ) self.assertEqual(1, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], domain_id=domain1['id'] ) self.assertEqual(1, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], domain_id=domain1['id'] ) self.assertEqual(1, len(roles_ref)) PROVIDERS.role_api.delete_role(role1['id']) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], domain_id=domain1['id'] ) self.assertEqual(0, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], domain_id=domain1['id'] ) self.assertEqual(0, len(roles_ref)) def test_list_role_assignment_by_domain(self): """Test listing of role assignment filtered by domain.""" test_plan = { # A domain with 3 users, 1 group, a spoiler domain and 2 roles. 
'entities': { 'domains': [{'users': 3, 'groups': 1}, 1], 'roles': 2, }, # Users 1 & 2 are in the group 'group_memberships': [{'group': 0, 'users': [1, 2]}], # Assign a role for user 0 and the group 'assignments': [ {'user': 0, 'role': 0, 'domain': 0}, {'group': 0, 'role': 1, 'domain': 0}, ], 'tests': [ # List all effective assignments for domain[0]. # Should get one direct user role and user roles for each of # the users in the group. { 'params': {'domain': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, { 'user': 1, 'role': 1, 'domain': 0, 'indirect': {'group': 0}, }, { 'user': 2, 'role': 1, 'domain': 0, 'indirect': {'group': 0}, }, ], }, # Using domain[1] should return nothing {'params': {'domain': 1, 'effective': True}, 'results': []}, ], } self.execute_assignment_plan(test_plan) def test_list_role_assignment_by_user_with_domain_group_roles(self): """Test listing assignments by user, with group roles on a domain.""" test_plan = { # A domain with 3 users, 3 groups, a spoiler domain # plus 3 roles. 'entities': { 'domains': [{'users': 3, 'groups': 3}, 1], 'roles': 3, }, # Users 1 & 2 are in the group 0, User 1 also in group 1 'group_memberships': [ {'group': 0, 'users': [0, 1]}, {'group': 1, 'users': [0]}, ], 'assignments': [ {'user': 0, 'role': 0, 'domain': 0}, {'group': 0, 'role': 1, 'domain': 0}, {'group': 1, 'role': 2, 'domain': 0}, # ...and two spoiler assignments {'user': 1, 'role': 1, 'domain': 0}, {'group': 2, 'role': 2, 'domain': 0}, ], 'tests': [ # List all effective assignments for user[0]. 
# Should get one direct user role and a user roles for each of # groups 0 and 1 { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, { 'user': 0, 'role': 1, 'domain': 0, 'indirect': {'group': 0}, }, { 'user': 0, 'role': 2, 'domain': 0, 'indirect': {'group': 1}, }, ], }, # Adding domain[0] as a filter should return the same data { 'params': {'user': 0, 'domain': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, { 'user': 0, 'role': 1, 'domain': 0, 'indirect': {'group': 0}, }, { 'user': 0, 'role': 2, 'domain': 0, 'indirect': {'group': 1}, }, ], }, # Using domain[1] should return nothing { 'params': {'user': 0, 'domain': 1, 'effective': True}, 'results': [], }, # Using user[2] should return nothing { 'params': {'user': 2, 'domain': 0, 'effective': True}, 'results': [], }, ], } self.execute_assignment_plan(test_plan) def test_list_role_assignment_using_sourced_groups(self): """Test listing assignments when restricted by source groups.""" test_plan = { # The default domain with 3 users, 3 groups, 3 projects, # plus 3 roles. 
'entities': { 'domains': { 'id': CONF.identity.default_domain_id, 'users': 3, 'groups': 3, 'projects': 3, }, 'roles': 3, }, # Users 0 & 1 are in the group 0, User 0 also in group 1 'group_memberships': [ {'group': 0, 'users': [0, 1]}, {'group': 1, 'users': [0]}, ], # Spread the assignments around - we want to be able to show that # if sourced by group, assignments from other sources are excluded 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'group': 0, 'role': 1, 'project': 1}, {'group': 1, 'role': 2, 'project': 0}, {'group': 1, 'role': 2, 'project': 1}, {'user': 2, 'role': 1, 'project': 1}, {'group': 2, 'role': 2, 'project': 2}, ], 'tests': [ # List all effective assignments sourced from groups 0 and 1 { 'params': { 'source_from_group_ids': [0, 1], 'effective': True, }, 'results': [ {'group': 0, 'role': 1, 'project': 1}, {'group': 1, 'role': 2, 'project': 0}, {'group': 1, 'role': 2, 'project': 1}, ], }, # Adding a role a filter should further restrict the entries { 'params': { 'source_from_group_ids': [0, 1], 'role': 2, 'effective': True, }, 'results': [ {'group': 1, 'role': 2, 'project': 0}, {'group': 1, 'role': 2, 'project': 1}, ], }, ], } self.execute_assignment_plan(test_plan) def test_list_role_assignment_using_sourced_groups_with_domains(self): """Test listing domain assignments when restricted by source groups.""" test_plan = { # A domain with 3 users, 3 groups, 3 projects, a second domain, # plus 3 roles. 
'entities': { 'domains': [{'users': 3, 'groups': 3, 'projects': 3}, 1], 'roles': 3, }, # Users 0 & 1 are in the group 0, User 0 also in group 1 'group_memberships': [ {'group': 0, 'users': [0, 1]}, {'group': 1, 'users': [0]}, ], # Spread the assignments around - we want to be able to show that # if sourced by group, assignments from other sources are excluded 'assignments': [ {'user': 0, 'role': 0, 'domain': 0}, {'group': 0, 'role': 1, 'domain': 1}, {'group': 1, 'role': 2, 'project': 0}, {'group': 1, 'role': 2, 'project': 1}, {'user': 2, 'role': 1, 'project': 1}, {'group': 2, 'role': 2, 'project': 2}, ], 'tests': [ # List all effective assignments sourced from groups 0 and 1 { 'params': { 'source_from_group_ids': [0, 1], 'effective': True, }, 'results': [ {'group': 0, 'role': 1, 'domain': 1}, {'group': 1, 'role': 2, 'project': 0}, {'group': 1, 'role': 2, 'project': 1}, ], }, # Adding a role a filter should further restrict the entries { 'params': { 'source_from_group_ids': [0, 1], 'role': 1, 'effective': True, }, 'results': [ {'group': 0, 'role': 1, 'domain': 1}, ], }, ], } self.execute_assignment_plan(test_plan) def test_list_role_assignment_fails_with_userid_and_source_groups(self): """Show we trap this unsupported internal combination of params.""" group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) self.assertRaises( exception.UnexpectedError, PROVIDERS.assignment_api.list_role_assignments, effective=True, user_id=self.user_foo['id'], source_from_group_ids=[group['id']], ) def test_list_user_project_ids_returns_not_found(self): self.assertRaises( exception.UserNotFound, PROVIDERS.assignment_api.list_projects_for_user, uuid.uuid4().hex, ) def test_delete_user_with_project_association(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], 
role_member) PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], self.project_bar['id'], role_member['id'] ) PROVIDERS.identity_api.delete_user(user['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.assignment_api.list_projects_for_user, user['id'], ) def test_delete_user_with_project_roles(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], self.project_bar['id'], self.role_member['id'] ) PROVIDERS.identity_api.delete_user(user['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.assignment_api.list_projects_for_user, user['id'], ) def test_delete_role_returns_not_found(self): self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.delete_role, uuid.uuid4().hex, ) def test_delete_project_with_role_assignments(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_foo['id'], project['id'], default_fixtures.MEMBER_ROLE_ID ) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.assignment_api.list_user_ids_for_project, project['id'], ) def test_delete_role_check_role_grant(self): role = unit.new_role_ref() alt_role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) PROVIDERS.role_api.create_role(alt_role['id'], alt_role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.project_bar['id'], role['id'] ) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_foo['id'], self.project_bar['id'], alt_role['id'] ) PROVIDERS.role_api.delete_role(role['id']) roles_ref = PROVIDERS.assignment_api.get_roles_for_user_and_project( self.user_foo['id'], self.project_bar['id'] ) self.assertNotIn(role['id'], roles_ref) 
self.assertIn(alt_role['id'], roles_ref) def test_list_projects_for_user(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertEqual(0, len(user_projects)) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_baz['id'], role_id=self.role_member['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertEqual(2, len(user_projects)) def test_list_projects_for_user_with_grants(self): # Create two groups each with a role on a different project, and # make user1 a member of both groups. Both these new projects # should now be included, along with any direct user grants. 
domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = PROVIDERS.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], group2['id']) # Create 3 grants, one user grant, the other two as group grants PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=self.role_admin['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], project_id=project2['id'], role_id=self.role_admin['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertEqual(3, len(user_projects)) def test_create_grant_no_user(self): # If call create_grant with a user that doesn't exist, doesn't fail. PROVIDERS.assignment_api.create_grant( self.role_other['id'], user_id=uuid.uuid4().hex, project_id=self.project_bar['id'], ) def test_create_grant_no_group(self): # If call create_grant with a group that doesn't exist, doesn't fail. PROVIDERS.assignment_api.create_grant( self.role_other['id'], group_id=uuid.uuid4().hex, project_id=self.project_bar['id'], ) def test_delete_group_removes_role_assignments(self): # When a group is deleted any role assignments for the group are # removed. 
def get_member_assignments(): assignments = PROVIDERS.assignment_api.list_role_assignments() return [ x for x in assignments if x['role_id'] == default_fixtures.MEMBER_ROLE_ID ] orig_member_assignments = get_member_assignments() # Create a group. new_group = unit.new_group_ref( domain_id=CONF.identity.default_domain_id ) new_group = PROVIDERS.identity_api.create_group(new_group) # Create a project. new_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(new_project['id'], new_project) # Assign a role to the group. PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], project_id=new_project['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) # Delete the group. PROVIDERS.identity_api.delete_group(new_group['id']) # Check that the role assignment for the group is gone member_assignments = get_member_assignments() self.assertThat( member_assignments, matchers.Equals(orig_member_assignments) ) def test_get_roles_for_groups_on_domain(self): """Test retrieving group domain roles. 
Test Plan: - Create a domain, three groups and three roles - Assign one an inherited and the others a non-inherited group role to the domain - Ensure that only the non-inherited roles are returned on the domain """ domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) group_list = [] group_id_list = [] role_list = [] for _ in range(3): group = unit.new_group_ref(domain_id=domain1['id']) group = PROVIDERS.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one is inherited PROVIDERS.assignment_api.create_grant( group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[2]['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True, ) # Now get the effective roles for the groups on the domain project. We # shouldn't get back the inherited role. role_refs = PROVIDERS.assignment_api.get_roles_for_groups( group_id_list, domain_id=domain1['id'] ) self.assertThat(role_refs, matchers.HasLength(2)) self.assertIn(role_list[0], role_refs) self.assertIn(role_list[1], role_refs) def test_get_roles_for_groups_on_project(self): """Test retrieving group project roles. 
Test Plan: - Create two domains, two projects, six groups and six roles - Project1 is in Domain1, Project2 is in Domain2 - Domain2/Project2 are spoilers - Assign a different direct group role to each project as well as both an inherited and non-inherited role to each domain - Get the group roles for Project 1 - depending on whether we have enabled inheritance, we should either get back just the direct role or both the direct one plus the inherited domain role from Domain 1 """ domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain2['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) group_list = [] group_id_list = [] role_list = [] for _ in range(6): group = unit.new_group_ref(domain_id=domain1['id']) group = PROVIDERS.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one inherited and one non-inherited on Domain1, # plus one on Project1 PROVIDERS.assignment_api.create_grant( group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group_list[2]['id'], project_id=project1['id'], role_id=role_list[2]['id'], ) # ...and a duplicate set of spoiler assignments to Domain2/Project2 PROVIDERS.assignment_api.create_grant( group_id=group_list[3]['id'], domain_id=domain2['id'], role_id=role_list[3]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[4]['id'], 
domain_id=domain2['id'], role_id=role_list[4]['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group_list[5]['id'], project_id=project2['id'], role_id=role_list[5]['id'], ) # With inheritance on, we should also get back the inherited role from # its owning domain. role_refs = PROVIDERS.assignment_api.get_roles_for_groups( group_id_list, project_id=project1['id'] ) self.assertThat(role_refs, matchers.HasLength(2)) self.assertIn(role_list[1], role_refs) self.assertIn(role_list[2], role_refs) def test_list_domains_for_groups(self): """Test retrieving domains for a list of groups. Test Plan: - Create three domains, three groups and one role - Assign a non-inherited group role to two domains, and an inherited group role to the third - Ensure only the domains with non-inherited roles are returned """ domain_list = [] group_list = [] group_id_list = [] for _ in range(3): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) domain_list.append(domain) group = unit.new_group_ref(domain_id=domain['id']) group = PROVIDERS.identity_api.create_group(group) group_list.append(group) group_id_list.append(group['id']) role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) # Assign the roles - one is inherited PROVIDERS.assignment_api.create_grant( group_id=group_list[0]['id'], domain_id=domain_list[0]['id'], role_id=role1['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[1]['id'], domain_id=domain_list[1]['id'], role_id=role1['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[2]['id'], domain_id=domain_list[2]['id'], role_id=role1['id'], inherited_to_projects=True, ) # Now list the domains that have roles for any of the 3 groups # We shouldn't get back domain[2] since that had an inherited role. 
domain_refs = PROVIDERS.assignment_api.list_domains_for_groups( group_id_list ) self.assertThat(domain_refs, matchers.HasLength(2)) self.assertIn(domain_list[0], domain_refs) self.assertIn(domain_list[1], domain_refs) def test_list_projects_for_groups(self): """Test retrieving projects for a list of groups. Test Plan: - Create two domains, four projects, seven groups and seven roles - Project1-3 are in Domain1, Project4 is in Domain2 - Domain2/Project4 are spoilers - Project1 and 2 have direct group roles, Project3 has no direct roles but should inherit a group role from Domain1 - Get the projects for the group roles that are assigned to Project1 Project2 and the inherited one on Domain1. Depending on whether we have enabled inheritance, we should either get back just the projects with direct roles (Project 1 and 2) or also Project3 due to its inherited role from Domain1. """ domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain1['id']) project1 = PROVIDERS.resource_api.create_project( project1['id'], project1 ) project2 = unit.new_project_ref(domain_id=domain1['id']) project2 = PROVIDERS.resource_api.create_project( project2['id'], project2 ) project3 = unit.new_project_ref(domain_id=domain1['id']) project3 = PROVIDERS.resource_api.create_project( project3['id'], project3 ) project4 = unit.new_project_ref(domain_id=domain2['id']) project4 = PROVIDERS.resource_api.create_project( project4['id'], project4 ) group_list = [] role_list = [] for _ in range(7): group = unit.new_group_ref(domain_id=domain1['id']) group = PROVIDERS.identity_api.create_group(group) group_list.append(group) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) # Assign the roles - one inherited and one non-inherited on Domain1, # plus one on Project1 and Project2 
PROVIDERS.assignment_api.create_grant( group_id=group_list[0]['id'], domain_id=domain1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[1]['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group_list[2]['id'], project_id=project1['id'], role_id=role_list[2]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[3]['id'], project_id=project2['id'], role_id=role_list[3]['id'], ) # ...and a few of spoiler assignments to Domain2/Project4 PROVIDERS.assignment_api.create_grant( group_id=group_list[4]['id'], domain_id=domain2['id'], role_id=role_list[4]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group_list[5]['id'], domain_id=domain2['id'], role_id=role_list[5]['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group_list[6]['id'], project_id=project4['id'], role_id=role_list[6]['id'], ) group_id_list = [ group_list[1]['id'], group_list[2]['id'], group_list[3]['id'], ] # With inheritance on, we should also get back the Project3 due to the # inherited role from its owning domain. project_refs = PROVIDERS.assignment_api.list_projects_for_groups( group_id_list ) self.assertThat(project_refs, matchers.HasLength(3)) self.assertIn(project1, project_refs) self.assertIn(project2, project_refs) self.assertIn(project3, project_refs) def test_update_role_no_name(self): # A user can update a role and not include the name. # description is picked just because it's not name. PROVIDERS.role_api.update_role( self.role_member['id'], {'description': uuid.uuid4().hex} ) # If the previous line didn't raise an exception then the test passes. def test_update_role_same_name(self): # A user can update a role and set the name to be the same as it was. 
PROVIDERS.role_api.update_role( self.role_member['id'], {'name': self.role_member['name']} ) # If the previous line didn't raise an exception then the test passes. def _test_list_role_assignment_containing_names(self, domain_role=False): # Create Refs new_domain = self._get_domain_fixture() if domain_role: new_role = unit.new_role_ref(domain_id=new_domain['id']) else: new_role = unit.new_role_ref() new_user = unit.new_user_ref(domain_id=new_domain['id']) new_project = unit.new_project_ref(domain_id=new_domain['id']) new_group = unit.new_group_ref(domain_id=new_domain['id']) # Create entities new_role = PROVIDERS.role_api.create_role(new_role['id'], new_role) new_user = PROVIDERS.identity_api.create_user(new_user) new_group = PROVIDERS.identity_api.create_group(new_group) PROVIDERS.resource_api.create_project(new_project['id'], new_project) PROVIDERS.assignment_api.create_grant( user_id=new_user['id'], project_id=new_project['id'], role_id=new_role['id'], ) PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], project_id=new_project['id'], role_id=new_role['id'], ) PROVIDERS.assignment_api.create_grant( domain_id=new_domain['id'], user_id=new_user['id'], role_id=new_role['id'], ) # Get the created assignments with the include_names flag _asgmt_prj = PROVIDERS.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id'], include_names=True, ) _asgmt_grp = PROVIDERS.assignment_api.list_role_assignments( group_id=new_group['id'], project_id=new_project['id'], include_names=True, ) _asgmt_dmn = PROVIDERS.assignment_api.list_role_assignments( domain_id=new_domain['id'], user_id=new_user['id'], include_names=True, ) # Make sure we can get back the correct number of assignments self.assertThat(_asgmt_prj, matchers.HasLength(1)) self.assertThat(_asgmt_grp, matchers.HasLength(1)) self.assertThat(_asgmt_dmn, matchers.HasLength(1)) # get the first assignment first_asgmt_prj = _asgmt_prj[0] first_asgmt_grp = _asgmt_grp[0] first_asgmt_dmn 
= _asgmt_dmn[0] # Assert the names are correct in the project response self.assertEqual(new_project['name'], first_asgmt_prj['project_name']) self.assertEqual( new_project['domain_id'], first_asgmt_prj['project_domain_id'] ) self.assertEqual(new_user['name'], first_asgmt_prj['user_name']) self.assertEqual( new_user['domain_id'], first_asgmt_prj['user_domain_id'] ) self.assertEqual(new_role['name'], first_asgmt_prj['role_name']) if domain_role: self.assertEqual( new_role['domain_id'], first_asgmt_prj['role_domain_id'] ) # Assert the names are correct in the group response self.assertEqual(new_group['name'], first_asgmt_grp['group_name']) self.assertEqual( new_group['domain_id'], first_asgmt_grp['group_domain_id'] ) self.assertEqual(new_project['name'], first_asgmt_grp['project_name']) self.assertEqual( new_project['domain_id'], first_asgmt_grp['project_domain_id'] ) self.assertEqual(new_role['name'], first_asgmt_grp['role_name']) if domain_role: self.assertEqual( new_role['domain_id'], first_asgmt_grp['role_domain_id'] ) # Assert the names are correct in the domain response self.assertEqual(new_domain['name'], first_asgmt_dmn['domain_name']) self.assertEqual(new_user['name'], first_asgmt_dmn['user_name']) self.assertEqual( new_user['domain_id'], first_asgmt_dmn['user_domain_id'] ) self.assertEqual(new_role['name'], first_asgmt_dmn['role_name']) if domain_role: self.assertEqual( new_role['domain_id'], first_asgmt_dmn['role_domain_id'] ) def test_list_role_assignment_containing_names_global_role(self): self._test_list_role_assignment_containing_names() def test_list_role_assignment_containing_names_domain_role(self): self._test_list_role_assignment_containing_names(domain_role=True) def test_list_role_assignment_does_not_contain_names(self): """Test names are not included with list role assignments. 
Scenario: - names are NOT included by default - names are NOT included when include_names=False """ def assert_does_not_contain_names(assignment): first_asgmt_prj = assignment[0] self.assertNotIn('project_name', first_asgmt_prj) self.assertNotIn('project_domain_id', first_asgmt_prj) self.assertNotIn('user_name', first_asgmt_prj) self.assertNotIn('user_domain_id', first_asgmt_prj) self.assertNotIn('role_name', first_asgmt_prj) self.assertNotIn('role_domain_id', first_asgmt_prj) # Create Refs new_role = unit.new_role_ref() new_domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=new_domain['id']) new_project = unit.new_project_ref(domain_id=new_domain['id']) # Create entities new_role = PROVIDERS.role_api.create_role(new_role['id'], new_role) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.resource_api.create_project(new_project['id'], new_project) PROVIDERS.assignment_api.create_grant( user_id=new_user['id'], project_id=new_project['id'], role_id=new_role['id'], ) # Get the created assignments with NO include_names flag role_assign_without_names = ( PROVIDERS.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id'] ) ) assert_does_not_contain_names(role_assign_without_names) # Get the created assignments with include_names=False role_assign_without_names = ( PROVIDERS.assignment_api.list_role_assignments( user_id=new_user['id'], project_id=new_project['id'], include_names=False, ) ) assert_does_not_contain_names(role_assign_without_names) def test_delete_user_assignments_user_same_id_as_group(self): """Test deleting user assignments when user_id == group_id. In this scenario, only user assignments must be deleted (i.e. USER_DOMAIN or USER_PROJECT). Test plan: * Create a user and a group with the same ID; * Create four roles and assign them to both user and group; * Delete all user assignments; * Group assignments must stay intact. 
""" # Create a common ID common_id = uuid.uuid4().hex # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) # Create a user user = unit.new_user_ref( id=common_id, domain_id=CONF.identity.default_domain_id ) user = PROVIDERS.identity_api.driver.create_user(common_id, user) self.assertEqual(common_id, user['id']) # Create a group group = unit.new_group_ref( id=common_id, domain_id=CONF.identity.default_domain_id ) group = PROVIDERS.identity_api.driver.create_group(common_id, group) self.assertEqual(common_id, group['id']) # Create four roles roles = [] for _ in range(4): role = unit.new_role_ref() roles.append(PROVIDERS.role_api.create_role(role['id'], role)) # Assign roles for user PROVIDERS.assignment_api.driver.create_grant( user_id=user['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[0]['id'], ) PROVIDERS.assignment_api.driver.create_grant( user_id=user['id'], project_id=project['id'], role_id=roles[1]['id'], ) # Assign roles for group PROVIDERS.assignment_api.driver.create_grant( group_id=group['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[2]['id'], ) PROVIDERS.assignment_api.driver.create_grant( group_id=group['id'], project_id=project['id'], role_id=roles[3]['id'], ) # Make sure they were assigned user_assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'] ) self.assertThat(user_assignments, matchers.HasLength(2)) group_assignments = PROVIDERS.assignment_api.list_role_assignments( group_id=group['id'] ) self.assertThat(group_assignments, matchers.HasLength(2)) # Delete user assignments PROVIDERS.assignment_api.delete_user_assignments(user_id=user['id']) # Assert only user assignments were deleted user_assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'] ) self.assertThat(user_assignments, matchers.HasLength(0)) group_assignments = 
PROVIDERS.assignment_api.list_role_assignments( group_id=group['id'] ) self.assertThat(group_assignments, matchers.HasLength(2)) # Make sure these remaining assignments are group-related for assignment in group_assignments: self.assertThat(assignment.keys(), matchers.Contains('group_id')) def test_delete_group_assignments_group_same_id_as_user(self): """Test deleting group assignments when group_id == user_id. In this scenario, only group assignments must be deleted (i.e. GROUP_DOMAIN or GROUP_PROJECT). Test plan: * Create a group and a user with the same ID; * Create four roles and assign them to both group and user; * Delete all group assignments; * User assignments must stay intact. """ # Create a common ID common_id = uuid.uuid4().hex # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) # Create a user user = unit.new_user_ref( id=common_id, domain_id=CONF.identity.default_domain_id ) user = PROVIDERS.identity_api.driver.create_user(common_id, user) self.assertEqual(common_id, user['id']) # Create a group group = unit.new_group_ref( id=common_id, domain_id=CONF.identity.default_domain_id ) group = PROVIDERS.identity_api.driver.create_group(common_id, group) self.assertEqual(common_id, group['id']) # Create four roles roles = [] for _ in range(4): role = unit.new_role_ref() roles.append(PROVIDERS.role_api.create_role(role['id'], role)) # Assign roles for user PROVIDERS.assignment_api.driver.create_grant( user_id=user['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[0]['id'], ) PROVIDERS.assignment_api.driver.create_grant( user_id=user['id'], project_id=project['id'], role_id=roles[1]['id'], ) # Assign roles for group PROVIDERS.assignment_api.driver.create_grant( group_id=group['id'], domain_id=CONF.identity.default_domain_id, role_id=roles[2]['id'], ) PROVIDERS.assignment_api.driver.create_grant( group_id=group['id'], 
project_id=project['id'], role_id=roles[3]['id'], ) # Make sure they were assigned user_assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'] ) self.assertThat(user_assignments, matchers.HasLength(2)) group_assignments = PROVIDERS.assignment_api.list_role_assignments( group_id=group['id'] ) self.assertThat(group_assignments, matchers.HasLength(2)) # Delete group assignments PROVIDERS.assignment_api.delete_group_assignments(group_id=group['id']) # Assert only group assignments were deleted group_assignments = PROVIDERS.assignment_api.list_role_assignments( group_id=group['id'] ) self.assertThat(group_assignments, matchers.HasLength(0)) user_assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'] ) self.assertThat(user_assignments, matchers.HasLength(2)) # Make sure these remaining assignments are user-related for assignment in group_assignments: self.assertThat(assignment.keys(), matchers.Contains('user_id')) def test_remove_foreign_assignments_when_deleting_a_domain(self): # A user and a group are in default domain and have assigned a role on # two new domains. This test makes sure that when one of the new # domains is deleted, the role assignments for the user and the group # from the default domain are deleted only on that domain. 
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) role = unit.new_role_ref() role = PROVIDERS.role_api.create_role(role['id'], role) new_domains = [unit.new_domain_ref(), unit.new_domain_ref()] for new_domain in new_domains: PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) PROVIDERS.assignment_api.create_grant( group_id=group['id'], domain_id=new_domain['id'], role_id=role['id'], ) PROVIDERS.assignment_api.create_grant( user_id=self.user_two['id'], domain_id=new_domain['id'], role_id=role['id'], ) # Check there are 4 role assignments for that role role_assignments = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertThat(role_assignments, matchers.HasLength(4)) # Delete first new domain and check only 2 assignments were left PROVIDERS.resource_api.update_domain( new_domains[0]['id'], {'enabled': False} ) PROVIDERS.resource_api.delete_domain(new_domains[0]['id']) role_assignments = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertThat(role_assignments, matchers.HasLength(2)) # Delete second new domain and check no assignments were left PROVIDERS.resource_api.update_domain( new_domains[1]['id'], {'enabled': False} ) PROVIDERS.resource_api.delete_domain(new_domains[1]['id']) role_assignments = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertEqual([], role_assignments) class InheritanceTests(AssignmentTestHelperMixin): def test_role_assignments_user_domain_to_project_inheritance(self): test_plan = { 'entities': {'domains': {'users': 2, 'projects': 1}, 'roles': 3}, 'assignments': [ {'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, { 'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': True, }, {'user': 1, 'role': 1, 'project': 0}, ], 'tests': [ # List all direct assignments for user[0] { 'params': {'user': 0}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, 
{'user': 0, 'role': 1, 'project': 0}, { 'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': 'projects', }, ], }, # Now the effective ones - so the domain role should turn into # a project role { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, {'user': 0, 'role': 1, 'project': 0}, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}, }, ], }, # Narrow down to effective roles for user[0] and project[0] { 'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 1, 'project': 0}, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}, }, ], }, ], } self.execute_assignment_plan(test_plan) def _test_crud_inherited_and_direct_assignment(self, **kwargs): """Test inherited and direct assignments for the actor and target. Ensure it is possible to create both inherited and direct role assignments for the same actor on the same target. The actor and the target are specified in the kwargs as ('user_id' or 'group_id') and ('project_id' or 'domain_id'), respectively. 
""" # Create a new role to avoid assignments loaded from default fixtures role = unit.new_role_ref() role = PROVIDERS.role_api.create_role(role['id'], role) # Define the common assignment entity assignment_entity = {'role_id': role['id']} assignment_entity.update(kwargs) # Define assignments under test direct_assignment_entity = assignment_entity.copy() inherited_assignment_entity = assignment_entity.copy() inherited_assignment_entity['inherited_to_projects'] = 'projects' # Create direct assignment and check grants PROVIDERS.assignment_api.create_grant( inherited_to_projects=False, **assignment_entity ) grants = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertThat(grants, matchers.HasLength(1)) self.assertIn(direct_assignment_entity, grants) # Now add inherited assignment and check grants PROVIDERS.assignment_api.create_grant( inherited_to_projects=True, **assignment_entity ) grants = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertThat(grants, matchers.HasLength(2)) self.assertIn(direct_assignment_entity, grants) self.assertIn(inherited_assignment_entity, grants) # Delete both and check grants PROVIDERS.assignment_api.delete_grant( inherited_to_projects=False, **assignment_entity ) PROVIDERS.assignment_api.delete_grant( inherited_to_projects=True, **assignment_entity ) grants = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertEqual([], grants) def test_crud_inherited_and_direct_assignment_for_user_on_domain(self): self._test_crud_inherited_and_direct_assignment( user_id=self.user_foo['id'], domain_id=CONF.identity.default_domain_id, ) def test_crud_inherited_and_direct_assignment_for_group_on_domain(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) self._test_crud_inherited_and_direct_assignment( group_id=group['id'], domain_id=CONF.identity.default_domain_id ) def 
test_crud_inherited_and_direct_assignment_for_user_on_project(self): self._test_crud_inherited_and_direct_assignment( user_id=self.user_foo['id'], project_id=self.project_baz['id'] ) def test_crud_inherited_and_direct_assignment_for_group_on_project(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) self._test_crud_inherited_and_direct_assignment( group_id=group['id'], project_id=self.project_baz['id'] ) def test_inherited_role_grants_for_user(self): """Test inherited user roles. Test Plan: - Enable OS-INHERIT extension - Create 3 roles - Create a domain, with a project and a user - Check no roles yet exit - Assign a direct user role to the project and a (non-inherited) user role to the domain - Get a list of effective roles - should only get the one direct role - Now add an inherited user role to the domain - Get a list of effective roles - should have two roles, one direct and one by virtue of the inherited user role - Also get effective roles for the domain - the role marked as inherited should not show up """ role_list = [] for _ in range(3): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) # Create the first two roles - the domain one is not inherited PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], ) # Now get the 
effective roles for the user and project, this # should only include the direct role assignment on the project combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(1, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) # Now add an inherited role on the domain PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True, ) # Now get the effective roles for the user and project again, this # should now include the inherited role on the domain combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(2, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[2]['id'], combined_list) # Finally, check that the inherited role does not appear as a valid # directly assigned role on the domain itself combined_role_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_domain( user1['id'], domain1['id'] ) ) self.assertEqual(1, len(combined_role_list)) self.assertIn(role_list[1]['id'], combined_role_list) # TODO(henry-nash): The test above uses get_roles_for_user_and_project # and get_roles_for_user_and_domain, which will, in a subsequent patch, # be re-implemented to simply call list_role_assignments (see blueprint # remove-role-metadata). # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once get_roles_for_user_and # project/domain have been re-implemented then the manual tests above # can be refactored to simply ensure it gives the same answers. test_plan = { # A domain with a user & project, plus 3 roles. 
'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 3}, 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 1, 'domain': 0}, { 'user': 0, 'role': 2, 'domain': 0, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] on project[0]. # Should get one direct role and one inherited role. { 'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0}, }, ], }, # Ensure effective mode on the domain does not list the # inherited role on that domain { 'params': {'user': 0, 'domain': 0, 'effective': True}, 'results': [{'user': 0, 'role': 1, 'domain': 0}], }, # Ensure non-inherited mode also only returns the non-inherited # role on the domain { 'params': {'user': 0, 'domain': 0, 'inherited': False}, 'results': [{'user': 0, 'role': 1, 'domain': 0}], }, ], } self.execute_assignment_plan(test_plan) def test_inherited_role_grants_for_group(self): """Test inherited group roles. 
Test Plan: - Enable OS-INHERIT extension - Create 4 roles - Create a domain, with a project, user and two groups - Make the user a member of both groups - Check no roles yet exit - Assign a direct user role to the project and a (non-inherited) group role on the domain - Get a list of effective roles - should only get the one direct role - Now add two inherited group roles to the domain - Get a list of effective roles - should have three roles, one direct and two by virtue of inherited group roles """ role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain1['id']) group2 = PROVIDERS.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user1['id'], group2['id']) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(0, len(roles_ref)) # Create two roles - the domain one is not inherited PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role_list[1]['id'], ) # Now get the effective roles for the user and project, this # should only include the direct role assignment on the project combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(1, len(combined_list)) 
self.assertIn(role_list[0]['id'], combined_list) # Now add to more group roles, both inherited, to the domain PROVIDERS.assignment_api.create_grant( group_id=group2['id'], domain_id=domain1['id'], role_id=role_list[2]['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], domain_id=domain1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) # Now get the effective roles for the user and project again, this # should now include the inherited roles on the domain combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(3, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[2]['id'], combined_list) self.assertIn(role_list[3]['id'], combined_list) # TODO(henry-nash): The test above uses get_roles_for_user_and_project # which will, in a subsequent patch, be re-implemented to simply call # list_role_assignments (see blueprint remove-role-metadata). # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once # get_roles_for_user_and_project has been re-implemented then the # manual tests above can be refactored to simply ensure it gives # the same answers. test_plan = { # A domain with a user and project, 2 groups, plus 4 roles. 'entities': { 'domains': {'users': 1, 'projects': 1, 'groups': 2}, 'roles': 4, }, 'group_memberships': [ {'group': 0, 'users': [0]}, {'group': 1, 'users': [0]}, ], 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'group': 0, 'role': 1, 'domain': 0}, { 'group': 1, 'role': 2, 'domain': 0, 'inherited_to_projects': True, }, { 'group': 1, 'role': 3, 'domain': 0, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] on project[0]. # Should get one direct role and both inherited roles, but # not the direct one on domain[0], even though user[0] is # in group[0]. 
{ 'params': {'user': 0, 'project': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'domain': 0, 'group': 1}, }, { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'domain': 0, 'group': 1}, }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_grants(self): """Test inherited user roles. Test Plan: - Enable OS-INHERIT extension - Create a domain, with two projects and a user - Assign an inherited user role on the domain, as well as a direct user role to a separate project in a different domain - Get a list of projects for user, should return all three projects """ domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Create 2 grants, one on a project and one inherited grant # on the domain PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain['id'], role_id=self.role_admin['id'], inherited_to_projects=True, ) # Should get back all three projects, one by virtue of the direct # grant, plus both projects in the domain user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertEqual(3, len(user_projects)) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. 
Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with 1 project, plus a second domain with 2 projects, # as well as a user. Also, create 2 roles. 'entities': { 'domains': [{'projects': 1}, {'users': 1, 'projects': 2}], 'roles': 2, }, 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'domain': 1, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] # Should get one direct role plus one inherited role for each # project in domain { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 1}, }, { 'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 1}, }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_user_project_grants(self): """Test inherited role assignments for users on nested projects. Test Plan: - Enable OS-INHERIT extension - Create a hierarchy of projects with one root and one leaf project - Assign an inherited user role on root project - Assign a non-inherited user role on root project - Get a list of projects for user, should return both projects - Disable OS-INHERIT extension - Get a list of projects for user, should return only root project """ # Enable OS-INHERIT extension root_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) root_project = PROVIDERS.resource_api.create_project( root_project['id'], root_project ) leaf_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=root_project['id'], ) leaf_project = PROVIDERS.resource_api.create_project( leaf_project['id'], leaf_project ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) # Grant inherited user role PROVIDERS.assignment_api.create_grant( user_id=user['id'], 
project_id=root_project['id'], role_id=self.role_admin['id'], inherited_to_projects=True, ) # Grant non-inherited user role PROVIDERS.assignment_api.create_grant( user_id=user['id'], project_id=root_project['id'], role_id=self.role_member['id'], ) # Should get back both projects: because the direct role assignment for # the root project and inherited role assignment for leaf project user_projects = PROVIDERS.assignment_api.list_projects_for_user( user['id'] ) self.assertEqual(2, len(user_projects)) self.assertIn(root_project, user_projects) self.assertIn(leaf_project, user_projects) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a project and sub-project, plus a user. # Also, create 2 roles. 'entities': { 'domains': { 'id': CONF.identity.default_domain_id, 'users': 1, 'projects': {'project': 1}, }, 'roles': 2, }, # A direct role and an inherited role on the parent 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] - should get back # one direct role plus one inherited role. { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'project': 0}, }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_group_grants(self): """Test inherited group roles. 
Test Plan: - Enable OS-INHERIT extension - Create two domains, each with two projects - Create a user and group - Make the user a member of the group - Assign a user role two projects, an inherited group role to one domain and an inherited regular role on the other domain - Get a list of projects for user, should return both pairs of projects from the domain, plus the one separate project """ domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) project3 = unit.new_project_ref(domain_id=domain2['id']) PROVIDERS.resource_api.create_project(project3['id'], project3) project4 = unit.new_project_ref(domain_id=domain2['id']) PROVIDERS.resource_api.create_project(project4['id'], project4) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) # Create 4 grants: # - one user grant on a project in domain2 # - one user grant on a project in the default domain # - one inherited user grant on domain # - one inherited group grant on domain2 PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project3['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain['id'], role_id=self.role_admin['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], 
domain_id=domain2['id'], role_id=self.role_admin['id'], inherited_to_projects=True, ) # Should get back all five projects, but without a duplicate for # project3 (since it has both a direct user role and an inherited role) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertEqual(5, len(user_projects)) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a 1 project, plus a second domain with 2 projects, # as well as a user & group and a 3rd domain with 2 projects. # Also, created 2 roles. 'entities': { 'domains': [ {'projects': 1}, {'users': 1, 'groups': 1, 'projects': 2}, {'projects': 2}, ], 'roles': 2, }, 'group_memberships': [{'group': 0, 'users': [0]}], 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 0, 'project': 3}, { 'user': 0, 'role': 1, 'domain': 1, 'inherited_to_projects': True, }, { 'user': 0, 'role': 1, 'domain': 2, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] # Should get back both direct roles plus roles on both projects # from each domain. Duplicates should not be filtered out. 
{ 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 3}, {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 1}, }, { 'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 1}, }, { 'user': 0, 'role': 1, 'project': 3, 'indirect': {'domain': 2}, }, { 'user': 0, 'role': 1, 'project': 4, 'indirect': {'domain': 2}, }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_projects_for_user_with_inherited_group_project_grants(self): """Test inherited role assignments for groups on nested projects. Test Plan: - Enable OS-INHERIT extension - Create a hierarchy of projects with one root and one leaf project - Assign an inherited group role on root project - Assign a non-inherited group role on root project - Get a list of projects for user, should return both projects - Disable OS-INHERIT extension - Get a list of projects for user, should return only root project """ root_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) root_project = PROVIDERS.resource_api.create_project( root_project['id'], root_project ) leaf_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=root_project['id'], ) leaf_project = PROVIDERS.resource_api.create_project( leaf_project['id'], leaf_project ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) # Grant inherited group role PROVIDERS.assignment_api.create_grant( group_id=group['id'], project_id=root_project['id'], role_id=self.role_admin['id'], inherited_to_projects=True, ) # Grant non-inherited group role PROVIDERS.assignment_api.create_grant( group_id=group['id'], project_id=root_project['id'], role_id=self.role_member['id'], ) # Should get 
back both projects: because the direct role assignment for # the root project and inherited role assignment for leaf project user_projects = PROVIDERS.assignment_api.list_projects_for_user( user['id'] ) self.assertEqual(2, len(user_projects)) self.assertIn(root_project, user_projects) self.assertIn(leaf_project, user_projects) # TODO(henry-nash): The test above uses list_projects_for_user # which may, in a subsequent patch, be re-implemented to call # list_role_assignments and then report only the distinct projects. # # The test plan below therefore mirrors this test, to ensure that # list_role_assignments works the same. Once list_projects_for_user # has been re-implemented then the manual tests above can be # refactored. test_plan = { # A domain with a project and sub-project, plus a user. # Also, create 2 roles. 'entities': { 'domains': { 'id': CONF.identity.default_domain_id, 'users': 1, 'groups': 1, 'projects': {'project': 1}, }, 'roles': 2, }, 'group_memberships': [{'group': 0, 'users': [0]}], # A direct role and an inherited role on the parent 'assignments': [ {'group': 0, 'role': 0, 'project': 0}, { 'group': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True, }, ], 'tests': [ # List all effective assignments for user[0] - should get back # one direct role plus one inherited role. { 'params': {'user': 0, 'effective': True}, 'results': [ { 'user': 0, 'role': 0, 'project': 0, 'indirect': {'group': 0}, }, { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'group': 0, 'project': 0}, }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_assignments_for_tree(self): """Test we correctly list direct assignments for a tree.""" # Enable OS-INHERIT extension test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 
'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1, }, 'roles': 4, }, 'assignments': [ # Direct assignment to projects 1 and 2 {'user': 0, 'role': 0, 'project': 1}, {'user': 0, 'role': 1, 'project': 2}, # Also an inherited assignment on project 1 { 'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': True, }, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}, ], 'tests': [ # List all assignments for project 1 and its subtree. { 'params': {'project': 1, 'include_subtree': True}, 'results': [ # Only the actual assignments should be returned, no # expansion of inherited assignments {'user': 0, 'role': 0, 'project': 1}, {'user': 0, 'role': 1, 'project': 2}, { 'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': 'projects', }, ], } ], } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree(self): """Test we correctly list effective assignments for a tree.""" test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1, }, 'roles': 4, }, 'assignments': [ # An inherited assignment on project 1 { 'user': 0, 'role': 1, 'project': 1, 'inherited_to_projects': True, }, # A direct assignment to project 2 {'user': 0, 'role': 2, 'project': 2}, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}, ], 'tests': [ # List all effective assignments for project 1 and its subtree. 
{ 'params': { 'project': 1, 'effective': True, 'include_subtree': True, }, 'results': [ # The inherited assignment on project 1 should appear only # on its children { 'user': 0, 'role': 1, 'project': 2, 'indirect': {'project': 1}, }, { 'user': 0, 'role': 1, 'project': 3, 'indirect': {'project': 1}, }, # And finally the direct assignment on project 2 {'user': 0, 'role': 2, 'project': 2}, ], } ], } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree_with_mixed_assignments(self): """Test that we correctly combine assignments for a tree. In this test we want to ensure that when asking for a list of assignments in a subtree, any assignments inherited from above the subtree are correctly combined with any assignments within the subtree itself. """ test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ ______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 2 users, 1 group and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 2, 'groups': 1, }, 'roles': 4, }, # Both users are part of the same group 'group_memberships': [{'group': 0, 'users': [0, 1]}], # We are going to ask for listing of assignment on project 1 and # it's subtree. So first we'll add two inherited assignments above # this (one user and one for a group that contains this user). 
'assignments': [ { 'user': 0, 'role': 0, 'project': 0, 'inherited_to_projects': True, }, { 'group': 0, 'role': 1, 'project': 0, 'inherited_to_projects': True, }, # Now an inherited assignment on project 1 itself, # which should ONLY show up on its children { 'user': 0, 'role': 2, 'project': 1, 'inherited_to_projects': True, }, # ...and a direct assignment on one of those # children {'user': 0, 'role': 3, 'project': 2}, # The rest are spoiler assignments {'user': 0, 'role': 2, 'project': 5}, {'user': 0, 'role': 3, 'project': 4}, ], 'tests': [ # List all effective assignments for project 1 and its subtree. { 'params': { 'project': 1, 'user': 0, 'effective': True, 'include_subtree': True, }, 'results': [ # First, we should see the inherited user assignment from # project 0 on all projects in the subtree { 'user': 0, 'role': 0, 'project': 1, 'indirect': {'project': 0}, }, { 'user': 0, 'role': 0, 'project': 2, 'indirect': {'project': 0}, }, { 'user': 0, 'role': 0, 'project': 3, 'indirect': {'project': 0}, }, # Also the inherited group assignment from project 0 on # the subtree { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'project': 0, 'group': 0}, }, { 'user': 0, 'role': 1, 'project': 2, 'indirect': {'project': 0, 'group': 0}, }, { 'user': 0, 'role': 1, 'project': 3, 'indirect': {'project': 0, 'group': 0}, }, # The inherited assignment on project 1 should appear only # on its children { 'user': 0, 'role': 2, 'project': 2, 'indirect': {'project': 1}, }, { 'user': 0, 'role': 2, 'project': 3, 'indirect': {'project': 1}, }, # And finally the direct assignment on project 2 {'user': 0, 'role': 3, 'project': 2}, ], } ], } self.execute_assignment_plan(test_plan) def test_list_effective_assignments_for_tree_with_domain_assignments(self): """Test we correctly honor domain inherited assignments on the tree.""" test_plan = { # Create a domain with a project hierarchy 3 levels deep: # # project 0 # ____________|____________ # | | # project 1 project 4 # ______|_____ 
______|_____ # | | | | # project 2 project 3 project 5 project 6 # # Also, create 1 user and 4 roles. 'entities': { 'domains': { 'projects': {'project': [{'project': 2}, {'project': 2}]}, 'users': 1, }, 'roles': 4, }, 'assignments': [ # An inherited assignment on the domain (which should be # applied to all the projects) { 'user': 0, 'role': 1, 'domain': 0, 'inherited_to_projects': True, }, # A direct assignment to project 2 {'user': 0, 'role': 2, 'project': 2}, # ...and two spoiler assignments, one to the root and one # to project 4 {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 4}, ], 'tests': [ # List all effective assignments for project 1 and its subtree. { 'params': { 'project': 1, 'effective': True, 'include_subtree': True, }, 'results': [ # The inherited assignment from the domain should appear # only on the part of the subtree we are interested in { 'user': 0, 'role': 1, 'project': 1, 'indirect': {'domain': 0}, }, { 'user': 0, 'role': 1, 'project': 2, 'indirect': {'domain': 0}, }, { 'user': 0, 'role': 1, 'project': 3, 'indirect': {'domain': 0}, }, # And finally the direct assignment on project 2 {'user': 0, 'role': 2, 'project': 2}, ], } ], } self.execute_assignment_plan(test_plan) def test_list_user_ids_for_project_with_inheritance(self): test_plan = { # A domain with a project and sub-project, plus four users, # two groups, as well as 4 roles. 'entities': { 'domains': { 'id': CONF.identity.default_domain_id, 'users': 4, 'groups': 2, 'projects': {'project': 1}, }, 'roles': 4, }, # Each group has a unique user member 'group_memberships': [ {'group': 0, 'users': [1]}, {'group': 1, 'users': [3]}, ], # Set up assignments so that there should end up with four # effective assignments on project 1 - one direct, one due to # group membership and one user assignment inherited from the # parent and one group assignment inherited from the parent. 
'assignments': [ {'user': 0, 'role': 0, 'project': 1}, {'group': 0, 'role': 1, 'project': 1}, { 'user': 2, 'role': 2, 'project': 0, 'inherited_to_projects': True, }, { 'group': 1, 'role': 3, 'project': 0, 'inherited_to_projects': True, }, ], } # Use assignment plan helper to create all the entities and # assignments - then we'll run our own tests using the data test_data = self.execute_assignment_plan(test_plan) user_ids = PROVIDERS.assignment_api.list_user_ids_for_project( test_data['projects'][1]['id'] ) self.assertThat(user_ids, matchers.HasLength(4)) for x in range(0, 4): self.assertIn(test_data['users'][x]['id'], user_ids) def test_list_role_assignment_using_inherited_sourced_groups(self): """Test listing inherited assignments when restricted by groups.""" test_plan = { # A domain with 3 users, 3 groups, 3 projects, a second domain, # plus 3 roles. 'entities': { 'domains': [{'users': 3, 'groups': 3, 'projects': 3}, 1], 'roles': 3, }, # Users 0 & 1 are in the group 0, User 0 also in group 1 'group_memberships': [ {'group': 0, 'users': [0, 1]}, {'group': 1, 'users': [0]}, ], # Spread the assignments around - we want to be able to show that # if sourced by group, assignments from other sources are excluded 'assignments': [ {'user': 0, 'role': 0, 'domain': 0}, {'group': 0, 'role': 1, 'domain': 1}, { 'group': 1, 'role': 2, 'domain': 0, 'inherited_to_projects': True, }, {'group': 1, 'role': 2, 'project': 1}, { 'user': 2, 'role': 1, 'project': 1, 'inherited_to_projects': True, }, {'group': 2, 'role': 2, 'project': 2}, ], 'tests': [ # List all effective assignments sourced from groups 0 and 1. # We should see the inherited group assigned on the 3 projects # from domain 0, as well as the direct assignments. 
{ 'params': { 'source_from_group_ids': [0, 1], 'effective': True, }, 'results': [ {'group': 0, 'role': 1, 'domain': 1}, { 'group': 1, 'role': 2, 'project': 0, 'indirect': {'domain': 0}, }, { 'group': 1, 'role': 2, 'project': 1, 'indirect': {'domain': 0}, }, { 'group': 1, 'role': 2, 'project': 2, 'indirect': {'domain': 0}, }, {'group': 1, 'role': 2, 'project': 1}, ], }, ], } self.execute_assignment_plan(test_plan) class ImpliedRoleTests(AssignmentTestHelperMixin): def test_implied_role_crd(self): prior_role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(prior_role_ref['id'], prior_role_ref) implied_role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role( implied_role_ref['id'], implied_role_ref ) PROVIDERS.role_api.create_implied_role( prior_role_ref['id'], implied_role_ref['id'] ) implied_role = PROVIDERS.role_api.get_implied_role( prior_role_ref['id'], implied_role_ref['id'] ) expected_implied_role_ref = { 'prior_role_id': prior_role_ref['id'], 'implied_role_id': implied_role_ref['id'], } self.assertLessEqual( expected_implied_role_ref.items(), implied_role.items() ) PROVIDERS.role_api.delete_implied_role( prior_role_ref['id'], implied_role_ref['id'] ) self.assertRaises( exception.ImpliedRoleNotFound, PROVIDERS.role_api.get_implied_role, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_delete_implied_role_returns_not_found(self): self.assertRaises( exception.ImpliedRoleNotFound, PROVIDERS.role_api.delete_implied_role, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_role_assignments_simple_tree_of_implied_roles(self): """Test that implied roles are expanded out.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [ {'role': 0, 'implied_roles': 1}, {'role': 1, 'implied_roles': [2, 3]}, ], 'assignments': [{'user': 0, 'role': 0, 'project': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment { 'params': 
{'user': 0}, 'results': [{'user': 0, 'role': 0, 'project': 0}], }, # Listing in effective mode should show the implied roles # expanded out { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}, }, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}, }, ], }, ], } self.execute_assignment_plan(test_plan) def test_circular_inferences(self): """Test that implied roles are expanded out.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [ {'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}, {'role': 3, 'implied_roles': [0]}, ], 'assignments': [{'user': 0, 'role': 0, 'project': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment { 'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'project': 0}], }, # Listing in effective mode should show the implied roles # expanded out { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, { 'user': 0, 'role': 0, 'project': 0, 'indirect': {'role': 3}, }, { 'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}, }, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}, }, ], }, ], } self.execute_assignment_plan(test_plan) def test_role_assignments_directed_graph_of_implied_roles(self): """Test that a role can have multiple, different prior roles.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 6}, # Three level tree of implied roles, where one of the roles at the # bottom is implied by more than one top level role 'implied_roles': [ {'role': 0, 'implied_roles': [1, 2]}, {'role': 1, 'implied_roles': [3, 4]}, {'role': 5, 'implied_roles': 4}, 
], # The user gets both top level roles 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 5, 'project': 0}, ], 'tests': [ # The implied roles should be expanded out and there should be # two entries for the role that had two different prior roles. { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 5, 'project': 0}, { 'user': 0, 'role': 1, 'project': 0, 'indirect': {'role': 0}, }, { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 0}, }, { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 4, 'project': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 4, 'project': 0, 'indirect': {'role': 5}, }, ], }, ], } test_data = self.execute_assignment_plan(test_plan) # We should also be able to get a similar (yet summarized) answer to # the above by calling get_roles_for_user_and_project(), which should # list the role_ids, yet remove any duplicates role_ids = PROVIDERS.assignment_api.get_roles_for_user_and_project( test_data['users'][0]['id'], test_data['projects'][0]['id'] ) # We should see 6 entries, not 7, since role index 5 appeared twice in # the answer from list_role_assignments self.assertThat(role_ids, matchers.HasLength(6)) for x in range(0, 5): self.assertIn(test_data['roles'][x]['id'], role_ids) def test_role_assignments_implied_roles_filtered_by_role(self): """Test that you can filter by role even if roles are implied.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 2}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [ {'role': 0, 'implied_roles': 1}, {'role': 1, 'implied_roles': [2, 3]}, ], 'assignments': [ {'user': 0, 'role': 0, 'project': 0}, {'user': 0, 'role': 3, 'project': 1}, ], 'tests': [ # List effective roles filtering by one of the implied roles, # showing that the filter was implied post expansion of # implied roles (and that non implied roles are included in # the filter { 'params': 
{'role': 3, 'effective': True}, 'results': [ { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}, }, {'user': 0, 'role': 3, 'project': 1}, ], }, ], } self.execute_assignment_plan(test_plan) def test_role_assignments_simple_tree_of_implied_roles_on_domain(self): """Test that implied roles are expanded out when placed on a domain.""" test_plan = { 'entities': {'domains': {'users': 1}, 'roles': 4}, # Three level tree of implied roles 'implied_roles': [ {'role': 0, 'implied_roles': 1}, {'role': 1, 'implied_roles': [2, 3]}, ], 'assignments': [{'user': 0, 'role': 0, 'domain': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment { 'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'domain': 0}], }, # Listing in effective mode should how the implied roles # expanded out { 'params': {'user': 0, 'effective': True}, 'results': [ {'user': 0, 'role': 0, 'domain': 0}, { 'user': 0, 'role': 1, 'domain': 0, 'indirect': {'role': 0}, }, { 'user': 0, 'role': 2, 'domain': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 3, 'domain': 0, 'indirect': {'role': 1}, }, ], }, ], } self.execute_assignment_plan(test_plan) def test_role_assignments_inherited_implied_roles(self): """Test that you can intermix inherited and implied roles.""" test_plan = { 'entities': {'domains': {'users': 1, 'projects': 1}, 'roles': 4}, # Simply one level of implied roles 'implied_roles': [{'role': 0, 'implied_roles': 1}], # Assign to top level role as an inherited assignment to the # domain 'assignments': [ { 'user': 0, 'role': 0, 'domain': 0, 'inherited_to_projects': True, } ], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment { 'params': {'user': 0}, 'results': [ { 'user': 0, 'role': 0, 'domain': 0, 'inherited_to_projects': 'projects', } ], }, # List in effective mode - we should only see the initial and # implied role on the project (since inherited roles are not # 
active on their anchor point). { 'params': {'user': 0, 'effective': True}, 'results': [ { 'user': 0, 'role': 0, 'project': 0, 'indirect': {'domain': 0}, }, { 'user': 0, 'role': 1, 'project': 0, 'indirect': {'domain': 0, 'role': 0}, }, ], }, ], } self.execute_assignment_plan(test_plan) def test_role_assignments_domain_specific_with_implied_roles(self): test_plan = { 'entities': { 'domains': {'users': 1, 'projects': 1, 'roles': 2}, 'roles': 2, }, # Two level tree of implied roles, with the top and 1st level being # domain specific roles, and the bottom level being inferred global # roles. 'implied_roles': [ {'role': 0, 'implied_roles': [1]}, {'role': 1, 'implied_roles': [2, 3]}, ], 'assignments': [{'user': 0, 'role': 0, 'project': 0}], 'tests': [ # List all direct assignments for user[0], this should just # show the one top level role assignment, even though this is a # domain specific role (since we are in non-effective mode and # we show any direct role assignment in that mode). { 'params': {'user': 0}, 'results': [{'user': 0, 'role': 0, 'project': 0}], }, # Now the effective ones - so the implied roles should be # expanded out, as well as any domain specific roles should be # removed. 
{ 'params': {'user': 0, 'effective': True}, 'results': [ { 'user': 0, 'role': 2, 'project': 0, 'indirect': {'role': 1}, }, { 'user': 0, 'role': 3, 'project': 0, 'indirect': {'role': 1}, }, ], }, ], } self.execute_assignment_plan(test_plan) class SystemAssignmentTests(AssignmentTestHelperMixin): def test_create_system_grant_for_user(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role_ref = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_user( user_id, role_ref['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertEqual(len(system_roles), 1) self.assertIsNone(system_roles[0]['domain_id']) self.assertEqual(system_roles[0]['id'], role_ref['id']) self.assertEqual(system_roles[0]['name'], role_ref['name']) def test_list_system_grants_for_user(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] first_role = self._create_role() second_role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_user( user_id, first_role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertEqual(len(system_roles), 1) PROVIDERS.assignment_api.create_system_grant_for_user( user_id, second_role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertEqual(len(system_roles), 2) def test_check_system_grant_for_user(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role = self._create_role() self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_user, user_id, role['id'], ) PROVIDERS.assignment_api.create_system_grant_for_user( user_id, role['id'] ) PROVIDERS.assignment_api.check_system_grant_for_user( user_id, role['id'] 
) def test_delete_system_grant_for_user(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_user( user_id, role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertEqual(len(system_roles), 1) PROVIDERS.assignment_api.delete_system_grant_for_user( user_id, role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertEqual(len(system_roles), 0) def test_check_system_grant_for_user_with_invalid_role_fails(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_user, user_id, uuid.uuid4().hex, ) def test_check_system_grant_for_user_with_invalid_user_fails(self): role = self._create_role() self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_user, uuid.uuid4().hex, role['id'], ) def test_delete_system_grant_for_user_with_invalid_role_fails(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_user( user_id, role['id'] ) self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.delete_system_grant_for_user, user_id, uuid.uuid4().hex, ) def test_delete_system_grant_for_user_with_invalid_user_fails(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_user( user_id, role['id'] ) self.assertRaises( exception.RoleAssignmentNotFound, 
PROVIDERS.assignment_api.delete_system_grant_for_user, uuid.uuid4().hex, role['id'], ) def test_list_system_grants_for_user_returns_empty_list(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user_id ) self.assertFalse(system_roles) def test_create_system_grant_for_user_fails_with_domain_role(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_id = PROVIDERS.identity_api.create_user(user_ref)['id'] role = self._create_role(domain_id=CONF.identity.default_domain_id) self.assertRaises( exception.ValidationError, PROVIDERS.assignment_api.create_system_grant_for_user, user_id, role['id'], ) def test_create_system_grant_for_group(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role_ref = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_group( group_id, role_ref['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) self.assertEqual(len(system_roles), 1) self.assertIsNone(system_roles[0]['domain_id']) self.assertEqual(system_roles[0]['id'], role_ref['id']) self.assertEqual(system_roles[0]['name'], role_ref['name']) def test_list_system_grants_for_group(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] first_role = self._create_role() second_role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_group( group_id, first_role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) self.assertEqual(len(system_roles), 1) PROVIDERS.assignment_api.create_system_grant_for_group( group_id, second_role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) 
self.assertEqual(len(system_roles), 2) def test_check_system_grant_for_group(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role = self._create_role() self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_group, group_id, role['id'], ) PROVIDERS.assignment_api.create_system_grant_for_group( group_id, role['id'] ) PROVIDERS.assignment_api.check_system_grant_for_group( group_id, role['id'] ) def test_delete_system_grant_for_group(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_group( group_id, role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) self.assertEqual(len(system_roles), 1) PROVIDERS.assignment_api.delete_system_grant_for_group( group_id, role['id'] ) system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) self.assertEqual(len(system_roles), 0) def test_check_system_grant_for_group_with_invalid_role_fails(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_group, group_id, uuid.uuid4().hex, ) def test_check_system_grant_for_group_with_invalid_group_fails(self): role = self._create_role() self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.check_system_grant_for_group, uuid.uuid4().hex, role['id'], ) def test_delete_system_grant_for_group_with_invalid_role_fails(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_group( group_id, 
role['id'] ) self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.delete_system_grant_for_group, group_id, uuid.uuid4().hex, ) def test_delete_system_grant_for_group_with_invalid_group_fails(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role = self._create_role() PROVIDERS.assignment_api.create_system_grant_for_group( group_id, role['id'] ) self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.delete_system_grant_for_group, uuid.uuid4().hex, role['id'], ) def test_list_system_grants_for_group_returns_empty_list(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] system_roles = PROVIDERS.assignment_api.list_system_grants_for_group( group_id ) self.assertFalse(system_roles) def test_create_system_grant_for_group_fails_with_domain_role(self): group_ref = unit.new_group_ref(CONF.identity.default_domain_id) group_id = PROVIDERS.identity_api.create_group(group_ref)['id'] role = self._create_role(CONF.identity.default_domain_id) self.assertRaises( exception.ValidationError, PROVIDERS.assignment_api.create_system_grant_for_group, group_id, role['id'], ) def test_delete_role_with_system_assignments(self): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.new_user_ref(domain_id=domain['id']) user = PROVIDERS.identity_api.create_user(user) # creating a system grant for user PROVIDERS.assignment_api.create_system_grant_for_user( user['id'], role['id'] ) # deleting the role user has on system PROVIDERS.role_api.delete_role(role['id']) system_roles = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertEqual(len(system_roles), 0) ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/assignment/test_core.py0000664000175000017500000003056400000000000024166 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone.common import provider_api from keystone.common.resource_options import options as ro_opt from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures PROVIDERS = provider_api.ProviderAPIs class RoleTests: def test_get_role_returns_not_found(self): self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, uuid.uuid4().hex, ) def test_get_unique_role_by_name_returns_not_found(self): self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_unique_role_by_name, uuid.uuid4().hex, ) def test_create_duplicate_role_name_fails(self): role_id = uuid.uuid4().hex role = unit.new_role_ref(id=role_id, name='fake1name') PROVIDERS.role_api.create_role(role_id, role) new_role_id = uuid.uuid4().hex role['id'] = new_role_id self.assertRaises( exception.Conflict, PROVIDERS.role_api.create_role, new_role_id, role, ) def test_rename_duplicate_role_name_fails(self): role_id1 = uuid.uuid4().hex role_id2 = uuid.uuid4().hex role1 = unit.new_role_ref(id=role_id1, name='fake1name') role2 = unit.new_role_ref(id=role_id2, name='fake2name') PROVIDERS.role_api.create_role(role_id1, role1) PROVIDERS.role_api.create_role(role_id2, role2) 
role1['name'] = 'fake2name' self.assertRaises( exception.Conflict, PROVIDERS.role_api.update_role, role_id1, role1 ) def test_role_crud(self): role = unit.new_role_ref() role_name = role['name'] PROVIDERS.role_api.create_role(role['id'], role) role_ref = PROVIDERS.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertDictEqual(role, role_ref_dict) role_ref = PROVIDERS.role_api.get_unique_role_by_name(role_name) self.assertEqual(role['id'], role_ref['id']) role['name'] = uuid.uuid4().hex updated_role_ref = PROVIDERS.role_api.update_role(role['id'], role) role_ref = PROVIDERS.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertDictEqual(role, role_ref_dict) self.assertDictEqual(role_ref_dict, updated_role_ref) PROVIDERS.role_api.delete_role(role['id']) self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, role['id'] ) def test_role_crud_without_description(self): role = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': None, 'options': {}, } self.role_api.create_role(role['id'], role) role_ref = self.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertIsNone(role_ref_dict['description']) role_ref_dict.pop('description') self.assertDictEqual(role, role_ref_dict) role['name'] = uuid.uuid4().hex updated_role_ref = self.role_api.update_role(role['id'], role) role_ref = self.role_api.get_role(role['id']) role_ref_dict = {x: role_ref[x] for x in role_ref} self.assertIsNone(updated_role_ref['description']) self.assertDictEqual(role_ref_dict, updated_role_ref) self.role_api.delete_role(role['id']) self.assertRaises( exception.RoleNotFound, self.role_api.get_role, role['id'] ) def test_update_role_returns_not_found(self): role = unit.new_role_ref() self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.update_role, role['id'], role, ) def test_list_roles(self): roles = PROVIDERS.role_api.list_roles() 
self.assertEqual(len(default_fixtures.ROLES), len(roles)) role_ids = {role['id'] for role in roles} expected_role_ids = {role['id'] for role in default_fixtures.ROLES} self.assertEqual(expected_role_ids, role_ids) @unit.skip_if_cache_disabled('role') def test_cache_layer_role_crud(self): role = unit.new_role_ref() role_id = role['id'] # Create role PROVIDERS.role_api.create_role(role_id, role) role_ref = PROVIDERS.role_api.get_role(role_id) updated_role_ref = copy.deepcopy(role_ref) updated_role_ref['name'] = uuid.uuid4().hex # Update role, bypassing the role api manager PROVIDERS.role_api.driver.update_role(role_id, updated_role_ref) # Verify get_role still returns old ref self.assertDictEqual(role_ref, PROVIDERS.role_api.get_role(role_id)) # Invalidate Cache PROVIDERS.role_api.get_role.invalidate(PROVIDERS.role_api, role_id) # Verify get_role returns the new role_ref self.assertDictEqual( updated_role_ref, PROVIDERS.role_api.get_role(role_id) ) # Update role back to original via the assignment api manager PROVIDERS.role_api.update_role(role_id, role_ref) # Verify get_role returns the original role ref self.assertDictEqual(role_ref, PROVIDERS.role_api.get_role(role_id)) # Delete role bypassing the role api manager PROVIDERS.role_api.driver.delete_role(role_id) # Verify get_role still returns the role_ref self.assertDictEqual(role_ref, PROVIDERS.role_api.get_role(role_id)) # Invalidate cache PROVIDERS.role_api.get_role.invalidate(PROVIDERS.role_api, role_id) # Verify RoleNotFound is now raised self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, role_id ) # recreate role PROVIDERS.role_api.create_role(role_id, role) PROVIDERS.role_api.get_role(role_id) # delete role via the assignment api manager PROVIDERS.role_api.delete_role(role_id) # verity RoleNotFound is now raised self.assertRaises( exception.RoleNotFound, PROVIDERS.role_api.get_role, role_id ) def test_create_role_immutable(self): role = unit.new_role_ref() role_id = role['id'] 
role['options'][ro_opt.IMMUTABLE_OPT.option_name] = True role_created = PROVIDERS.role_api.create_role(role_id, role) role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_created) self.assertTrue('options' in role_via_manager) self.assertTrue( role_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) self.assertTrue( role_created['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_cannot_update_immutable_role(self): role = unit.new_role_ref() role_id = role['id'] role['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.role_api.create_role(role_id, role) update_role = {'name': uuid.uuid4().hex} self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.role_api.update_role, role_id, update_role, ) def test_cannot_update_immutable_role_while_unsetting_immutable(self): role = unit.new_role_ref() role_id = role['id'] role['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.role_api.create_role(role_id, role) update_role = { 'name': uuid.uuid4().hex, 'options': {ro_opt.IMMUTABLE_OPT.option_name: True}, } self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.role_api.update_role, role_id, update_role, ) def test_cannot_delete_immutable_role(self): role = unit.new_role_ref() role_id = role['id'] role['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.role_api.create_role(role_id, role) self.assertRaises( exception.ResourceDeleteForbidden, PROVIDERS.role_api.delete_role, role_id, ) def test_update_role_set_immutable(self): role = unit.new_role_ref() role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) update_role = {'options': {ro_opt.IMMUTABLE_OPT.option_name: True}} role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) role_update = PROVIDERS.role_api.update_role(role_id, update_role) role_via_manager = 
PROVIDERS.role_api.get_role(role_id) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in role_update['options'] ) self.assertTrue( role_update['options'][ro_opt.IMMUTABLE_OPT.option_name] ) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) self.assertTrue( role_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_update_role_set_immutable_with_additional_updates(self): role = unit.new_role_ref() role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) update_role = { 'name': uuid.uuid4().hex, 'options': {ro_opt.IMMUTABLE_OPT.option_name: True}, } role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) role_update = PROVIDERS.role_api.update_role(role_id, update_role) role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertEqual(role_update['name'], update_role['name']) self.assertEqual(role_via_manager['name'], update_role['name']) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in role_update['options'] ) self.assertTrue( role_update['options'][ro_opt.IMMUTABLE_OPT.option_name] ) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) self.assertTrue( role_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_update_role_unset_immutable(self): role = unit.new_role_ref() role_id = role['id'] role['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.role_api.create_role(role_id, role) role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_via_manager) self.assertTrue( role_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) update_role = {'options': {ro_opt.IMMUTABLE_OPT.option_name: False}} PROVIDERS.role_api.update_role(role_id, update_role) role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_via_manager) self.assertTrue( 
ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) self.assertFalse( role_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) update_role = {'options': {ro_opt.IMMUTABLE_OPT.option_name: None}} role_updated = PROVIDERS.role_api.update_role(role_id, update_role) role_via_manager = PROVIDERS.role_api.get_role(role_id) self.assertTrue('options' in role_updated) self.assertTrue('options' in role_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in role_updated['options'] ) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in role_via_manager['options'] ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/auth/0000775000175000017500000000000000000000000020406 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/__init__.py0000664000175000017500000000000000000000000022505 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/auth/plugins/0000775000175000017500000000000000000000000022067 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/plugins/__init__.py0000664000175000017500000000000000000000000024166 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/plugins/test_core.py0000664000175000017500000000762700000000000024444 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth import plugins from keystone.tests import unit class TestPluginCore(unit.TestCase): def test_construct_method_map_with_one_methods(self): auth_methods = ['password'] self.config_fixture.config(group='auth', methods=auth_methods) expected_method_map = {1: 'password'} method_map = plugins.construct_method_map_from_config() self.assertDictEqual(expected_method_map, method_map) def test_construct_method_map_with_two_methods(self): auth_methods = ['password', 'token'] self.config_fixture.config(group='auth', methods=auth_methods) expected_method_map = {1: 'password', 2: 'token'} method_map = plugins.construct_method_map_from_config() self.assertDictEqual(expected_method_map, method_map) def test_construct_method_map_with_three_methods(self): auth_methods = ['password', 'token', 'totp'] self.config_fixture.config(group='auth', methods=auth_methods) expected_method_map = {1: 'password', 2: 'token', 4: 'totp'} method_map = plugins.construct_method_map_from_config() self.assertDictEqual(expected_method_map, method_map) def test_convert_methods_to_integer(self): auth_methods = ['password', 'token', 'totp'] self.config_fixture.config(group='auth', methods=auth_methods) method_integer = plugins.convert_method_list_to_integer(['password']) self.assertEqual(1, method_integer) method_integer = plugins.convert_method_list_to_integer( ['password', 'token'] ) self.assertEqual(3, method_integer) method_integer = plugins.convert_method_list_to_integer( ['password', 'totp'] ) self.assertEqual(5, method_integer) method_integer = plugins.convert_method_list_to_integer( 
['token', 'totp'] ) self.assertEqual(6, method_integer) method_integer = plugins.convert_method_list_to_integer( ['password', 'token', 'totp'] ) self.assertEqual(7, method_integer) def test_convert_integer_to_methods(self): auth_methods = ['password', 'token', 'totp'] self.config_fixture.config(group='auth', methods=auth_methods) expected_methods = ['password'] methods = plugins.convert_integer_to_method_list(1) self.assertTrue(len(methods) == 1) for method in methods: self.assertIn(method, expected_methods) expected_methods = ['password', 'token'] methods = plugins.convert_integer_to_method_list(3) self.assertTrue(len(methods) == 2) for method in methods: self.assertIn(method, expected_methods) expected_methods = ['password', 'totp'] methods = plugins.convert_integer_to_method_list(5) self.assertTrue(len(methods) == 2) for method in methods: self.assertIn(method, expected_methods) expected_methods = ['token', 'totp'] methods = plugins.convert_integer_to_method_list(6) self.assertTrue(len(methods) == 2) for method in methods: self.assertIn(method, expected_methods) expected_methods = ['password', 'token', 'totp'] methods = plugins.convert_integer_to_method_list(7) self.assertTrue(len(methods) == 3) for method in methods: self.assertIn(method, expected_methods) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/plugins/test_mapped.py0000664000175000017500000001747300000000000024762 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from keystone.assignment.core import Manager as AssignmentApi from keystone.auth.plugins import mapped from keystone.exception import ProjectNotFound from keystone.resource.core import Manager as ResourceApi from keystone.tests import unit class TestMappedPlugin(unit.TestCase): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) def setUp(self): super().setUp() self.resource_api_mock = mock.Mock(spec=ResourceApi) self.assignment_api_mock = mock.Mock(spec=AssignmentApi) self.domain_uuid_mock = uuid.uuid4().hex self.domain_mock = {'id': self.domain_uuid_mock} self.idp_domain_uuid_mock = uuid.uuid4().hex self.member_role_id = uuid.uuid4().hex self.member_role_name = "member" self.existing_roles = { self.member_role_name: {'id': self.member_role_id} } self.shadow_project_mock = { 'name': "test-project", 'roles': [{'name': self.member_role_name}], } self.shadow_project_in_domain_mock = { 'name': "test-project-in-domain", 'domain': self.domain_mock, 'roles': [{'name': self.member_role_name}], } self.shadow_projects_mock = [ self.shadow_project_mock, self.shadow_project_in_domain_mock, ] self.user_mock = {'id': uuid.uuid4().hex, 'name': "test-user"} def test_configure_project_domain_no_project_domain(self): mapped.configure_project_domain( self.shadow_project_mock, self.idp_domain_uuid_mock, self.resource_api_mock, ) self.assertIn("domain", self.shadow_project_mock) self.assertEqual( self.idp_domain_uuid_mock, self.shadow_project_mock['domain']['id'] ) def test_configure_project_domain_with_domain_id(self): self.shadow_project_mock['domain'] = self.domain_mock mapped.configure_project_domain( self.shadow_project_mock, self.idp_domain_uuid_mock, self.resource_api_mock, ) self.assertIn("domain", self.shadow_project_mock) self.assertEqual( self.domain_uuid_mock, self.shadow_project_mock['domain']['id'] ) def 
test_configure_project_domain_with_domain_name(self): domain_name = "test-domain" self.shadow_project_mock['domain'] = {'name': domain_name} self.resource_api_mock.get_domain_by_name.return_value = ( self.domain_mock ) mapped.configure_project_domain( self.shadow_project_mock, self.idp_domain_uuid_mock, self.resource_api_mock, ) self.assertIn("domain", self.shadow_project_mock) self.assertEqual( self.domain_uuid_mock, self.shadow_project_mock['domain']['id'] ) self.resource_api_mock.get_domain_by_name.assert_called_with( domain_name ) def test_handle_projects_from_mapping_project_exists(self): project_mock_1 = self.create_project_mock_for_shadow_project( self.shadow_project_mock ) project_mock_2 = self.create_project_mock_for_shadow_project( self.shadow_project_in_domain_mock ) self.resource_api_mock.get_project_by_name.side_effect = [ project_mock_1, project_mock_2, ] mapped.handle_projects_from_mapping( self.shadow_projects_mock, self.idp_domain_uuid_mock, self.existing_roles, self.user_mock, self.assignment_api_mock, self.resource_api_mock, ) self.resource_api_mock.get_project_by_name.assert_has_calls( [ mock.call( self.shadow_project_in_domain_mock['name'], self.shadow_project_in_domain_mock['domain']['id'], ), mock.call( self.shadow_project_mock['name'], self.idp_domain_uuid_mock ), ], any_order=True, ) self.assignment_api_mock.create_grant.assert_has_calls( [ mock.call( self.member_role_id, user_id=self.user_mock['id'], project_id=project_mock_1['id'], ), mock.call( self.member_role_id, user_id=self.user_mock['id'], project_id=project_mock_2['id'], ), ] ) @mock.patch("uuid.UUID.hex", new_callable=mock.PropertyMock) def test_handle_projects_from_mapping_create_projects(self, uuid_mock): uuid_mock.return_value = "uuid" project_mock_1 = self.create_project_mock_for_shadow_project( self.shadow_project_mock ) project_mock_2 = self.create_project_mock_for_shadow_project( self.shadow_project_in_domain_mock ) self.resource_api_mock.get_project_by_name.side_effect = [ 
ProjectNotFound(project_id=project_mock_1['name']), ProjectNotFound(project_id=project_mock_2['name']), ] self.resource_api_mock.create_project.side_effect = [ project_mock_1, project_mock_2, ] mapped.handle_projects_from_mapping( self.shadow_projects_mock, self.idp_domain_uuid_mock, self.existing_roles, self.user_mock, self.assignment_api_mock, self.resource_api_mock, ) self.resource_api_mock.get_project_by_name.assert_has_calls( [ mock.call( self.shadow_project_in_domain_mock['name'], self.shadow_project_in_domain_mock['domain']['id'], ), mock.call( self.shadow_project_mock['name'], self.idp_domain_uuid_mock ), ], any_order=True, ) expected_project_ref1 = { 'id': "uuid", 'name': self.shadow_project_mock['name'], 'domain_id': self.idp_domain_uuid_mock, } expected_project_ref2 = { 'id': "uuid", 'name': self.shadow_project_in_domain_mock['name'], 'domain_id': self.shadow_project_in_domain_mock['domain']['id'], } self.resource_api_mock.create_project.assert_has_calls( [ mock.call(expected_project_ref1['id'], expected_project_ref1), mock.call(expected_project_ref2['id'], expected_project_ref2), ] ) self.assignment_api_mock.create_grant.assert_has_calls( [ mock.call( self.member_role_id, user_id=self.user_mock['id'], project_id=project_mock_1['id'], ), mock.call( self.member_role_id, user_id=self.user_mock['id'], project_id=project_mock_2['id'], ), ] ) def create_project_mock_for_shadow_project(self, shadow_project): project = shadow_project.copy() project['id'] = uuid.uuid4().hex return project ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/test_controllers.py0000664000175000017500000000544500000000000024375 0ustar00zuulzuul00000000000000# Copyright 2015 IBM Corp. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import fixtures from oslo_config import cfg from oslo_config import fixture as config_fixture import stevedore from stevedore import extension from keystone.auth import core from keystone.tests import unit class TestLoadAuthMethod(unit.BaseTestCase): def test_entrypoint_works(self): method = uuid.uuid4().hex plugin_name = self.getUniqueString() # Register the method using the given plugin cf = self.useFixture(config_fixture.Config()) cf.register_opt(cfg.StrOpt(method), group='auth') cf.config(group='auth', **{method: plugin_name}) # Setup stevedore.DriverManager to return a driver for the plugin extension_ = extension.Extension( plugin_name, entry_point=mock.sentinel.entry_point, plugin=mock.sentinel.plugin, obj=mock.sentinel.driver, ) auth_plugin_namespace = 'keystone.auth.%s' % method fake_driver_manager = stevedore.DriverManager.make_test_instance( extension_, namespace=auth_plugin_namespace ) driver_manager_mock = self.useFixture( fixtures.MockPatchObject( stevedore, 'DriverManager', return_value=fake_driver_manager ) ).mock driver = core.load_auth_method(method) self.assertEqual(auth_plugin_namespace, fake_driver_manager.namespace) driver_manager_mock.assert_called_once_with( auth_plugin_namespace, plugin_name, invoke_on_load=True ) self.assertIs(mock.sentinel.driver, driver) def test_entrypoint_fails(self): method = uuid.uuid4().hex plugin_name = self.getUniqueString() # Register the method using the given plugin cf = self.useFixture(config_fixture.Config()) cf.register_opt(cfg.StrOpt(method), group='auth') 
cf.config(group='auth', **{method: plugin_name}) # stevedore.DriverManager raises RuntimeError if it can't load the # driver. self.useFixture( fixtures.MockPatchObject( stevedore, 'DriverManager', side_effect=RuntimeError ) ) self.assertRaises(RuntimeError, core.load_auth_method, method) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/auth/test_schema.py0000664000175000017500000003644700000000000023275 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.auth import schema from keystone import exception from keystone.tests import unit class TestValidateIssueTokenAuth(unit.BaseTestCase): def _expect_failure(self, post_data): self.assertRaises( exception.SchemaValidationError, schema.validate_issue_token_auth, post_data, ) def test_auth_not_object_ex(self): self._expect_failure('something') def test_auth_no_identity_ex(self): self._expect_failure({}) def test_identity_not_object_ex(self): self._expect_failure({'identity': 'something'}) def test_no_methods_ex(self): self._expect_failure({'identity': {}}) def test_methods_not_array_ex(self): p = {'identity': {'methods': 'something'}} self._expect_failure(p) def test_methods_not_array_str_ex(self): p = {'identity': {'methods': [{}]}} self._expect_failure(p) def test_no_auth_plugin_parameters(self): # auth plugin (password / token) may not be present. 
post_data = { 'identity': { 'methods': ['password'], }, } schema.validate_issue_token_auth(post_data) def test_password_not_object_ex(self): # if password is present, it must be an object. p = { 'identity': { 'methods': ['password'], 'password': 'something', }, } self._expect_failure(p) def test_password_user_not_object_ex(self): # if user is present, it must be an object p = { 'identity': { 'methods': ['password'], 'password': { 'user': 'something', }, }, } self._expect_failure(p) def test_password_user_name_not_string_ex(self): # if user name is present, it must be a string p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 1, }, }, }, } self._expect_failure(p) def test_password_user_id_not_string_ex(self): # if user id is present, it must be a string p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': {}, }, }, }, } self._expect_failure(p) def test_password_no_user_id_or_name_ex(self): # either user id or name must be present. p = { 'identity': { 'methods': ['password'], 'password': { 'user': {}, }, }, } self._expect_failure(p) def test_password_user_password_not_string_ex(self): # if user password is present, it must be a string p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'something', 'password': {}, }, }, }, } self._expect_failure(p) def test_password_user_domain_not_object_ex(self): # if user domain is present, it must be an object p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'something', 'domain': 'something', }, }, }, } self._expect_failure(p) def test_password_user_domain_no_id_or_name_ex(self): # user domain must have id or name. p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'something', 'domain': {}, }, }, }, } self._expect_failure(p) def test_password_user_domain_name_not_string_ex(self): # if user domain name is present, it must be a string. 
p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'something', 'domain': {'name': {}}, }, }, }, } self._expect_failure(p) def test_password_user_domain_id_not_string_ex(self): # if user domain id is present, it must be a string. p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'something', 'domain': {'id': {}}, }, }, }, } self._expect_failure(p) def test_token(self): # valid token auth plugin data is supported. p = { 'identity': { 'methods': ['token'], 'token': { 'id': 'something', }, }, } schema.validate_issue_token_auth(p) def test_token_not_object_ex(self): # if token auth plugin data is present, it must be an object. p = { 'identity': { 'methods': ['token'], 'token': '', }, } self._expect_failure(p) def test_token_no_id_ex(self): # if token auth plugin data is present, id must be present. p = { 'identity': { 'methods': ['token'], 'token': {}, }, } self._expect_failure(p) def test_token_id_not_string_ex(self): # if token auth plugin data is present, id must be a string. 
p = { 'identity': { 'methods': ['token'], 'token': { 'id': 123, }, }, } self._expect_failure(p) def test_scope_not_object_or_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': 1, } self._expect_failure(p) def test_project_not_object_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': 'something', }, } self._expect_failure(p) def test_project_name_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'name': {}, }, }, } self._expect_failure(p) def test_project_id_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'id': {}, }, }, } self._expect_failure(p) def test_project_no_id_or_name_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': {}, }, } self._expect_failure(p) def test_project_domain_not_object_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'id': 'something', 'domain': 'something', }, }, } self._expect_failure(p) def test_project_domain_name_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'id': 'something', 'domain': { 'name': {}, }, }, }, } self._expect_failure(p) def test_project_domain_id_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'id': 'something', 'domain': { 'id': {}, }, }, }, } self._expect_failure(p) def test_project_domain_no_id_or_name_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'project': { 'id': 'something', 'domain': {}, }, }, } self._expect_failure(p) def test_domain_not_object_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'domain': 'something', }, } self._expect_failure(p) def test_domain_id_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'domain': { 'id': {}, }, }, } self._expect_failure(p) def test_domain_name_not_string_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'domain': { 'name': {}, }, }, } self._expect_failure(p) def test_domain_no_id_or_name_ex(self): p = { 
'identity': { 'methods': [], }, 'scope': { 'domain': {}, }, } self._expect_failure(p) def test_trust_not_object_ex(self): p = { 'identity': { 'methods': [], }, 'scope': { 'OS-TRUST:trust': 'something', }, } self._expect_failure(p) def test_unscoped(self): post_data = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 'admin', 'domain': { 'name': 'Default', }, 'password': 'devstacker', }, }, }, } schema.validate_issue_token_auth(post_data) def test_user_domain_id(self): post_data = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 'admin', 'domain': { 'id': 'default', }, 'password': 'devstacker', }, }, }, } schema.validate_issue_token_auth(post_data) def test_two_methods(self): post_data = { 'identity': { 'methods': ['password', 'mapped'], 'password': { 'user': { 'name': 'admin', 'domain': { 'name': 'Default', }, 'password': 'devstacker', }, }, }, } schema.validate_issue_token_auth(post_data) def test_project_scoped(self): post_data = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 'admin', 'domain': { 'name': 'Default', }, 'password': 'devstacker', }, }, }, 'scope': { 'project': { 'name': 'demo', 'domain': { 'name': 'Default', }, }, }, } schema.validate_issue_token_auth(post_data) def test_domain_scoped(self): post_data = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 'admin', 'domain': { 'name': 'Default', }, 'password': 'devstacker', }, }, }, 'scope': { 'domain': { 'name': 'Default', }, }, } schema.validate_issue_token_auth(post_data) def test_explicit_unscoped(self): post_data = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': 'admin', 'domain': { 'name': 'Default', }, 'password': 'devstacker', }, }, }, 'scope': 'unscoped', } schema.validate_issue_token_auth(post_data) def test_additional_properties(self): # Everything can have extra properties and they're ignored. 
p = { 'identity': { 'methods': ['password'], 'password': { 'user': { 'id': 'whatever', 'extra4': 'whatever4', 'domain': { 'id': 'whatever', 'extra5': 'whatever5', }, }, 'extra3': 'whatever3', }, 'token': { 'id': 'something', 'extra9': 'whatever9', }, 'extra4': 'whatever4', }, 'scope': { 'project': { 'id': 'something', 'domain': { 'id': 'something', 'extra8': 'whatever8', }, 'extra7': 'whatever7', }, 'domain': { 'id': 'something', 'extra9': 'whatever9', }, 'extra6': 'whatever6', }, 'extra2': 'whatever2', } schema.validate_issue_token_auth(p) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/backend/0000775000175000017500000000000000000000000021034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/backend/__init__.py0000664000175000017500000000000000000000000023133 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/backend/core_ldap.py0000664000175000017500000001120100000000000023331 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import ldap from keystone.common import cache from keystone.common import provider_api import keystone.conf from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def create_group_container(identity_api): # Create the groups base entry (ou=Groups,cn=example,cn=com) group_api = identity_api.driver.group conn = group_api.get_connection() dn = 'ou=Groups,cn=example,cn=com' conn.add_s( dn, [('objectclass', ['organizationalUnit']), ('ou', ['Groups'])] ) class BaseBackendLdapCommon: """Mixin class to set up generic LDAP backends.""" def setUp(self): super().setUp() self.useFixture(ldapdb.LDAPDatabase()) self.load_backends() self.load_fixtures(default_fixtures) def _get_domain_fixture(self): """Return the static domain, since domains in LDAP are read-only.""" return PROVIDERS.resource_api.get_domain( CONF.identity.default_domain_id ) def get_config(self, domain_id): # Only one conf structure unless we are using separate domain backends return CONF def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def get_user_enabled_vals(self, user): user_dn = PROVIDERS.identity_api.driver.user._id_to_dn_string( user['id'] ) enabled_attr_name = CONF.ldap.user_enabled_attribute ldap_ = PROVIDERS.identity_api.driver.user.get_connection() res = ldap_.search_s( user_dn, ldap.SCOPE_BASE, '(sn=%s)' % user['name'] ) if enabled_attr_name in res[0][1]: return res[0][1][enabled_attr_name] else: return None class BaseBackendLdap: """Mixin class to set up an all-LDAP configuration.""" def setUp(self): # NOTE(dstanek): The database must be setup prior to calling the # parent's setUp. 
The parent's setUp uses services (like # credentials) that require a database. self.useFixture(database.Database()) super().setUp() def load_fixtures(self, fixtures): # Override super impl since need to create group container. create_group_container(PROVIDERS.identity_api) super().load_fixtures(fixtures) class BaseBackendLdapIdentitySqlEverythingElse(unit.SQLDriverOverrides): """Mixin base for Identity LDAP, everything else SQL backend tests.""" def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def setUp(self): sqldb = self.useFixture(database.Database()) super().setUp() self.load_backends() cache.configure_cache() sqldb.recreate() self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') class BaseBackendLdapIdentitySqlEverythingElseWithMapping: """Mixin base class to test mapping of default LDAP backend. The default configuration is not to enable mapping when using a single backend LDAP driver. However, a cloud provider might want to enable the mapping, hence hiding the LDAP IDs from any clients of keystone. Setting backward_compatible_ids to False will enable this mapping. """ def config_overrides(self): super().config_overrides() self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/backend/core_sql.py0000664000175000017500000000344600000000000023224 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sqlalchemy from keystone.common import sql from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database class BaseBackendSqlTests(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super().setUp() self.database_fixture = self.useFixture(database.Database()) self.load_backends() # populate the engine with tables & fixtures self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files class BaseBackendSqlModels(BaseBackendSqlTests): def load_table(self, name): table = sqlalchemy.Table( name, sql.ModelBase.metadata, autoload_with=self.database_fixture.engine, ) return table def assertExpectedSchema(self, table, cols): table = self.load_table(table) for col, type_, length in cols: self.assertIsInstance(table.c[col].type, type_) if length: self.assertEqual(length, table.c[col].type.length) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/base_classes.py0000664000175000017500000000621500000000000022452 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as config_fixture from keystone.cmd import bootstrap from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests.unit import core from keystone.tests.unit import default_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestCaseWithBootstrap(core.BaseTestCase): """A simpler version of TestCase that uses bootstrap. Re-implementation of TestCase that doesn't load a bunch of fixtures by hand and instead uses the bootstrap process. This makes it so that our base tests have the same things available to us as operators after they run bootstrap. It also makes our tests DRY and pushes setup required for specific tests into the actual test class, instead of pushing it into a generic structure that gets loaded for every test. 
""" def setUp(self): self.useFixture(database.Database()) super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) CONF(args=[], project='keystone') self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_receipts', CONF.fernet_receipts.max_active_keys, ) ) self.bootstrapper = bootstrap.Bootstrapper() self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances) self.addCleanup(self.clean_default_domain) self.bootstrapper.admin_password = 'password' self.bootstrapper.admin_username = 'admin' self.bootstrapper.project_name = 'admin' self.bootstrapper.admin_role_name = 'admin' self.bootstrapper.service_name = 'keystone' self.bootstrapper.public_url = 'http://localhost/identity/' self.bootstrapper.immutable_roles = True try: PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN, ) except exception.Conflict: pass self.bootstrapper.bootstrap() def clean_default_domain(self): PROVIDERS.resource_api.update_domain( CONF.identity.default_domain_id, {'enabled': False} ) PROVIDERS.resource_api.delete_domain(CONF.identity.default_domain_id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.558113 keystone-26.0.0/keystone/tests/unit/catalog/0000775000175000017500000000000000000000000021057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/catalog/__init__.py0000664000175000017500000000000000000000000023156 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/catalog/test_backends.py0000664000175000017500000006346300000000000024256 0ustar00zuulzuul00000000000000# 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import uuid from testtools import matchers from keystone.catalog.backends import base from keystone.common import driver_hints from keystone.common import provider_api from keystone import exception from keystone.tests import unit PROVIDERS = provider_api.ProviderAPIs class CatalogTests: _legacy_endpoint_id_in_endpoint = True _enabled_default_to_true_when_creating_endpoint = False def test_region_crud(self): # create region_id = 'default' new_region = unit.new_region_ref(id=region_id) res = PROVIDERS.catalog_api.create_region(new_region) # Ensure that we don't need to have a # parent_region_id in the original supplied # ref dict, but that it will be returned from # the endpoint, with None value. expected_region = new_region.copy() expected_region['parent_region_id'] = None self.assertDictEqual(expected_region, res) # Test adding another region with the one above # as its parent. We will check below whether deleting # the parent successfully deletes any child regions. 
parent_region_id = region_id new_region = unit.new_region_ref(parent_region_id=parent_region_id) region_id = new_region['id'] res = PROVIDERS.catalog_api.create_region(new_region) self.assertDictEqual(new_region, res) # list regions = PROVIDERS.catalog_api.list_regions() self.assertThat(regions, matchers.HasLength(2)) region_ids = [x['id'] for x in regions] self.assertIn(parent_region_id, region_ids) self.assertIn(region_id, region_ids) # update region_desc_update = {'description': uuid.uuid4().hex} res = PROVIDERS.catalog_api.update_region( region_id, region_desc_update ) expected_region = new_region.copy() expected_region['description'] = region_desc_update['description'] self.assertDictEqual(expected_region, res) # delete PROVIDERS.catalog_api.delete_region(parent_region_id) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.delete_region, parent_region_id, ) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, parent_region_id, ) # Ensure the child is also gone... 
self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_id, ) def _create_region_with_parent_id(self, parent_id=None): new_region = unit.new_region_ref(parent_region_id=parent_id) PROVIDERS.catalog_api.create_region(new_region) return new_region def test_list_regions_filtered_by_parent_region_id(self): new_region = self._create_region_with_parent_id() parent_id = new_region['id'] new_region = self._create_region_with_parent_id(parent_id) new_region = self._create_region_with_parent_id(parent_id) # filter by parent_region_id hints = driver_hints.Hints() hints.add_filter('parent_region_id', parent_id) regions = PROVIDERS.catalog_api.list_regions(hints) for region in regions: self.assertEqual(parent_id, region['parent_region_id']) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_region_crud(self): new_region = unit.new_region_ref() region_id = new_region['id'] PROVIDERS.catalog_api.create_region(new_region.copy()) updated_region = copy.deepcopy(new_region) updated_region['description'] = uuid.uuid4().hex # cache the result PROVIDERS.catalog_api.get_region(region_id) # update the region bypassing catalog_api PROVIDERS.catalog_api.driver.update_region(region_id, updated_region) self.assertLessEqual( new_region.items(), PROVIDERS.catalog_api.get_region(region_id).items(), ) PROVIDERS.catalog_api.get_region.invalidate( PROVIDERS.catalog_api, region_id ) self.assertLessEqual( updated_region.items(), PROVIDERS.catalog_api.get_region(region_id).items(), ) # delete the region PROVIDERS.catalog_api.driver.delete_region(region_id) # still get the old region self.assertLessEqual( updated_region.items(), PROVIDERS.catalog_api.get_region(region_id).items(), ) PROVIDERS.catalog_api.get_region.invalidate( PROVIDERS.catalog_api, region_id ) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_id, ) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_region(self): new_region = 
unit.new_region_ref() region_id = new_region['id'] PROVIDERS.catalog_api.create_region(new_region) # cache the region PROVIDERS.catalog_api.get_region(region_id) # update the region via catalog_api new_description = {'description': uuid.uuid4().hex} PROVIDERS.catalog_api.update_region(region_id, new_description) # assert that we can get the new region current_region = PROVIDERS.catalog_api.get_region(region_id) self.assertEqual( new_description['description'], current_region['description'] ) def test_update_region_extras(self): new_region = unit.new_region_ref() region_id = new_region['id'] PROVIDERS.catalog_api.create_region(new_region) email = 'keystone@openstack.org' new_ref = {'description': uuid.uuid4().hex, 'email': email} PROVIDERS.catalog_api.update_region(region_id, new_ref) current_region = PROVIDERS.catalog_api.get_region(region_id) self.assertEqual(email, current_region['email']) def test_create_region_with_duplicate_id(self): new_region = unit.new_region_ref() PROVIDERS.catalog_api.create_region(new_region) # Create region again with duplicate id self.assertRaises( exception.Conflict, PROVIDERS.catalog_api.create_region, new_region ) def test_get_region_returns_not_found(self): self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, uuid.uuid4().hex, ) def test_delete_region_returns_not_found(self): self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.delete_region, uuid.uuid4().hex, ) def test_create_region_invalid_parent_region_returns_not_found(self): new_region = unit.new_region_ref(parent_region_id=uuid.uuid4().hex) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.create_region, new_region, ) def test_avoid_creating_circular_references_in_regions_update(self): region_one = self._create_region_with_parent_id() # self circle: region_one->region_one self.assertRaises( exception.CircularRegionHierarchyError, PROVIDERS.catalog_api.update_region, region_one['id'], {'parent_region_id': 
region_one['id']}, ) # region_one->region_two->region_one region_two = self._create_region_with_parent_id(region_one['id']) self.assertRaises( exception.CircularRegionHierarchyError, PROVIDERS.catalog_api.update_region, region_one['id'], {'parent_region_id': region_two['id']}, ) # region_one region_two->region_three->region_four->region_two region_three = self._create_region_with_parent_id(region_two['id']) region_four = self._create_region_with_parent_id(region_three['id']) self.assertRaises( exception.CircularRegionHierarchyError, PROVIDERS.catalog_api.update_region, region_two['id'], {'parent_region_id': region_four['id']}, ) @mock.patch.object( base.CatalogDriverBase, "_ensure_no_circle_in_hierarchical_regions" ) def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle): # turn off the enforcement so that cycles can be created for the test mock_ensure_on_circle.return_value = None region_one = self._create_region_with_parent_id() # self circle: region_one->region_one PROVIDERS.catalog_api.update_region( region_one['id'], {'parent_region_id': region_one['id']} ) PROVIDERS.catalog_api.delete_region(region_one['id']) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_one['id'], ) # region_one->region_two->region_one region_one = self._create_region_with_parent_id() region_two = self._create_region_with_parent_id(region_one['id']) PROVIDERS.catalog_api.update_region( region_one['id'], {'parent_region_id': region_two['id']} ) PROVIDERS.catalog_api.delete_region(region_one['id']) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_one['id'], ) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_two['id'], ) # region_one->region_two->region_three->region_one region_one = self._create_region_with_parent_id() region_two = self._create_region_with_parent_id(region_one['id']) region_three = self._create_region_with_parent_id(region_two['id']) 
PROVIDERS.catalog_api.update_region( region_one['id'], {'parent_region_id': region_three['id']} ) PROVIDERS.catalog_api.delete_region(region_two['id']) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_two['id'], ) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_one['id'], ) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.get_region, region_three['id'], ) def test_service_crud(self): # create new_service = unit.new_service_ref() service_id = new_service['id'] res = PROVIDERS.catalog_api.create_service(service_id, new_service) self.assertDictEqual(new_service, res) # list services = PROVIDERS.catalog_api.list_services() self.assertIn(service_id, [x['id'] for x in services]) # update service_name_update = {'name': uuid.uuid4().hex} res = PROVIDERS.catalog_api.update_service( service_id, service_name_update ) expected_service = new_service.copy() expected_service['name'] = service_name_update['name'] self.assertDictEqual(expected_service, res) # delete PROVIDERS.catalog_api.delete_service(service_id) self.assertRaises( exception.ServiceNotFound, PROVIDERS.catalog_api.delete_service, service_id, ) self.assertRaises( exception.ServiceNotFound, PROVIDERS.catalog_api.get_service, service_id, ) def _create_random_service(self): new_service = unit.new_service_ref() service_id = new_service['id'] return PROVIDERS.catalog_api.create_service(service_id, new_service) def test_service_filtering(self): target_service = self._create_random_service() unrelated_service1 = self._create_random_service() unrelated_service2 = self._create_random_service() # filter by type hint_for_type = driver_hints.Hints() hint_for_type.add_filter(name="type", value=target_service['type']) services = PROVIDERS.catalog_api.list_services(hint_for_type) self.assertEqual(1, len(services)) filtered_service = services[0] self.assertEqual(target_service['type'], filtered_service['type']) 
self.assertEqual(target_service['id'], filtered_service['id']) # filter should have been removed, since it was already used by the # backend self.assertEqual(0, len(hint_for_type.filters)) # the backend shouldn't filter by name, since this is handled by the # front end hint_for_name = driver_hints.Hints() hint_for_name.add_filter(name="name", value=target_service['name']) services = PROVIDERS.catalog_api.list_services(hint_for_name) self.assertEqual(3, len(services)) # filter should still be there, since it wasn't used by the backend self.assertEqual(1, len(hint_for_name.filters)) PROVIDERS.catalog_api.delete_service(target_service['id']) PROVIDERS.catalog_api.delete_service(unrelated_service1['id']) PROVIDERS.catalog_api.delete_service(unrelated_service2['id']) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_service_crud(self): new_service = unit.new_service_ref() service_id = new_service['id'] res = PROVIDERS.catalog_api.create_service(service_id, new_service) self.assertDictEqual(new_service, res) PROVIDERS.catalog_api.get_service(service_id) updated_service = copy.deepcopy(new_service) updated_service['description'] = uuid.uuid4().hex # update bypassing catalog api PROVIDERS.catalog_api.driver.update_service( service_id, updated_service ) self.assertLessEqual( new_service.items(), PROVIDERS.catalog_api.get_service(service_id).items(), ) PROVIDERS.catalog_api.get_service.invalidate( PROVIDERS.catalog_api, service_id ) self.assertLessEqual( updated_service.items(), PROVIDERS.catalog_api.get_service(service_id).items(), ) # delete bypassing catalog api PROVIDERS.catalog_api.driver.delete_service(service_id) self.assertLessEqual( updated_service.items(), PROVIDERS.catalog_api.get_service(service_id).items(), ) PROVIDERS.catalog_api.get_service.invalidate( PROVIDERS.catalog_api, service_id ) self.assertRaises( exception.ServiceNotFound, PROVIDERS.catalog_api.delete_service, service_id, ) self.assertRaises( exception.ServiceNotFound, 
PROVIDERS.catalog_api.get_service, service_id, ) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_service(self): new_service = unit.new_service_ref() service_id = new_service['id'] PROVIDERS.catalog_api.create_service(service_id, new_service) # cache the service PROVIDERS.catalog_api.get_service(service_id) # update the service via catalog api new_type = {'type': uuid.uuid4().hex} PROVIDERS.catalog_api.update_service(service_id, new_type) # assert that we can get the new service current_service = PROVIDERS.catalog_api.get_service(service_id) self.assertEqual(new_type['type'], current_service['type']) def test_delete_service_with_endpoint(self): # create a service service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref( service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) # deleting the service should also delete the endpoint PROVIDERS.catalog_api.delete_service(service['id']) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.get_endpoint, endpoint['id'], ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.delete_endpoint, endpoint['id'], ) def test_cache_layer_delete_service_with_endpoint(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref( service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) # cache the result PROVIDERS.catalog_api.get_service(service['id']) PROVIDERS.catalog_api.get_endpoint(endpoint['id']) # delete the service bypassing catalog api PROVIDERS.catalog_api.driver.delete_service(service['id']) self.assertLessEqual( endpoint.items(), PROVIDERS.catalog_api.get_endpoint(endpoint['id']).items(), ) self.assertLessEqual( service.items(), 
PROVIDERS.catalog_api.get_service(service['id']).items(), ) PROVIDERS.catalog_api.get_endpoint.invalidate( PROVIDERS.catalog_api, endpoint['id'] ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.get_endpoint, endpoint['id'], ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.delete_endpoint, endpoint['id'], ) # multiple endpoints associated with a service second_endpoint = unit.new_endpoint_ref( service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_service(service['id'], service) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) PROVIDERS.catalog_api.create_endpoint( second_endpoint['id'], second_endpoint ) PROVIDERS.catalog_api.delete_service(service['id']) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.get_endpoint, endpoint['id'], ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.delete_endpoint, endpoint['id'], ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.get_endpoint, second_endpoint['id'], ) self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.delete_endpoint, second_endpoint['id'], ) def test_get_service_returns_not_found(self): self.assertRaises( exception.ServiceNotFound, PROVIDERS.catalog_api.get_service, uuid.uuid4().hex, ) def test_delete_service_returns_not_found(self): self.assertRaises( exception.ServiceNotFound, PROVIDERS.catalog_api.delete_service, uuid.uuid4().hex, ) def test_create_endpoint_nonexistent_service(self): endpoint = unit.new_endpoint_ref( service_id=uuid.uuid4().hex, region_id=None ) self.assertRaises( exception.ValidationError, PROVIDERS.catalog_api.create_endpoint, endpoint['id'], endpoint, ) def test_update_endpoint_nonexistent_service(self): dummy_service, enabled_endpoint, dummy_disabled_endpoint = ( self._create_endpoints() ) new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex) self.assertRaises( exception.ValidationError, PROVIDERS.catalog_api.update_endpoint, 
enabled_endpoint['id'], new_endpoint, ) def test_create_endpoint_nonexistent_region(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref(service_id=service['id']) self.assertRaises( exception.ValidationError, PROVIDERS.catalog_api.create_endpoint, endpoint['id'], endpoint, ) def test_update_endpoint_nonexistent_region(self): dummy_service, enabled_endpoint, dummy_disabled_endpoint = ( self._create_endpoints() ) new_endpoint = unit.new_endpoint_ref(service_id=uuid.uuid4().hex) self.assertRaises( exception.ValidationError, PROVIDERS.catalog_api.update_endpoint, enabled_endpoint['id'], new_endpoint, ) def test_get_endpoint_returns_not_found(self): self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.get_endpoint, uuid.uuid4().hex, ) def test_delete_endpoint_returns_not_found(self): self.assertRaises( exception.EndpointNotFound, PROVIDERS.catalog_api.delete_endpoint, uuid.uuid4().hex, ) def test_create_endpoint(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref( service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) def test_update_endpoint(self): dummy_service_ref, endpoint_ref, dummy_disabled_endpoint_ref = ( self._create_endpoints() ) res = PROVIDERS.catalog_api.update_endpoint( endpoint_ref['id'], {'interface': 'private'} ) expected_endpoint = endpoint_ref.copy() expected_endpoint['enabled'] = True expected_endpoint['interface'] = 'private' if self._legacy_endpoint_id_in_endpoint: expected_endpoint['legacy_endpoint_id'] = None if self._enabled_default_to_true_when_creating_endpoint: expected_endpoint['enabled'] = True self.assertDictEqual(expected_endpoint, res) def _create_endpoints(self): # Creates a service and 2 endpoints for the service in the same region. 
# The 'public' interface is enabled and the 'internal' interface is # disabled. def create_endpoint(service_id, region, **kwargs): ref = unit.new_endpoint_ref( service_id=service_id, region_id=region, url='http://localhost/%s' % uuid.uuid4().hex, **kwargs ) PROVIDERS.catalog_api.create_endpoint(ref['id'], ref) return ref # Create a service for use with the endpoints. service_ref = unit.new_service_ref() service_id = service_ref['id'] PROVIDERS.catalog_api.create_service(service_id, service_ref) region = unit.new_region_ref() PROVIDERS.catalog_api.create_region(region) # Create endpoints enabled_endpoint_ref = create_endpoint(service_id, region['id']) disabled_endpoint_ref = create_endpoint( service_id, region['id'], enabled=False, interface='internal' ) return service_ref, enabled_endpoint_ref, disabled_endpoint_ref def test_list_endpoints(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) expected_ids = {uuid.uuid4().hex for _ in range(3)} for endpoint_id in expected_ids: endpoint = unit.new_endpoint_ref( service_id=service['id'], id=endpoint_id, region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) endpoints = PROVIDERS.catalog_api.list_endpoints() self.assertEqual(expected_ids, {e['id'] for e in endpoints}) def test_get_v3_catalog_endpoint_disabled(self): """Get back only enabled endpoints when get the v3 catalog.""" enabled_endpoint_ref = self._create_endpoints()[1] user_id = uuid.uuid4().hex # Use the project created by the default fixture since the project # should exist if we want to filter the catalog by the project or # replace the url with a valid project id. 
catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.project_bar['id'] ) endpoint_ids = [x['id'] for x in catalog[0]['endpoints']] self.assertEqual([enabled_endpoint_ref['id']], endpoint_ids) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_endpoint(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service endpoint = unit.new_endpoint_ref( service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) # cache the endpoint PROVIDERS.catalog_api.get_endpoint(endpoint['id']) # update the endpoint via catalog api new_url = {'url': uuid.uuid4().hex} PROVIDERS.catalog_api.update_endpoint(endpoint['id'], new_url) # assert that we can get the new endpoint current_endpoint = PROVIDERS.catalog_api.get_endpoint(endpoint['id']) self.assertEqual(new_url['url'], current_endpoint['url']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/catalog/test_core.py0000664000175000017500000000760500000000000023430 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid

from keystone.common import utils
from keystone import exception
from keystone.tests import unit


class FormatUrlTests(unit.BaseTestCase):
    """Exercise keystone.common.utils.format_url endpoint-template handling."""

    def test_successful_formatting(self):
        template = (
            'http://server:9090/$(tenant_id)s/$(user_id)s/$(project_id)s'
        )
        pid = uuid.uuid4().hex
        substitutions = {
            'tenant_id': 'A',
            'user_id': 'B',
            'project_id': pid,
        }
        formatted = utils.format_url(template, substitutions)
        self.assertEqual(f'http://server:9090/A/B/{pid}', formatted)

    def test_raises_malformed_on_missing_key(self):
        # No value supplied for the tenant_id substitution.
        self.assertRaises(
            exception.MalformedEndpoint,
            utils.format_url,
            "http://server:9090/$(tenant_id)s",
            {},
        )

    def test_raises_malformed_on_wrong_type(self):
        # %d conversion applied to a non-integer value.
        self.assertRaises(
            exception.MalformedEndpoint,
            utils.format_url,
            "http://server:9090/$(tenant_id)d",
            {"tenant_id": 'A'},
        )

    def test_raises_malformed_on_incomplete_format(self):
        # Template ends without a conversion character after $(tenant_id).
        self.assertRaises(
            exception.MalformedEndpoint,
            utils.format_url,
            "http://server:9090/$(tenant_id)",
            {"tenant_id": 'A'},
        )

    def test_formatting_a_non_string(self):
        # Anything that is not a string template is rejected.
        for bad_template in (None, object()):
            self.assertRaises(
                exception.MalformedEndpoint,
                utils.format_url,
                bad_template,
                {},
            )

    def test_substitution_with_key_not_allowed(self):
        # If the url template contains a substitution that's not in the
        # allowed list then MalformedEndpoint is raised.
        # For example, admin_token isn't allowed.
        template = (
            'http://server:9090/$(project_id)s/$(user_id)s/$(admin_token)s'
        )
        substitutions = {'user_id': 'B', 'admin_token': 'C'}
        self.assertRaises(
            exception.MalformedEndpoint,
            utils.format_url,
            template,
            substitutions,
        )

    def test_substitution_with_allowed_tenant_keyerror(self):
        # No value of 'tenant_id' is passed into the template, but
        # 'tenant_id' is listed in silent_keyerror_failures, so format_url
        # returns None instead of raising MalformedEndpoint.  This is
        # intentional: one endpoint template that cannot be filled in
        # should not hide every endpoint that follows it.
        template = 'http://server:9090/$(tenant_id)s/$(user_id)s'
        result = utils.format_url(
            template,
            {'user_id': 'B'},
            silent_keyerror_failures=['tenant_id'],
        )
        self.assertIsNone(result)

    def test_substitution_with_allowed_project_keyerror(self):
        # Same as the tenant_id case above, but for 'project_id'.
        template = 'http://server:9090/$(project_id)s/$(user_id)s'
        result = utils.format_url(
            template,
            {'user_id': 'B'},
            silent_keyerror_failures=['project_id'],
        )
        self.assertIsNone(result)
# --- tar boundary: keystone/tests/unit/common/sql/test_upgrades.py ---
# Licensed under the Apache License,
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for database migrations for the database. These are "opportunistic" tests which allow testing against all three databases (sqlite in memory, mysql, pg) in a properly configured unit test environment. For the opportunistic testing you need to set up DBs named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that DB and username/password combo to run the tests. """ import fixtures from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures from oslo_db.sqlalchemy import test_migrations from oslo_log.fixture import logging_error as log_fixture from oslo_log import log as logging from oslotest import base # We need to import all of these so the tables are registered. 
# It would be easier if these were all in a central location :(
import keystone.application_credential.backends.sql  # noqa: F401
import keystone.assignment.backends.sql  # noqa: F401
import keystone.assignment.role_backends.sql_model  # noqa: F401
import keystone.catalog.backends.sql  # noqa: F401
from keystone.common import sql
from keystone.common.sql import upgrades
import keystone.conf
import keystone.credential.backends.sql  # noqa: F401
import keystone.endpoint_policy.backends.sql  # noqa: F401
import keystone.federation.backends.sql  # noqa: F401
import keystone.identity.backends.sql_model  # noqa: F401
import keystone.identity.mapping_backends.sql  # noqa: F401
import keystone.limit.backends.sql  # noqa: F401
import keystone.oauth1.backends.sql  # noqa: F401
import keystone.policy.backends.sql  # noqa: F401
import keystone.resource.backends.sql_model  # noqa: F401
import keystone.resource.config_backends.sql  # noqa: F401
import keystone.revoke.backends.sql  # noqa: F401
from keystone.tests.unit import ksfixtures
import keystone.trust.backends.sql  # noqa: F401

CONF = keystone.conf.CONF
LOG = logging.getLogger(__name__)


class KeystoneModelsMigrationsSync(test_migrations.ModelsMigrationsSync):
    """Compare the SQLAlchemy models against the applied migrations.

    oslo.db's ModelsMigrationsSync runs the full migration chain (via
    ``db_sync``) and then diffs the resulting schema against the model
    metadata (via ``get_metadata``).  ``include_object`` and
    ``filter_metadata_diff`` whitelist known, accepted discrepancies.
    """

    # Migrations can take a long time, particularly on underpowered CI nodes.
    # Give them some breathing room.
    TIMEOUT_SCALING_FACTOR = 4

    def setUp(self):
        # Ensure BaseTestCase's ConfigureLogging fixture is disabled since
        # we're using our own (StandardLogging).
        with fixtures.EnvironmentVariable('OS_LOG_CAPTURE', '0'):
            super().setUp()

        self.useFixture(log_fixture.get_logging_handle_error_fixture())
        self.useFixture(ksfixtures.WarningsFixture())
        self.useFixture(ksfixtures.StandardLogging())

        self.engine = enginefacade.writer.get_engine()

        # Configure our connection string in CONF and enable SQLite fkeys
        db_options.set_defaults(CONF, connection=self.engine.url)

        # TODO(stephenfin): Do we need this? I suspect not since we're using
        # enginefacade.write.get_engine() directly above
        # Override keystone's context manager to be oslo.db's global context
        # manager.
        sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True
        self.addCleanup(
            setattr, sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False
        )
        self.addCleanup(sql.cleanup)

    def db_sync(self, engine):
        # Run every migration against the test engine.
        upgrades.offline_sync_database_to_version(engine=engine)

    def get_engine(self):
        # Engine to introspect after db_sync has run.
        return self.engine

    def get_metadata(self):
        # Model-side metadata to diff against the migrated schema.
        return sql.ModelBase.metadata

    def include_object(self, object_, name, type_, reflected, compare_to):
        """Filter tables that intentionally exist on only one side."""
        if type_ == 'table':
            # migrate_version is a sqlalchemy-migrate control table and
            # isn't included in the models
            if name == 'migrate_version':
                return False

            # This is created in tests and isn't a "real" table
            if name == 'test_table':
                return False

            # FIXME(stephenfin): This was dropped in commit 93aff6e42 but the
            # migrations were never adjusted
            if name == 'token':
                return False

        return True

    def filter_metadata_diff(self, diff):
        """Filter changes before assert in test_models_sync().

        :param diff: a list of differences (see `compare_metadata()`
            docs for details on format)
        :returns: a list of differences
        """
        new_diff = []
        for element in diff:
            # The modify_foo elements are lists; everything else is a tuple
            if isinstance(element, list):
                if element[0][0] == 'modify_nullable':
                    if (element[0][2], element[0][3]) in (
                        ('credential', 'encrypted_blob'),
                        ('credential', 'key_hash'),
                        ('federated_user', 'user_id'),
                        ('federated_user', 'idp_id'),
                        ('local_user', 'user_id'),
                        ('nonlocal_user', 'user_id'),
                        ('password', 'local_user_id'),
                    ):
                        continue  # skip

                if element[0][0] == 'modify_default':
                    if (element[0][2], element[0][3]) in (
                        ('password', 'created_at_int'),
                        ('password', 'self_service'),
                        ('project', 'is_domain'),
                        ('service_provider', 'relay_state_prefix'),
                    ):
                        continue  # skip
            else:
                # FIXME(stephenfin): These indexes are present in the
                # migrations but not on the equivalent models. Resolve by
                # updating the models.
                if element[0] == 'add_index':
                    if (
                        element[1].table.name,
                        [x.name for x in element[1].columns],
                    ) in (
                        ('access_rule', ['external_id']),
                        ('access_rule', ['user_id']),
                        ('revocation_event', ['revoked_at']),
                        ('system_assignment', ['actor_id']),
                        ('user', ['default_project_id']),
                    ):
                        continue  # skip

                # FIXME(stephenfin): These indexes are present on the models
                # but not in the migrations. Resolve by either removing from
                # the models or adding new migrations.
                # NOTE(review): a stray empty tuple used to sit in this
                # whitelist; it could never match a (table, columns) pair
                # and has been removed.
                if element[0] == 'remove_index':
                    if (
                        element[1].table.name,
                        [x.name for x in element[1].columns],
                    ) in (
                        ('access_rule', ['external_id']),
                        ('access_rule', ['user_id']),
                        ('access_token', ['consumer_id']),
                        ('endpoint', ['service_id']),
                        ('revocation_event', ['revoked_at']),
                        ('user', ['default_project_id']),
                        ('user_group_membership', ['group_id']),
                        (
                            'trust',
                            [
                                'trustor_user_id',
                                'trustee_user_id',
                                'project_id',
                                'impersonation',
                                'expires_at',
                                'expires_at_int',
                            ],
                        ),
                    ):
                        continue  # skip

                # FIXME(stephenfin): These fks are present in the
                # migrations but not on the equivalent models. Resolve by
                # updating the models.
                if element[0] == 'add_fk':
                    if (element[1].table.name, element[1].column_keys) in (
                        (
                            'application_credential_access_rule',
                            ['access_rule_id'],
                        ),
                        ('limit', ['registered_limit_id']),
                        ('registered_limit', ['service_id']),
                        ('registered_limit', ['region_id']),
                        ('endpoint', ['region_id']),
                    ):
                        continue  # skip

                # FIXME(stephenfin): These indexes are present on the models
                # but not in the migrations. Resolve by either removing from
                # the models or adding new migrations.
                if element[0] == 'remove_fk':
                    if (element[1].table.name, element[1].column_keys) in (
                        (
                            'application_credential_access_rule',
                            ['access_rule_id'],
                        ),
                        ('endpoint', ['region_id']),
                        ('assignment', ['role_id']),
                    ):
                        continue  # skip

            new_diff.append(element)

        return new_diff


class TestModelsSyncSQLite(
    KeystoneModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    base.BaseTestCase,
):
    pass


class TestModelsSyncMySQL(
    KeystoneModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    base.BaseTestCase,
):
    FIXTURE = test_fixtures.MySQLOpportunisticFixture


class TestModelsSyncPostgreSQL(
    KeystoneModelsMigrationsSync,
    test_fixtures.OpportunisticDBTestMixin,
    base.BaseTestCase,
):
    FIXTURE = test_fixtures.PostgresqlOpportunisticFixture
# --- tar boundary: keystone/tests/unit/common/test_cache.py ---
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid

from dogpile.cache import api as dogpile
from dogpile.cache.backends import memory
from oslo_config import fixture as config_fixture

from keystone.common import cache
import keystone.conf
from keystone.tests import unit

CONF = keystone.conf.CONF


class TestCacheRegion(unit.BaseTestCase):
    """Verify cross-region cache invalidation.

    Two region handles (region0 and region1) are created with the same
    region name and share one in-memory backend; each test checks that
    operations performed through one handle are visible through the other.
    """

    def setUp(self):
        super().setUp()
        self.config_fixture = self.useFixture(config_fixture.Config(CONF))
        self.config_fixture.config(
            # TODO(morganfainberg): Make Cache Testing a separate test case
            # in tempest, and move it out of the base unit tests.
            group='cache',
            backend='dogpile.cache.memory',
        )

        # replace existing backend since this may already be configured
        cache.CACHE_INVALIDATION_REGION.configure(
            backend='dogpile.cache.memory',
            expiration_time=None,
            replace_existing_backend=True,
        )

        # NOTE(review): region_name appears unused in this class; the
        # regions below use the literal name 'test_region' — confirm
        # whether this attribute can be dropped.
        self.region_name = uuid.uuid4().hex
        # Two handles onto the same named region, so invalidating one
        # should be observable through the other.
        self.region0 = cache.create_region('test_region')
        self.region1 = cache.create_region('test_region')
        cache.configure_cache(region=self.region0)
        cache.configure_cache(region=self.region1)

        # TODO(dstanek): this should be a mock entrypoint
        # Share a single memory backend between both region handles.
        self.cache_dict = {}
        self.backend = memory.MemoryBackend({'cache_dict': self.cache_dict})
        self.region0.backend = self.backend
        self.region1.backend = self.backend

    def _assert_has_no_value(self, values):
        # Assert every entry is dogpile's NoValue sentinel (cache miss).
        for value in values:
            self.assertIsInstance(value, dogpile.NoValue)

    def test_singular_methods_when_invalidating_the_region(self):
        key = uuid.uuid4().hex
        value = uuid.uuid4().hex

        # key does not exist
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)
        # make it exist
        self.region0.set(key, value)
        # ensure it exists
        self.assertEqual(value, self.region0.get(key))
        # invalidating region1 should invalidate region0
        self.region1.invalidate()
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)

    def test_region_singular_methods_delete(self):
        key = uuid.uuid4().hex
        value = uuid.uuid4().hex

        # key does not exist
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)
        # make it exist
        self.region0.set(key, value)
        # ensure it exists
        self.assertEqual(value, self.region0.get(key))
        # delete it
        self.region1.delete(key)
        # ensure it's gone
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)

    def test_multi_methods_when_invalidating_the_region(self):
        mapping = {uuid.uuid4().hex: uuid.uuid4().hex for _ in range(4)}
        keys = list(mapping.keys())
        values = [mapping[k] for k in keys]

        # keys do not exist
        self._assert_has_no_value(self.region0.get_multi(keys))
        # make them exist
        self.region0.set_multi(mapping)
        # ensure they exist
        self.assertEqual(values, self.region0.get_multi(keys))
        # check using the singular get method for completeness
        self.assertEqual(mapping[keys[0]], self.region0.get(keys[0]))
        # invalidating region1 should invalidate region0
        self.region1.invalidate()
        # ensure they are gone
        self._assert_has_no_value(self.region0.get_multi(keys))

    def test_region_multi_methods_delete(self):
        mapping = {uuid.uuid4().hex: uuid.uuid4().hex for _ in range(4)}
        keys = list(mapping.keys())
        values = [mapping[k] for k in keys]

        # keys do not exist
        self._assert_has_no_value(self.region0.get_multi(keys))
        # make them exist
        self.region0.set_multi(mapping)
        # ensure they exist
        keys = list(mapping.keys())
        self.assertEqual(values, self.region0.get_multi(keys))
        # check using the singular get method for completeness
        self.assertEqual(mapping[keys[0]], self.region0.get(keys[0]))
        # delete them
        self.region1.delete_multi(mapping.keys())
        # ensure they are gone
        self._assert_has_no_value(self.region0.get_multi(keys))

    def test_memoize_decorator_when_invalidating_the_region(self):
        memoize = cache.get_memoization_decorator('cache', region=self.region0)

        @memoize
        def func(value):
            # Appending a fresh uuid makes each *uncached* call return a
            # different result, so cache hits are distinguishable.
            return value + uuid.uuid4().hex

        key = uuid.uuid4().hex

        # test get/set
        return_value = func(key)
        # the values should be the same since it comes from the cache
        self.assertEqual(return_value, func(key))

        # invalidating region1 should invalidate region0
        self.region1.invalidate()
        new_value = func(key)
        self.assertNotEqual(return_value, new_value)

    def test_combination(self):
        memoize = cache.get_memoization_decorator('cache', region=self.region0)

        @memoize
        def func(value):
            return value + uuid.uuid4().hex

        key = uuid.uuid4().hex
        simple_value = uuid.uuid4().hex

        # test get/set using the decorator
        return_value = func(key)
        self.assertEqual(return_value, func(key))

        # test get/set using the singular methods
        self.region0.set(key, simple_value)
        self.assertEqual(simple_value, self.region0.get(key))

        # invalidating region1 should invalidate region0
        self.region1.invalidate()

        # ensure that the decorated function returns a new value
        new_value = func(key)
        self.assertNotEqual(return_value, new_value)

        # ensure that a get doesn't have a value
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)

    def test_direct_region_key_invalidation(self):
        """Invalidate by manually clearing the region key's value.

        NOTE(dstanek): I normally don't like tests that repeat application
        logic, but in this case we need to. There are too many ways that
        the tests above can erroneously pass that we need this sanity check.
        """
        region_key = cache.RegionInvalidationManager(
            None, self.region0.name
        )._region_key
        key = uuid.uuid4().hex
        value = uuid.uuid4().hex

        # key does not exist
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)
        # make it exist
        self.region0.set(key, value)
        # ensure it exists
        self.assertEqual(value, self.region0.get(key))
        # test invalidation
        cache.CACHE_INVALIDATION_REGION.delete(region_key)
        self.assertIsInstance(self.region0.get(key), dogpile.NoValue)
# --- tar boundary: keystone/tests/unit/common/test_database_conflicts.py ---
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import uuid

from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.tests import unit
from keystone.tests.unit import mapping_fixtures
from keystone.tests.unit import test_v3

CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs


class DuplicateTestCase(test_v3.RestfulTestCase):
    """Verify that Conflict errors identify the duplicated entity."""

    # TODO(lbragstad): This class relies heavily on the usage of try/excepts
    # within the tests. We could achieve the same functionality with better
    # readability using a context manager from `assertRaises()`. The reason
    # why we aren't is because we are using the testtools library, which
    # reimplemented the functionality of `assertRaises` but didn't include
    # support for using it to generate a context manager. If that ever
    # changes, or if we move away from testtools, we should fix this to be
    # more test-like and not rely on try/except/else patterns in tests.
    # The repeated try/except/else pattern is centralized in
    # _assert_conflict() below.

    def _assert_conflict(self, fn, args, fragment, fail_msg=None):
        """Call ``fn(*args)`` and verify it raises ``exception.Conflict``.

        :param fn: callable expected to raise Conflict
        :param args: tuple of positional arguments for ``fn``
        :param fragment: substring that must appear in the ``repr()`` of
            the raised Conflict
        :param fail_msg: failure message used when no exception is raised;
            when None, the absence of an exception is tolerated (any
            exception other than Conflict still fails the test), matching
        the lenient "*_with_id_in_id" tests below
        """
        try:
            fn(*args)
        except exception.Conflict as e:
            self.assertIn(fragment, repr(e))
        else:
            if fail_msg is not None:
                self.fail(fail_msg)

    def _create_idp_mapping_protocol(self, idp_id=None, protocol_id=None):
        """Create an IdP, a mapping, and a protocol for the conflict tests.

        Stores the IdP and mapping on ``self`` (as the original inline
        setup did) and returns the (protocol request dict, created
        protocol) pair.
        """
        self.idp = {
            'id': idp_id or uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex,
        }
        PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp)
        self.mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
        self.mapping['id'] = uuid.uuid4().hex
        PROVIDERS.federation_api.create_mapping(
            self.mapping['id'], self.mapping
        )
        protocol = {
            'id': protocol_id or uuid.uuid4().hex,
            'mapping_id': self.mapping['id'],
        }
        protocol_ret = PROVIDERS.federation_api.create_protocol(
            self.idp['id'], protocol['id'], protocol
        )
        return protocol, protocol_ret

    def test_domain_duplicate_conflict_gives_name(self):
        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        # New id, same name: the name is what conflicts.
        domain['id'] = uuid.uuid4().hex
        self._assert_conflict(
            PROVIDERS.resource_api.create_domain,
            (domain['id'], domain),
            "%s" % domain['name'],
            "Creating duplicate domain did not raise a conflict",
        )

    def test_project_duplicate_conflict_gives_name(self):
        project = unit.new_project_ref(domain_id=self.domain_id)
        PROVIDERS.resource_api.create_project(project['id'], project)
        project['id'] = uuid.uuid4().hex
        self._assert_conflict(
            PROVIDERS.resource_api.create_project,
            (project['id'], project),
            "%s" % project['name'],
            "Creating duplicate project did not raise a conflict",
        )

    def test_user_duplicate_conflict_gives_name(self):
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        user['id'] = uuid.uuid4().hex
        self._assert_conflict(
            PROVIDERS.identity_api.create_user,
            (user,),
            "Duplicate entry found with name %s" % user['name'],
            "Create duplicate user did not raise a conflict",
        )

    def test_role_duplicate_conflict_gives_name(self):
        role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role['id'], role)
        role['id'] = uuid.uuid4().hex
        self._assert_conflict(
            PROVIDERS.role_api.create_role,
            (role['id'], role),
            "Duplicate entry found with name %s" % role['name'],
            "Create duplicate role did not raise a conflict",
        )

    def test_group_duplicate_conflict_gives_name(self):
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        self._assert_conflict(
            PROVIDERS.identity_api.create_group,
            (group,),
            "Duplicate entry found with name %s" % group['name'],
            "Create duplicate group did not raise a conflict",
        )

    def test_policy_duplicate_conflict_gives_name(self):
        policy_ref = unit.new_policy_ref()
        PROVIDERS.policy_api.create_policy(policy_ref['id'], policy_ref)
        self._assert_conflict(
            PROVIDERS.policy_api.create_policy,
            (policy_ref['id'], policy_ref),
            "Duplicate entry found with name %s" % policy_ref['name'],
            "Create duplicate policy did not raise a conflict",
        )

    def test_credential_duplicate_conflict_gives_name(self):
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        credential = unit.new_credential_ref(user_id=user['id'])
        PROVIDERS.credential_api.create_credential(
            credential['id'], credential
        )
        # Credentials conflict on ID, not name.
        self._assert_conflict(
            PROVIDERS.credential_api.create_credential,
            (credential['id'], credential),
            "Duplicate entry found with ID %s" % credential['id'],
            "Create duplicate credential did not raise a conflict",
        )

    def test_trust_duplicate_conflict_gives_name(self):
        trustor = unit.new_user_ref(domain_id=self.domain_id)
        trustor = PROVIDERS.identity_api.create_user(trustor)
        trustee = unit.new_user_ref(domain_id=self.domain_id)
        trustee = PROVIDERS.identity_api.create_user(trustee)
        role_ref = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_ref['id'], role_ref)
        trust_ref = unit.new_trust_ref(trustor['id'], trustee['id'])
        PROVIDERS.trust_api.create_trust(
            trust_ref['id'], trust_ref, [role_ref]
        )
        self._assert_conflict(
            PROVIDERS.trust_api.create_trust,
            (trust_ref['id'], trust_ref, [role_ref]),
            "Duplicate entry found with ID %s" % trust_ref['id'],
            "Create duplicate trust did not raise a conflict",
        )

    def test_mapping_duplicate_conflict_gives_name(self):
        self.mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
        self.mapping['id'] = uuid.uuid4().hex
        PROVIDERS.federation_api.create_mapping(
            self.mapping['id'], self.mapping
        )
        self._assert_conflict(
            PROVIDERS.federation_api.create_mapping,
            (self.mapping['id'], self.mapping),
            "Duplicate entry found with ID %s" % self.mapping['id'],
            "Create duplicate mapping did not raise a conflict",
        )

    def test_mapping_duplicate_conflict_with_id_in_id(self):
        # Exercise message construction when the literal string 'id'
        # appears inside the mapping ID itself.
        self.mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER
        self.mapping['id'] = 'mapping_with_id_in_the_id'
        PROVIDERS.federation_api.create_mapping(
            self.mapping['id'], self.mapping
        )
        # fail_msg=None: only a wrong exception fails this test.
        self._assert_conflict(
            PROVIDERS.federation_api.create_mapping,
            (self.mapping['id'], self.mapping),
            "Duplicate entry found with ID %s" % self.mapping['id'],
        )

    def test_region_duplicate_conflict_gives_name(self):
        region_ref = unit.new_region_ref()
        PROVIDERS.catalog_api.create_region(region_ref)
        self._assert_conflict(
            PROVIDERS.catalog_api.create_region,
            (region_ref,),
            "Duplicate ID, %s" % region_ref['id'],
            "Create duplicate region did not raise a conflict",
        )

    def test_federation_protocol_duplicate_conflict_gives_name(self):
        protocol, protocol_ret = self._create_idp_mapping_protocol()
        self._assert_conflict(
            PROVIDERS.federation_api.create_protocol,
            (self.idp['id'], protocol['id'], protocol),
            "Duplicate entry found with ID %s" % protocol_ret['id'],
            "Create duplicate federation_protocol did not raise a conflict",
        )

    def test_federation_protocol_duplicate_conflict_with_id_in_id(self):
        protocol, protocol_ret = self._create_idp_mapping_protocol(
            protocol_id='federation_protocol_with_id_in_the_id'
        )
        # fail_msg=None: only a wrong exception fails this test.
        self._assert_conflict(
            PROVIDERS.federation_api.create_protocol,
            (self.idp['id'], protocol['id'], protocol),
            "Duplicate entry found with ID %s" % protocol_ret['id'],
        )

    def test_federation_protocol_duplicate_conflict_with_id_in_idp_id(self):
        protocol, protocol_ret = self._create_idp_mapping_protocol(
            idp_id='myidp'
        )
        # fail_msg=None: only a wrong exception fails this test.
        self._assert_conflict(
            PROVIDERS.federation_api.create_protocol,
            (self.idp['id'], protocol['id'], protocol),
            "Duplicate entry found with ID %s" % protocol_ret['id'],
        )

    def test_sp_duplicate_conflict_gives_name(self):
        sp = {
            'auth_url': uuid.uuid4().hex,
            'enabled': True,
            'description': uuid.uuid4().hex,
            'sp_url': uuid.uuid4().hex,
            'relay_state_prefix': CONF.saml.relay_state_prefix,
        }
        service_ref = PROVIDERS.federation_api.create_sp('SP1', sp)
        self._assert_conflict(
            PROVIDERS.federation_api.create_sp,
            ('SP1', sp),
            "Duplicate entry found with ID %s" % service_ref['id'],
            "Create duplicate sp did not raise a conflict",
        )
# --- tar boundary: keystone/tests/unit/common/test_json_home.py ---
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy

from testtools import matchers

from keystone.common import json_home
from keystone.tests import unit


class JsonHomeTest(unit.BaseTestCase):
    """Exercise the json_home relation builders and URL translation."""

    def test_build_v3_resource_relation(self):
        resource_name = self.getUniqueString()
        relation = json_home.build_v3_resource_relation(resource_name)
        exp_relation = (
            'https://docs.openstack.org/api/openstack-identity/3/rel/%s'
            % resource_name
        )
        self.assertThat(relation, matchers.Equals(exp_relation))

    def test_build_v3_extension_resource_relation(self):
        extension_name = self.getUniqueString()
        extension_version = self.getUniqueString()
        resource_name = self.getUniqueString()
        relation = json_home.build_v3_extension_resource_relation(
            extension_name, extension_version, resource_name
        )
        exp_relation = (
            'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/rel'
            '/%s' % (extension_name, extension_version, resource_name)
        )
        self.assertThat(relation, matchers.Equals(exp_relation))

    def test_build_v3_parameter_relation(self):
        parameter_name = self.getUniqueString()
        relation = json_home.build_v3_parameter_relation(parameter_name)
        exp_relation = (
            'https://docs.openstack.org/api/openstack-identity/3/param/%s'
            % parameter_name
        )
        self.assertThat(relation, matchers.Equals(exp_relation))

    def test_build_v3_extension_parameter_relation(self):
        extension_name = self.getUniqueString()
        extension_version = self.getUniqueString()
        parameter_name = self.getUniqueString()
        relation = json_home.build_v3_extension_parameter_relation(
            extension_name, extension_version, parameter_name
        )
        exp_relation = (
            'https://docs.openstack.org/api/openstack-identity/3/ext/%s/%s/'
            'param/%s' % (extension_name, extension_version, parameter_name)
        )
        self.assertThat(relation, matchers.Equals(exp_relation))

    def test_translate_urls(self):
        # Both plain 'href' and templated 'href-template' entries should
        # gain the prefix; 'href-vars' must pass through untouched.
        href_rel = self.getUniqueString()
        href = self.getUniqueString()
        href_template_rel = self.getUniqueString()
        href_template = self.getUniqueString()
        href_vars = {self.getUniqueString(): self.getUniqueString()}
        original_json_home = {
            'resources': {
                href_rel: {'href': href},
                href_template_rel: {
                    'href-template': href_template,
                    'href-vars': href_vars,
                },
            }
        }
        new_json_home = copy.deepcopy(original_json_home)
        new_prefix = self.getUniqueString()
        json_home.translate_urls(new_json_home, new_prefix)
        exp_json_home = {
            'resources': {
                href_rel: {'href': new_prefix + href},
                href_template_rel: {
                    'href-template': new_prefix + href_template,
                    'href-vars': href_vars,
                },
            }
        }
        self.assertThat(new_json_home, matchers.Equals(exp_json_home))
# --- tar boundary: keystone/tests/unit/common/test_notifications.py ---
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import http.client from unittest import mock import uuid import fixtures import freezegun from oslo_config import fixture as config_fixture from oslo_log import log import oslo_messaging from oslo_utils import timeutils from pycadf import cadftaxonomy from pycadf import cadftype from pycadf import eventfactory from pycadf import resource as cadfresource from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import notifications from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs EXP_RESOURCE_TYPE = uuid.uuid4().hex CREATED_OPERATION = notifications.ACTIONS.created UPDATED_OPERATION = notifications.ACTIONS.updated DELETED_OPERATION = notifications.ACTIONS.deleted DISABLED_OPERATION = notifications.ACTIONS.disabled class ArbitraryException(Exception): pass def register_callback(operation, resource_type=EXP_RESOURCE_TYPE): """Helper for creating and registering a mock callback.""" callback = mock.Mock( __name__='callback', im_class=mock.Mock(__name__='class') ) notifications.register_event_callback(operation, resource_type, callback) return callback class AuditNotificationsTestCase(unit.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(notifications.clear_subscribers) def _test_notification_operation_with_basic_format( self, notify_function, operation ): self.config_fixture.config(notification_format='basic') exp_resource_id = 
uuid.uuid4().hex callback = register_callback(operation) notify_function(EXP_RESOURCE_TYPE, exp_resource_id) callback.assert_called_once_with( 'identity', EXP_RESOURCE_TYPE, operation, {'resource_info': exp_resource_id}, ) def _test_notification_operation_with_cadf_format( self, notify_function, operation ): self.config_fixture.config(notification_format='cadf') exp_resource_id = uuid.uuid4().hex with mock.patch( 'keystone.notifications._create_cadf_payload' ) as cadf_notify: notify_function(EXP_RESOURCE_TYPE, exp_resource_id) initiator = None reason = None cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator, reason, ) notify_function(EXP_RESOURCE_TYPE, exp_resource_id, public=False) cadf_notify.assert_called_once_with( operation, EXP_RESOURCE_TYPE, exp_resource_id, notifications.taxonomy.OUTCOME_SUCCESS, initiator, reason, ) def test_resource_created_notification(self): self._test_notification_operation_with_basic_format( notifications.Audit.created, CREATED_OPERATION ) self._test_notification_operation_with_cadf_format( notifications.Audit.created, CREATED_OPERATION ) def test_resource_updated_notification(self): self._test_notification_operation_with_basic_format( notifications.Audit.updated, UPDATED_OPERATION ) self._test_notification_operation_with_cadf_format( notifications.Audit.updated, UPDATED_OPERATION ) def test_resource_deleted_notification(self): self._test_notification_operation_with_basic_format( notifications.Audit.deleted, DELETED_OPERATION ) self._test_notification_operation_with_cadf_format( notifications.Audit.deleted, DELETED_OPERATION ) def test_resource_disabled_notification(self): self._test_notification_operation_with_basic_format( notifications.Audit.disabled, DISABLED_OPERATION ) self._test_notification_operation_with_cadf_format( notifications.Audit.disabled, DISABLED_OPERATION ) class NotificationsTestCase(unit.BaseTestCase): def setUp(self): 
super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config( group='oslo_messaging_notifications', transport_url='rabbit://' ) def test_send_notification(self): """Test _send_notification. Test the private method _send_notification to ensure event_type, payload, and context are built and passed properly. """ resource = uuid.uuid4().hex resource_type = EXP_RESOURCE_TYPE operation = CREATED_OPERATION conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_format='basic') # NOTE(ldbragst): Even though notifications._send_notification doesn't # contain logic that creates cases, this is supposed to test that # context is always empty and that we ensure the resource ID of the # resource in the notification is contained in the payload. It was # agreed that context should be empty in Keystone's case, which is # also noted in the /keystone/notifications.py module. This test # ensures and maintains these conditions. expected_args = [ {}, # empty context 'identity.%s.created' % resource_type, # event_type {'resource_info': resource}, # payload ] with mock.patch.object( notifications._get_notifier(), 'info' ) as mocked: notifications._send_notification( operation, resource_type, resource ) mocked.assert_called_once_with(*expected_args) def test_send_notification_with_opt_out(self): """Test the private method _send_notification with opt-out. Test that _send_notification does not notify when a valid notification_opt_out configuration is provided. """ resource = uuid.uuid4().hex resource_type = EXP_RESOURCE_TYPE operation = CREATED_OPERATION event_type = 'identity.%s.created' % resource_type # NOTE(diazjf): Here we add notification_opt_out to the # configuration so that we should return before _get_notifer is # called. This is because we are opting out notifications for the # passed resource_type and operation. 
conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=[event_type]) with mock.patch.object( notifications._get_notifier(), 'info' ) as mocked: notifications._send_notification( operation, resource_type, resource ) mocked.assert_not_called() def test_send_audit_notification_with_opt_out(self): """Test the private method _send_audit_notification with opt-out. Test that _send_audit_notification does not notify when a valid notification_opt_out configuration is provided. """ resource_type = EXP_RESOURCE_TYPE action = CREATED_OPERATION + '.' + resource_type initiator = mock target = mock outcome = 'success' event_type = 'identity.%s.created' % resource_type conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=[event_type]) with mock.patch.object( notifications._get_notifier(), 'info' ) as mocked: notifications._send_audit_notification( action, initiator, outcome, target, event_type ) mocked.assert_not_called() def test_opt_out_authenticate_event(self): """Test that authenticate events are successfully opted out.""" resource_type = EXP_RESOURCE_TYPE action = CREATED_OPERATION + '.' 
+ resource_type initiator = mock target = mock outcome = 'success' event_type = 'identity.authenticate' meter_name = f'{event_type}.{outcome}' conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_opt_out=[meter_name]) with mock.patch.object( notifications._get_notifier(), 'info' ) as mocked: notifications._send_audit_notification( action, initiator, outcome, target, event_type ) mocked.assert_not_called() class BaseNotificationTest(test_v3.RestfulTestCase): def setUp(self): super().setUp() self._notifications = [] self._audits = [] def fake_notify( operation, resource_type, resource_id, initiator=None, actor_dict=None, public=True, ): note = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'initiator': initiator, 'send_notification_called': True, 'public': public, } if actor_dict: note['actor_id'] = actor_dict.get('id') note['actor_type'] = actor_dict.get('type') note['actor_operation'] = actor_dict.get('actor_operation') self._notifications.append(note) self.useFixture( fixtures.MockPatchObject( notifications, '_send_notification', fake_notify ) ) def fake_audit( action, initiator, outcome, target, event_type, reason=None, **kwargs, ): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, reason=reason, observer=cadfresource.Resource(typeURI=service_security), ) for key, value in kwargs.items(): setattr(event, key, value) payload = event.as_dict() audit = { 'payload': payload, 'event_type': event_type, 'send_notification_called': True, } self._audits.append(audit) self.useFixture( fixtures.MockPatchObject( notifications, '_send_audit_notification', fake_audit ) ) def _assert_last_note( self, resource_id, operation, resource_type, actor_id=None, actor_type=None, actor_operation=None, ): # NOTE(stevemar): If 'basic' format is not used, then simply # return 
since this assertion is not valid. if CONF.notification_format != 'basic': return self.assertGreater(len(self._notifications), 0) note = self._notifications[-1] self.assertEqual(operation, note['operation']) self.assertEqual(resource_id, note['resource_id']) self.assertEqual(resource_type, note['resource_type']) self.assertTrue(note['send_notification_called']) if actor_id: self.assertEqual(actor_id, note['actor_id']) self.assertEqual(actor_type, note['actor_type']) self.assertEqual(actor_operation, note['actor_operation']) def _assert_last_audit( self, resource_id, operation, resource_type, target_uri, reason=None ): # NOTE(stevemar): If 'cadf' format is not used, then simply # return since this assertion is not valid. if CONF.notification_format != 'cadf': return self.assertGreater(len(self._audits), 0) audit = self._audits[-1] payload = audit['payload'] if 'resource_info' in payload: self.assertEqual(resource_id, payload['resource_info']) action = '.'.join(filter(None, [operation, resource_type])) self.assertEqual(action, payload['action']) self.assertEqual(target_uri, payload['target']['typeURI']) if resource_id: self.assertEqual(resource_id, payload['target']['id']) event_type = '.'.join( filter(None, ['identity', resource_type, operation]) ) self.assertEqual(event_type, audit['event_type']) if reason: self.assertEqual( reason['reasonCode'], payload['reason']['reasonCode'] ) self.assertEqual( reason['reasonType'], payload['reason']['reasonType'] ) self.assertTrue(audit['send_notification_called']) def _assert_initiator_data_is_set(self, operation, resource_type, typeURI): self.assertGreater(len(self._audits), 0) audit = self._audits[-1] payload = audit['payload'] self.assertEqual(self.user_id, payload['initiator']['id']) self.assertEqual(self.project_id, payload['initiator']['project_id']) self.assertEqual(typeURI, payload['target']['typeURI']) self.assertIn('request_id', payload['initiator']) action = f'{operation}.{resource_type}' self.assertEqual(action, 
payload['action']) def _assert_notify_not_sent( self, resource_id, operation, resource_type, public=True ): unexpected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public, } for note in self._notifications: self.assertNotEqual(unexpected, note) def _assert_notify_sent( self, resource_id, operation, resource_type, public=True ): expected = { 'resource_id': resource_id, 'operation': operation, 'resource_type': resource_type, 'send_notification_called': True, 'public': public, } for note in self._notifications: # compare only expected fields if all(note.get(k) == v for k, v in expected.items()): break else: self.fail("Notification not sent.") class NotificationsForEntities(BaseNotificationTest): def test_create_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) self._assert_last_note(group_ref['id'], CREATED_OPERATION, 'group') self._assert_last_audit( group_ref['id'], CREATED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP, ) def test_create_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) self._assert_last_note(project_ref['id'], CREATED_OPERATION, 'project') self._assert_last_audit( project_ref['id'], CREATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT, ) def test_create_role(self): role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') self._assert_last_audit( role_ref['id'], CREATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE, ) def test_create_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(user_ref) self._assert_last_note(user_ref['id'], CREATED_OPERATION, 'user') self._assert_last_audit( user_ref['id'], CREATED_OPERATION, 'user', 
cadftaxonomy.SECURITY_ACCOUNT_USER, ) def test_create_trust(self): trustor = unit.new_user_ref(domain_id=self.domain_id) trustor = PROVIDERS.identity_api.create_user(trustor) trustee = unit.new_user_ref(domain_id=self.domain_id) trustee = PROVIDERS.identity_api.create_user(trustee) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) trust_ref = unit.new_trust_ref(trustor['id'], trustee['id']) PROVIDERS.trust_api.create_trust( trust_ref['id'], trust_ref, [role_ref] ) self._assert_last_note( trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust' ) self._assert_last_audit( trust_ref['id'], CREATED_OPERATION, 'OS-TRUST:trust', cadftaxonomy.SECURITY_TRUST, ) def test_delete_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) PROVIDERS.identity_api.delete_group(group_ref['id']) self._assert_last_note(group_ref['id'], DELETED_OPERATION, 'group') self._assert_last_audit( group_ref['id'], DELETED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP, ) def test_delete_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) PROVIDERS.resource_api.delete_project(project_ref['id']) self._assert_last_note(project_ref['id'], DELETED_OPERATION, 'project') self._assert_last_audit( project_ref['id'], DELETED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT, ) def test_delete_role(self): role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) PROVIDERS.role_api.delete_role(role_ref['id']) self._assert_last_note(role_ref['id'], DELETED_OPERATION, 'role') self._assert_last_audit( role_ref['id'], DELETED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE, ) def test_delete_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(user_ref) PROVIDERS.identity_api.delete_user(user_ref['id']) 
self._assert_last_note(user_ref['id'], DELETED_OPERATION, 'user') self._assert_last_audit( user_ref['id'], DELETED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER, ) def test_create_domain(self): domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], CREATED_OPERATION, 'domain') self._assert_last_audit( domain_ref['id'], CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN, ) def test_update_domain(self): domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['description'] = uuid.uuid4().hex PROVIDERS.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_last_note(domain_ref['id'], UPDATED_OPERATION, 'domain') self._assert_last_audit( domain_ref['id'], UPDATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN, ) def test_delete_domain(self): domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False PROVIDERS.resource_api.update_domain(domain_ref['id'], domain_ref) PROVIDERS.resource_api.delete_domain(domain_ref['id']) self._assert_last_note(domain_ref['id'], DELETED_OPERATION, 'domain') self._assert_last_audit( domain_ref['id'], DELETED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN, ) def test_delete_trust(self): trustor = unit.new_user_ref(domain_id=self.domain_id) trustor = PROVIDERS.identity_api.create_user(trustor) trustee = unit.new_user_ref(domain_id=self.domain_id) trustee = PROVIDERS.identity_api.create_user(trustee) role_ref = unit.new_role_ref() trust_ref = unit.new_trust_ref(trustor['id'], trustee['id']) PROVIDERS.trust_api.create_trust( trust_ref['id'], trust_ref, [role_ref] ) PROVIDERS.trust_api.delete_trust(trust_ref['id']) self._assert_last_note( trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust' ) self._assert_last_audit( trust_ref['id'], DELETED_OPERATION, 'OS-TRUST:trust', 
cadftaxonomy.SECURITY_TRUST, ) def test_create_endpoint(self): endpoint_ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent( endpoint_ref['id'], CREATED_OPERATION, 'endpoint' ) self._assert_last_audit( endpoint_ref['id'], CREATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT, ) def test_update_endpoint(self): endpoint_ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) PROVIDERS.catalog_api.update_endpoint(endpoint_ref['id'], endpoint_ref) self._assert_notify_sent( endpoint_ref['id'], UPDATED_OPERATION, 'endpoint' ) self._assert_last_audit( endpoint_ref['id'], UPDATED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT, ) def test_delete_endpoint(self): endpoint_ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint(endpoint_ref['id'], endpoint_ref) PROVIDERS.catalog_api.delete_endpoint(endpoint_ref['id']) self._assert_notify_sent( endpoint_ref['id'], DELETED_OPERATION, 'endpoint' ) self._assert_last_audit( endpoint_ref['id'], DELETED_OPERATION, 'endpoint', cadftaxonomy.SECURITY_ENDPOINT, ) def test_create_service(self): service_ref = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service_ref['id'], service_ref) self._assert_notify_sent( service_ref['id'], CREATED_OPERATION, 'service' ) self._assert_last_audit( service_ref['id'], CREATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE, ) def test_update_service(self): service_ref = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service_ref['id'], service_ref) PROVIDERS.catalog_api.update_service(service_ref['id'], service_ref) self._assert_notify_sent( service_ref['id'], UPDATED_OPERATION, 'service' ) 
self._assert_last_audit( service_ref['id'], UPDATED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE, ) def test_delete_service(self): service_ref = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service_ref['id'], service_ref) PROVIDERS.catalog_api.delete_service(service_ref['id']) self._assert_notify_sent( service_ref['id'], DELETED_OPERATION, 'service' ) self._assert_last_audit( service_ref['id'], DELETED_OPERATION, 'service', cadftaxonomy.SECURITY_SERVICE, ) def test_create_region(self): region_ref = unit.new_region_ref() PROVIDERS.catalog_api.create_region(region_ref) self._assert_notify_sent(region_ref['id'], CREATED_OPERATION, 'region') self._assert_last_audit( region_ref['id'], CREATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION, ) def test_update_region(self): region_ref = unit.new_region_ref() PROVIDERS.catalog_api.create_region(region_ref) PROVIDERS.catalog_api.update_region(region_ref['id'], region_ref) self._assert_notify_sent(region_ref['id'], UPDATED_OPERATION, 'region') self._assert_last_audit( region_ref['id'], UPDATED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION, ) def test_delete_region(self): region_ref = unit.new_region_ref() PROVIDERS.catalog_api.create_region(region_ref) PROVIDERS.catalog_api.delete_region(region_ref['id']) self._assert_notify_sent(region_ref['id'], DELETED_OPERATION, 'region') self._assert_last_audit( region_ref['id'], DELETED_OPERATION, 'region', cadftaxonomy.SECURITY_REGION, ) def test_create_policy(self): policy_ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(policy_ref['id'], policy_ref) self._assert_notify_sent(policy_ref['id'], CREATED_OPERATION, 'policy') self._assert_last_audit( policy_ref['id'], CREATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY, ) def test_update_policy(self): policy_ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(policy_ref['id'], policy_ref) PROVIDERS.policy_api.update_policy(policy_ref['id'], policy_ref) 
self._assert_notify_sent(policy_ref['id'], UPDATED_OPERATION, 'policy') self._assert_last_audit( policy_ref['id'], UPDATED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY, ) def test_delete_policy(self): policy_ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(policy_ref['id'], policy_ref) PROVIDERS.policy_api.delete_policy(policy_ref['id']) self._assert_notify_sent(policy_ref['id'], DELETED_OPERATION, 'policy') self._assert_last_audit( policy_ref['id'], DELETED_OPERATION, 'policy', cadftaxonomy.SECURITY_POLICY, ) def test_disable_domain(self): domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) domain_ref['enabled'] = False PROVIDERS.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_sent( domain_ref['id'], 'disabled', 'domain', public=False ) def test_disable_of_disabled_domain_does_not_notify(self): domain_ref = unit.new_domain_ref(enabled=False) PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) # The domain_ref above is not changed during the create process. We # can use the same ref to perform the update. 
PROVIDERS.resource_api.update_domain(domain_ref['id'], domain_ref) self._assert_notify_not_sent( domain_ref['id'], 'disabled', 'domain', public=False ) def test_update_group(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) PROVIDERS.identity_api.update_group(group_ref['id'], group_ref) self._assert_last_note(group_ref['id'], UPDATED_OPERATION, 'group') self._assert_last_audit( group_ref['id'], UPDATED_OPERATION, 'group', cadftaxonomy.SECURITY_GROUP, ) def test_update_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) PROVIDERS.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent( project_ref['id'], UPDATED_OPERATION, 'project', public=True ) self._assert_last_audit( project_ref['id'], UPDATED_OPERATION, 'project', cadftaxonomy.SECURITY_PROJECT, ) def test_disable_project(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = False PROVIDERS.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_sent( project_ref['id'], 'disabled', 'project', public=False ) def test_disable_of_disabled_project_does_not_notify(self): project_ref = unit.new_project_ref( domain_id=self.domain_id, enabled=False ) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) # The project_ref above is not changed during the create process. We # can use the same ref to perform the update. 
PROVIDERS.resource_api.update_project(project_ref['id'], project_ref) self._assert_notify_not_sent( project_ref['id'], 'disabled', 'project', public=False ) def test_update_project_does_not_send_disable(self): project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) project_ref['enabled'] = True PROVIDERS.resource_api.update_project(project_ref['id'], project_ref) self._assert_last_note(project_ref['id'], UPDATED_OPERATION, 'project') self._assert_notify_not_sent(project_ref['id'], 'disabled', 'project') def test_update_role(self): role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) PROVIDERS.role_api.update_role(role_ref['id'], role_ref) self._assert_last_note(role_ref['id'], UPDATED_OPERATION, 'role') self._assert_last_audit( role_ref['id'], UPDATED_OPERATION, 'role', cadftaxonomy.SECURITY_ROLE, ) def test_update_user(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(user_ref) PROVIDERS.identity_api.update_user(user_ref['id'], user_ref) self._assert_last_note(user_ref['id'], UPDATED_OPERATION, 'user') self._assert_last_audit( user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER, ) def test_config_option_no_events(self): self.config_fixture.config(notification_format='basic') role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # The regular notifications will still be emitted, since they are # used for callback handling. 
self._assert_last_note(role_ref['id'], CREATED_OPERATION, 'role') # No audit event should have occurred self.assertEqual(0, len(self._audits)) def test_add_user_to_group(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(user_ref) group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) PROVIDERS.identity_api.add_user_to_group( user_ref['id'], group_ref['id'] ) self._assert_last_note( group_ref['id'], UPDATED_OPERATION, 'group', actor_id=user_ref['id'], actor_type='user', actor_operation='added', ) def test_remove_user_from_group(self): user_ref = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(user_ref) group_ref = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) PROVIDERS.identity_api.add_user_to_group( user_ref['id'], group_ref['id'] ) PROVIDERS.identity_api.remove_user_from_group( user_ref['id'], group_ref['id'] ) self._assert_last_note( group_ref['id'], UPDATED_OPERATION, 'group', actor_id=user_ref['id'], actor_type='user', actor_operation='removed', ) def test_initiator_request_id(self): ref = unit.new_domain_ref() self.post('/domains', body={'domain': ref}) note = self._notifications[-1] initiator = note['initiator'] self.assertIsNotNone(initiator.request_id) def test_initiator_global_request_id(self): global_request_id = 'req-%s' % uuid.uuid4() ref = unit.new_domain_ref() self.post( '/domains', body={'domain': ref}, headers={'X-OpenStack-Request-Id': global_request_id}, ) note = self._notifications[-1] initiator = note['initiator'] self.assertEqual(initiator.global_request_id, global_request_id) def test_initiator_global_request_id_not_set(self): ref = unit.new_domain_ref() self.post('/domains', body={'domain': ref}) note = self._notifications[-1] initiator = note['initiator'] self.assertFalse(hasattr(initiator, 'global_request_id')) class 
CADFNotificationsForPCIDSSEvents(BaseNotificationTest): def setUp(self): super().setUp() conf = self.useFixture(config_fixture.Config(CONF)) conf.config(notification_format='cadf') conf.config(group='security_compliance', password_expires_days=2) conf.config(group='security_compliance', lockout_failure_attempts=3) conf.config(group='security_compliance', unique_last_password_count=2) conf.config(group='security_compliance', minimum_password_age=2) conf.config( group='security_compliance', password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$', ) conf.config( group='security_compliance', password_regex_description='1 letter, 1 digit, 7 chars', ) def test_password_expired_sends_notification(self): password = uuid.uuid4().hex password_creation_time = timeutils.utcnow() - datetime.timedelta( days=CONF.security_compliance.password_expires_days + 1 ) freezer = freezegun.freeze_time(password_creation_time) # NOTE(gagehugo): This part below uses freezegun to spoof # the time as being three days in the past from right now. We will # create a user and have that user successfully authenticate, # then stop the time machine and return to the present time, # where the user's password is now expired. 
freezer.start() user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password ) user_ref = PROVIDERS.identity_api.create_user(user_ref) with self.make_request(): PROVIDERS.identity_api.authenticate(user_ref['id'], password) freezer.stop() reason_type = exception.PasswordExpired.message_format % { 'user_id': user_ref['id'] } expected_reason = {'reasonCode': '401', 'reasonType': reason_type} with self.make_request(): self.assertRaises( exception.PasswordExpired, PROVIDERS.identity_api.authenticate, user_id=user_ref['id'], password=password, ) self._assert_last_audit( None, 'authenticate', None, cadftaxonomy.ACCOUNT_USER, reason=expected_reason, ) def test_locked_out_user_sends_notification(self): password = uuid.uuid4().hex new_password = uuid.uuid4().hex expected_responses = [ AssertionError, AssertionError, AssertionError, exception.Unauthorized, ] user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password ) user_ref = PROVIDERS.identity_api.create_user(user_ref) reason_type = exception.AccountLocked.message_format % { 'user_id': user_ref['id'] } expected_reason = {'reasonCode': '401', 'reasonType': reason_type} for ex in expected_responses: with self.make_request(): self.assertRaises( ex, PROVIDERS.identity_api.change_password, user_id=user_ref['id'], original_password=new_password, new_password=new_password, ) self._assert_last_audit( None, 'authenticate', None, cadftaxonomy.ACCOUNT_USER, reason=expected_reason, ) def test_repeated_password_sends_notification(self): conf = self.useFixture(config_fixture.Config(CONF)) conf.config(group='security_compliance', minimum_password_age=0) password = uuid.uuid4().hex new_password = uuid.uuid4().hex count = CONF.security_compliance.unique_last_password_count reason_type = ( exception.PasswordHistoryValidationError.message_format % {'unique_count': count} ) expected_reason = {'reasonCode': '400', 'reasonType': reason_type} user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password ) 
user_ref = PROVIDERS.identity_api.create_user(user_ref) with self.make_request(): PROVIDERS.identity_api.change_password( user_id=user_ref['id'], original_password=password, new_password=new_password, ) with self.make_request(): self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user_ref['id'], original_password=new_password, new_password=password, ) self._assert_last_audit( user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER, reason=expected_reason, ) def test_invalid_password_sends_notification(self): password = uuid.uuid4().hex invalid_password = '1' regex = CONF.security_compliance.password_regex_description reason_type = ( exception.PasswordRequirementsValidationError.message_format % {'detail': regex} ) expected_reason = {'reasonCode': '400', 'reasonType': reason_type} user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password ) user_ref = PROVIDERS.identity_api.create_user(user_ref) with self.make_request(): self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user_ref['id'], original_password=password, new_password=invalid_password, ) self._assert_last_audit( user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER, reason=expected_reason, ) def test_changing_password_too_early_sends_notification(self): password = uuid.uuid4().hex new_password = uuid.uuid4().hex next_password = uuid.uuid4().hex user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password, password_created_at=(timeutils.utcnow()), ) user_ref = PROVIDERS.identity_api.create_user(user_ref) min_days = CONF.security_compliance.minimum_password_age min_age = user_ref['password_created_at'] + datetime.timedelta( days=min_days ) days_left = (min_age - timeutils.utcnow()).days reason_type = exception.PasswordAgeValidationError.message_format % { 'min_age_days': min_days, 'days_left': days_left, } expected_reason = 
{'reasonCode': '400', 'reasonType': reason_type} with self.make_request(): PROVIDERS.identity_api.change_password( user_id=user_ref['id'], original_password=password, new_password=new_password, ) with self.make_request(): self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user_ref['id'], original_password=new_password, new_password=next_password, ) self._assert_last_audit( user_ref['id'], UPDATED_OPERATION, 'user', cadftaxonomy.SECURITY_ACCOUNT_USER, reason=expected_reason, ) class CADFNotificationsForEntities(NotificationsForEntities): def setUp(self): super().setUp() self.config_fixture.config(notification_format='cadf') def test_initiator_data_is_set(self): ref = unit.new_domain_ref() resp = self.post('/domains', body={'domain': ref}) resource_id = resp.result.get('domain').get('id') self._assert_last_audit( resource_id, CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN, ) self._assert_initiator_data_is_set( CREATED_OPERATION, 'domain', cadftaxonomy.SECURITY_DOMAIN ) def test_initiator_request_id(self): data = self.build_authentication_request( user_id=self.user_id, password=self.user['password'] ) self.post('/auth/tokens', body=data) audit = self._audits[-1] initiator = audit['payload']['initiator'] self.assertIn('request_id', initiator) def test_initiator_global_request_id(self): global_request_id = 'req-%s' % uuid.uuid4() data = self.build_authentication_request( user_id=self.user_id, password=self.user['password'] ) self.post( '/auth/tokens', body=data, headers={'X-OpenStack-Request-Id': global_request_id}, ) audit = self._audits[-1] initiator = audit['payload']['initiator'] self.assertEqual(initiator['global_request_id'], global_request_id) def test_initiator_global_request_id_not_set(self): data = self.build_authentication_request( user_id=self.user_id, password=self.user['password'] ) self.post('/auth/tokens', body=data) audit = self._audits[-1] initiator = audit['payload']['initiator'] 
self.assertNotIn('global_request_id', initiator) class TestEventCallbacks(test_v3.RestfulTestCase): class FakeManager: def _project_deleted_callback( self, service, resource_type, operation, payload ): """Used just for the callback interface.""" def test_notification_received(self): callback = register_callback(CREATED_OPERATION, 'project') project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) self.assertTrue(callback.called) def test_notification_method_not_callable(self): fake_method = None self.assertRaises( TypeError, notifications.register_event_callback, UPDATED_OPERATION, 'project', [fake_method], ) def test_notification_event_not_valid(self): manager = self.FakeManager() self.assertRaises( ValueError, notifications.register_event_callback, uuid.uuid4().hex, 'project', manager._project_deleted_callback, ) def test_event_registration_for_unknown_resource_type(self): # Registration for unknown resource types should succeed. If no event # is issued for that resource type, the callback wont be triggered. 
manager = self.FakeManager() notifications.register_event_callback( DELETED_OPERATION, uuid.uuid4().hex, manager._project_deleted_callback, ) resource_type = uuid.uuid4().hex notifications.register_event_callback( DELETED_OPERATION, resource_type, manager._project_deleted_callback ) def test_provider_event_callback_subscription(self): callback_called = [] @notifications.listener class Foo: def __init__(self): self.event_callbacks = { CREATED_OPERATION: {'project': self.foo_callback} } def foo_callback(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append(True) Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) self.assertEqual([True], callback_called) def test_provider_event_callbacks_subscription(self): callback_called = [] @notifications.listener class Foo: def __init__(self): self.event_callbacks = { CREATED_OPERATION: { 'project': [self.callback_0, self.callback_1] } } def callback_0(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append('cb0') def callback_1(self, service, resource_type, operation, payload): # uses callback_called from the closure callback_called.append('cb1') Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) self.assertCountEqual(['cb1', 'cb0'], callback_called) def test_invalid_event_callbacks(self): @notifications.listener class Foo: def __init__(self): self.event_callbacks = 'bogus' self.assertRaises(AttributeError, Foo) def test_invalid_event_callbacks_event(self): @notifications.listener class Foo: def __init__(self): self.event_callbacks = {CREATED_OPERATION: 'bogus'} self.assertRaises(AttributeError, Foo) def test_using_an_unbound_method_as_a_callback_fails(self): # NOTE(dstanek): An unbound method is when you reference a method # from a class 
object. You'll get a method that isn't bound to a # particular instance so there is no magic 'self'. You can call it, # but you have to pass in the instance manually like: C.m(C()). # If you reference the method from an instance then you get a method # that effectively curries the self argument for you # (think functools.partial). Obviously is we don't have an # instance then we can't call the method. @notifications.listener class Foo: def __init__(self): self.event_callbacks = { CREATED_OPERATION: {'project': Foo.callback} } def callback(self, service, resource_type, operation, payload): pass # TODO(dstanek): it would probably be nice to fail early using # something like: # self.assertRaises(TypeError, Foo) Foo() project_ref = unit.new_project_ref(domain_id=self.domain_id) self.assertRaises( TypeError, PROVIDERS.resource_api.create_project, project_ref['id'], project_ref, ) class CadfNotificationsWrapperTestCase(test_v3.RestfulTestCase): LOCAL_HOST = 'localhost' ACTION = 'authenticate' ROLE_ASSIGNMENT = 'role_assignment' def setUp(self): super().setUp() self._notifications = [] def fake_notify( action, initiator, outcome, target, event_type, reason=None, **kwargs, ): service_security = cadftaxonomy.SERVICE_SECURITY event = eventfactory.EventFactory().new_event( eventType=cadftype.EVENTTYPE_ACTIVITY, outcome=outcome, action=action, initiator=initiator, target=target, reason=reason, observer=cadfresource.Resource(typeURI=service_security), ) for key, value in kwargs.items(): setattr(event, key, value) note = { 'action': action, 'initiator': initiator, 'event': event, 'event_type': event_type, 'send_notification_called': True, } self._notifications.append(note) self.useFixture( fixtures.MockPatchObject( notifications, '_send_audit_notification', fake_notify ) ) def _get_last_note(self): self.assertTrue(self._notifications) return self._notifications[-1] def _assert_last_note(self, action, user_id, event_type=None): self.assertTrue(self._notifications) note = 
self._notifications[-1] self.assertEqual(action, note['action']) initiator = note['initiator'] self.assertEqual(user_id, initiator.id) self.assertEqual(self.LOCAL_HOST, initiator.host.address) self.assertTrue(note['send_notification_called']) if event_type: self.assertEqual(event_type, note['event_type']) def _assert_event( self, role_id, project=None, domain=None, user=None, group=None, inherit=False, ): """Assert that the CADF event is valid. In the case of role assignments, the event will have extra data, specifically, the role, target, actor, and if the role is inherited. An example event, as a dictionary is seen below: { 'typeURI': 'http://schemas.dmtf.org/cloud/audit/1.0/event', 'initiator': { 'typeURI': 'service/security/account/user', 'host': {'address': 'localhost'}, 'id': 'openstack:0a90d95d-582c-4efb-9cbc-e2ca7ca9c341', 'username': u'admin' }, 'target': { 'typeURI': 'service/security/account/user', 'id': 'openstack:d48ea485-ef70-4f65-8d2b-01aa9d7ec12d' }, 'observer': { 'typeURI': 'service/security', 'id': 'openstack:d51dd870-d929-4aba-8d75-dcd7555a0c95' }, 'eventType': 'activity', 'eventTime': '2014-08-21T21:04:56.204536+0000', 'role': u'0e6b990380154a2599ce6b6e91548a68', 'domain': u'24bdcff1aab8474895dbaac509793de1', 'inherited_to_projects': False, 'group': u'c1e22dc67cbd469ea0e33bf428fe597a', 'action': 'created.role_assignment', 'outcome': 'success', 'id': 'openstack:782689dd-f428-4f13-99c7-5c70f94a5ac1' } """ note = self._notifications[-1] event = note['event'] if project: self.assertEqual(project, event.project) if domain: self.assertEqual(domain, event.domain) if group: self.assertEqual(group, event.group) elif user: self.assertEqual(user, event.user) self.assertEqual(role_id, event.role) self.assertEqual(inherit, event.inherited_to_projects) def test_initiator_id_always_matches_user_id(self): # Clear notifications while self._notifications: self._notifications.pop() self.get_scoped_token() self.assertEqual(len(self._notifications), 1) note = 
self._notifications.pop() initiator = note['initiator'] self.assertEqual(self.user_id, initiator.id) self.assertEqual(self.user_id, initiator.user_id) def test_initiator_always_contains_username(self): # Clear notifications while self._notifications: self._notifications.pop() self.get_scoped_token() self.assertEqual(len(self._notifications), 1) note = self._notifications.pop() initiator = note['initiator'] self.assertEqual(self.user['name'], initiator.username) def test_v3_authenticate_user_name_and_domain_id(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_id = self.domain_id data = self.build_authentication_request( username=user_name, user_domain_id=domain_id, password=password ) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_user_id(self): user_id = self.user_id password = self.user['password'] data = self.build_authentication_request( user_id=user_id, password=password ) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def test_v3_authenticate_with_invalid_user_id_sends_notification(self): user_id = uuid.uuid4().hex password = self.user['password'] data = self.build_authentication_request( user_id=user_id, password=password ) self.post( '/auth/tokens', body=data, expected_status=http.client.UNAUTHORIZED ) note = self._get_last_note() initiator = note['initiator'] # Confirm user-name specific event was emitted. 
self.assertEqual(self.ACTION, note['action']) self.assertEqual(user_id, initiator.user_id) self.assertTrue(note['send_notification_called']) self.assertEqual(cadftaxonomy.OUTCOME_FAILURE, note['event'].outcome) self.assertEqual(self.LOCAL_HOST, initiator.host.address) def test_v3_authenticate_with_invalid_user_name_sends_notification(self): user_name = uuid.uuid4().hex password = self.user['password'] domain_id = self.domain_id data = self.build_authentication_request( username=user_name, user_domain_id=domain_id, password=password ) self.post( '/auth/tokens', body=data, expected_status=http.client.UNAUTHORIZED ) note = self._get_last_note() initiator = note['initiator'] # Confirm user-name specific event was emitted. self.assertEqual(self.ACTION, note['action']) self.assertEqual(user_name, initiator.user_name) self.assertEqual(domain_id, initiator.domain_id) self.assertTrue(note['send_notification_called']) self.assertEqual(cadftaxonomy.OUTCOME_FAILURE, note['event'].outcome) self.assertEqual(self.LOCAL_HOST, initiator.host.address) def test_v3_authenticate_user_name_and_domain_name(self): user_id = self.user_id user_name = self.user['name'] password = self.user['password'] domain_name = self.domain['name'] data = self.build_authentication_request( username=user_name, user_domain_name=domain_name, password=password ) self.post('/auth/tokens', body=data) self._assert_last_note(self.ACTION, user_id) def _test_role_assignment( self, url, role, project=None, domain=None, user=None, group=None ): self.put(url) action = f"{CREATED_OPERATION}.{self.ROLE_ASSIGNMENT}" event_type = '{}.{}.{}'.format( notifications.SERVICE, self.ROLE_ASSIGNMENT, CREATED_OPERATION, ) self._assert_last_note(action, self.user_id, event_type) self._assert_event(role, project, domain, user, group) self.delete(url) action = f"{DELETED_OPERATION}.{self.ROLE_ASSIGNMENT}" event_type = '{}.{}.{}'.format( notifications.SERVICE, self.ROLE_ASSIGNMENT, DELETED_OPERATION, ) self._assert_last_note(action, 
self.user_id, event_type) self._assert_event(role, project, domain, user, None) def test_user_project_grant(self): url = '/projects/{}/users/{}/roles/{}'.format( self.project_id, self.user_id, self.role_id, ) self._test_role_assignment( url, self.role_id, project=self.project_id, user=self.user_id ) def test_group_domain_grant(self): group_ref = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group_ref) PROVIDERS.identity_api.add_user_to_group(self.user_id, group['id']) url = '/domains/{}/groups/{}/roles/{}'.format( self.domain_id, group['id'], self.role_id, ) self._test_role_assignment( url, self.role_id, domain=self.domain_id, group=group['id'] ) def test_add_role_to_user_and_project(self): # A notification is sent when add_role_to_user_and_project is called on # the assignment manager. project_ref = unit.new_project_ref(self.domain_id) project = PROVIDERS.resource_api.create_project( project_ref['id'], project_ref ) project_id = project['id'] PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, project_id, self.role_id ) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual('created.role_assignment', note['action']) self.assertTrue(note['send_notification_called']) self._assert_event(self.role_id, project=project_id, user=self.user_id) def test_remove_role_from_user_and_project(self): # A notification is sent when remove_role_from_user_and_project is # called on the assignment manager. 
PROVIDERS.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id ) self.assertTrue(self._notifications) note = self._notifications[-1] self.assertEqual('deleted.role_assignment', note['action']) self.assertTrue(note['send_notification_called']) self._assert_event( self.role_id, project=self.project_id, user=self.user_id ) class TestCallbackRegistration(unit.BaseTestCase): def setUp(self): super().setUp() self.mock_log = mock.Mock() # Force the callback logging to occur self.mock_log.logger.getEffectiveLevel.return_value = log.DEBUG def verify_log_message(self, data): """Verify log message. Tests that use this are a little brittle because adding more logging can break them. TODO(dstanek): remove the need for this in a future refactoring """ log_fn = self.mock_log.debug self.assertEqual(len(data), log_fn.call_count) for datum in data: log_fn.assert_any_call(mock.ANY, datum) def test_a_function_callback(self): def callback(*args, **kwargs): pass resource_type = 'thing' with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, resource_type, callback ) callback = 'keystone.tests.unit.common.test_notifications.callback' expected_log_data = { 'callback': callback, 'event': 'identity.%s.created' % resource_type, } self.verify_log_message([expected_log_data]) def test_a_method_callback(self): class C: def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): notifications.register_event_callback( CREATED_OPERATION, 'thing', C().callback ) callback = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = { 'callback': callback, 'event': 'identity.thing.created', } self.verify_log_message([expected_log_data]) def test_a_list_of_callbacks(self): def callback(*args, **kwargs): pass class C: def callback(self, *args, **kwargs): pass with mock.patch('keystone.notifications.LOG', self.mock_log): 
notifications.register_event_callback( CREATED_OPERATION, 'thing', [callback, C().callback] ) callback_1 = 'keystone.tests.unit.common.test_notifications.callback' callback_2 = 'keystone.tests.unit.common.test_notifications.C.callback' expected_log_data = [ {'callback': callback_1, 'event': 'identity.thing.created'}, {'callback': callback_2, 'event': 'identity.thing.created'}, ] self.verify_log_message(expected_log_data) def test_an_invalid_callback(self): self.assertRaises( TypeError, notifications.register_event_callback, (CREATED_OPERATION, 'thing', object()), ) def test_an_invalid_event(self): def callback(*args, **kwargs): pass self.assertRaises( ValueError, notifications.register_event_callback, uuid.uuid4().hex, 'thing', callback, ) class CADFNotificationsDataTestCase(test_v3.RestfulTestCase): def config_overrides(self): super().config_overrides() # NOTE(lbragstad): This is a workaround since oslo.messaging version # 9.0.0 had a broken default for transport_url. This makes it so that # we are able to use version 9.0.0 in tests because we are supplying # an override to use a sane default (rabbit://). The problem is that # we can't update the config fixture until we call # get_notification_transport since that method registers the # configuration options for oslo.messaging, which fails since there # isn't a default value for transport_url with version 9.0.0. All the # next line is doing is bypassing the broken default logic by supplying # a dummy url, which allows the options to be registered. After that, # we can actually update the configuration option to override the # transport_url option that was just registered before proceeding with # the test. 
oslo_messaging.get_notification_transport(CONF, url='rabbit://') self.config_fixture.config( group='oslo_messaging_notifications', transport_url='rabbit://' ) def test_receive_identityId_from_audit_notification(self): observer = None resource_type = EXP_RESOURCE_TYPE ref = getattr(self, 'service', None) if ref is None or ref['type'] != 'identity': ref = unit.new_service_ref() ref['type'] = 'identity' PROVIDERS.catalog_api.create_service(ref['id'], ref.copy()) action = CREATED_OPERATION + '.' + resource_type initiator = notifications._get_request_audit_info(self.user_id) target = cadfresource.Resource(typeURI=cadftaxonomy.ACCOUNT_USER) outcome = 'success' event_type = 'identity.authenticate.created' with mock.patch.object( notifications._get_notifier(), 'info' ) as mocked: notifications._send_audit_notification( action, initiator, outcome, target, event_type ) for mock_args_list in mocked.call_args: if len(mock_args_list) != 0: for mock_args in mock_args_list: if 'observer' in mock_args: observer = mock_args['observer'] break self.assertEqual(ref['id'], observer['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/common/test_provider_api.py0000664000175000017500000000613400000000000025035 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import manager from keystone.common import provider_api from keystone.tests import unit class TestProviderAPIRegistry(unit.BaseTestCase): def setUp(self): super().setUp() provider_api.ProviderAPIs._clear_registry_instances() self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances) def _create_manager_instance(self, provides_api=None): provides_api = provides_api or '%s_api' % uuid.uuid4().hex class TestManager(manager.Manager): _provides_api = provides_api driver_namespace = '_TEST_NOTHING' def do_something(self): return provides_api return TestManager(driver_name=None) def test_deferred_gettr(self): api_name = '%s_api' % uuid.uuid4().hex class TestClass: descriptor = provider_api.ProviderAPIs.deferred_provider_lookup( api=api_name, method='do_something' ) test_instance = TestClass() # Accessing the descriptor will raise the known "attribute" error self.assertRaises(AttributeError, getattr, test_instance, 'descriptor') self._create_manager_instance(provides_api=api_name) # once the provider has been instantiated, we can call the descriptor # which will return the method (callable) and we can check that the # return value is as expected. 
self.assertEqual(api_name, test_instance.descriptor()) def test_registry_lock(self): provider_api.ProviderAPIs.lock_provider_registry() self.assertRaises(RuntimeError, self._create_manager_instance) def test_registry_duplicate(self): test_manager = self._create_manager_instance() self.assertRaises( provider_api.DuplicateProviderError, self._create_manager_instance, provides_api=test_manager._provides_api, ) def test_provider_api_mixin(self): test_manager = self._create_manager_instance() class Testing(provider_api.ProviderAPIMixin): pass instance = Testing() self.assertIs( test_manager, getattr(instance, test_manager._provides_api) ) def test_manager_api_reference(self): manager = self._create_manager_instance() second_manager = self._create_manager_instance() self.assertIs( second_manager, getattr(manager, second_manager._provides_api) ) self.assertIs(manager, getattr(second_manager, manager._provides_api)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/common/test_rbac_enforcer.py0000664000175000017500000007545700000000000025162 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid import fixtures import flask from flask import blueprints import flask_restful from oslo_policy import policy from keystone.common import authorization from keystone.common import context from keystone.common import provider_api from keystone.common import rbac_enforcer from keystone import exception from keystone.tests import unit from keystone.tests.unit import rest PROVIDER_APIS = provider_api.ProviderAPIs class TestRBACEnforcer(unit.TestCase): def test_enforcer_shared_state(self): enforcer = rbac_enforcer.enforcer.RBACEnforcer() enforcer2 = rbac_enforcer.enforcer.RBACEnforcer() self.assertIsNotNone(enforcer._enforcer) self.assertEqual(enforcer._enforcer, enforcer2._enforcer) setattr(enforcer, '_test_attr', uuid.uuid4().hex) self.assertEqual(enforcer._test_attr, enforcer2._test_attr) def test_enforcer_auto_instantiated(self): enforcer = rbac_enforcer.enforcer.RBACEnforcer() # Check that the enforcer instantiates the oslo_policy enforcer object # on demand. 
self.assertIsNotNone(enforcer._enforcer) enforcer._reset() self.assertIsNotNone(enforcer._enforcer) class _TestRBACEnforcerBase(rest.RestfulTestCase): def setUp(self): super().setUp() self._setup_enforcer_object() self._setup_dynamic_flask_blueprint_api() self._setup_flask_restful_api() def _setup_enforcer_object(self): self.enforcer = rbac_enforcer.enforcer.RBACEnforcer() self.cleanup_instance('enforcer') def register_new_rules(enforcer): rules = self._testing_policy_rules() enforcer.register_defaults(rules) self.useFixture( fixtures.MockPatchObject( self.enforcer, 'register_rules', register_new_rules ) ) # Set the possible actions to our limited list original_actions = rbac_enforcer.enforcer._POSSIBLE_TARGET_ACTIONS rbac_enforcer.enforcer._POSSIBLE_TARGET_ACTIONS = frozenset( [rule.name for rule in self._testing_policy_rules()] ) # RESET the FrozenSet of possible target actions to the original # value self.addCleanup( setattr, rbac_enforcer.enforcer, '_POSSIBLE_TARGET_ACTIONS', original_actions, ) # Force a reset on the enforcer to load up new policy rules. 
self.enforcer._reset() def _setup_dynamic_flask_blueprint_api(self): # Create a dynamic flask blueprint with a known prefix api = uuid.uuid4().hex url_prefix = '/_%s_TEST' % api blueprint = blueprints.Blueprint(api, __name__, url_prefix=url_prefix) self.url_prefix = url_prefix self.flask_blueprint = blueprint self.cleanup_instance('flask_blueprint', 'url_prefix') def _driver_simulation_get_method(self, argument_id): user = self.user_req_admin return {'id': argument_id, 'value': 'TEST', 'owner_id': user['id']} def _setup_flask_restful_api(self): self.restful_api_url_prefix = '/_%s_TEST' % uuid.uuid4().hex self.restful_api = flask_restful.Api( self.public_app.app, self.restful_api_url_prefix ) driver_simulation_method = self._driver_simulation_get_method # Very Basic Restful Resource class RestfulResource(flask_restful.Resource): def get(self, argument_id=None): if argument_id is not None: return self._get_argument(argument_id) return self._list_arguments() def _get_argument(self, argument_id): return {'argument': driver_simulation_method(argument_id)} def _list_arguments(self): return {'arguments': []} self.restful_api_resource = RestfulResource self.restful_api.add_resource( RestfulResource, '/argument/', '/argument' ) self.cleanup_instance( 'restful_api', 'restful_resource', 'restful_api_url_prefix' ) def _register_blueprint_to_app(self): # TODO(morgan): remove the need for webtest, but for now just unwrap # by one layer. Once everything is converted to flask, we can fix # the tests to eliminate "webtest". 
self.public_app.app.register_blueprint( self.flask_blueprint, url_prefix=self.url_prefix ) def _auth_json(self): return { 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user_req_admin['name'], 'password': self.user_req_admin['password'], 'domain': {'id': self.user_req_admin['domain_id']}, } }, }, 'scope': {'project': {'id': self.project_service['id']}}, } } def _testing_policy_rules(self): test_policy_rules = [ policy.RuleDefault( name='example:subject_token', check_str='user_id:%(target.token.user_id)s', scope_types=['project'], ), policy.RuleDefault( name='example:target', check_str='user_id:%(target.myuser.id)s', scope_types=['project'], ), policy.RuleDefault( name='example:inferred_member_data', check_str='user_id:%(target.argument.owner_id)s', scope_types=['project'], ), policy.RuleDefault( name='example:with_filter', check_str='user_id:%(user)s', scope_types=['project'], ), policy.RuleDefault( name='example:allowed', check_str='', scope_types=['project'], ), policy.RuleDefault( name='example:denied', check_str='false:false', scope_types=['project'], ), ] return test_policy_rules class TestRBACEnforcerRestAdminAuthToken(_TestRBACEnforcerBase): def config_overrides(self): super().config_overrides() self.config_fixture.config(admin_token='ADMIN') def test_enforcer_is_admin_check_with_token(self): # Admin-shared token passed and valid, "is_admin" should be true. 
with self.test_client() as c: c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={authorization.AUTH_TOKEN_HEADER: 'ADMIN'}, ) self.assertTrue(self.enforcer._shared_admin_auth_token_set()) def test_enforcer_is_admin_check_without_token(self): with self.test_client() as c: # Admin-shared token passed and invalid, "is_admin" should be false c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={authorization.AUTH_TOKEN_HEADER: 'BOGUS'}, ) self.assertFalse(self.enforcer._shared_admin_auth_token_set()) # Admin-shared token not passed, "is_admin" should be false c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex) ) self.assertFalse(self.enforcer._shared_admin_auth_token_set()) def test_enforce_call_is_admin(self): with self.test_client() as c: c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={authorization.AUTH_TOKEN_HEADER: 'ADMIN'}, ) with mock.patch.object(self.enforcer, '_enforce') as mock_method: self.enforcer.enforce_call(action='example:allowed') mock_method.assert_not_called() class TestRBACEnforcerRest(_TestRBACEnforcerBase): def test_extract_subject_token_target_data(self): path = '/v3/auth/tokens' body = self._auth_json() with self.test_client() as c: r = c.post( path, json=body, follow_redirects=True, expected_status_code=201, ) token_id = r.headers['X-Subject-Token'] c.get( '/v3', headers={ 'X-Auth-Token': token_id, 'X-Subject-Token': token_id, }, ) token = PROVIDER_APIS.token_provider_api.validate_token(token_id) subj_token_data = ( self.enforcer._extract_subject_token_target_data() ) subj_token_data = subj_token_data['token'] self.assertEqual(token.user_id, subj_token_data['user_id']) self.assertIn('user', subj_token_data) self.assertIn('domain', subj_token_data['user']) self.assertEqual( token.user_domain['id'], subj_token_data['user']['domain']['id'], ) def test_extract_filter_data(self): # Test that we are extracting useful filter data 
from the # request context. The tested function validates tha # extract_filter_attr only adds the passed filter values to the # policy dict, all other query-params are ignored. path = uuid.uuid4().hex @self.flask_blueprint.route('/%s' % path) def return_nothing_interesting(): return 'OK', 200 self._register_blueprint_to_app() with self.test_client() as c: expected_param = uuid.uuid4().hex unexpected_param = uuid.uuid4().hex get_path = '/'.join([self.url_prefix, path]) # Populate the query-string with two params, one that should # exist and one that should not in the resulting policy # dict. qs = '{expected}=EXPECTED&{unexpected}=UNEXPECTED'.format( expected=expected_param, unexpected=unexpected_param, ) # Perform the get with the query-string c.get(f'{get_path}?{qs}') # Extract the filter values. extracted_filter = self.enforcer._extract_filter_values( [expected_param] ) # Unexpected param is not in the extracted values # Expected param is in the extracted values # Expected param has the expected value self.assertNotIn(extracted_filter, unexpected_param) self.assertIn(expected_param, expected_param) self.assertEqual(extracted_filter[expected_param], 'EXPECTED') def test_retrive_oslo_req_context(self): # Test to ensure 'get_oslo_req_context' is pulling the request context # from the environ as expected. The only way to really test is an # instance check. with self.test_client() as c: c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex) ) oslo_req_context = self.enforcer._get_oslo_req_context() self.assertIsInstance(oslo_req_context, context.RequestContext) def test_is_authenticated_check(self): # Check that the auth_context is in-fact decoded as expected. 
token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) self.enforcer._assert_is_authenticated() c.get('/', expected_status_code=300) self.assertRaises( exception.Unauthorized, self.enforcer._assert_is_authenticated ) oslo_ctx = self.enforcer._get_oslo_req_context() # Set authenticated to a false value that is not None oslo_ctx.authenticated = False self.assertRaises( exception.Unauthorized, self.enforcer._assert_is_authenticated ) def test_extract_policy_check_credentials(self): # Make sure extracting the creds is the same as what is in the request # environment. token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) extracted_creds = self.enforcer._extract_policy_check_credentials() self.assertEqual( flask.request.environ.get(authorization.AUTH_CONTEXT_ENV), extracted_creds, ) def test_extract_member_target_data_inferred(self): # NOTE(morgan): Setup the "resource" object with a 'member_name' attr # and the 'get_member_from_driver' binding to the 'get' method. The # enforcer here will look for 'get_member_from_driver' (callable) and # the 'member_name' (e.g. 'user') so it can automatically populate # the target dict with the member information. This is mostly compat # with current @protected (ease of use). For most cases the target # should be explicitly passed to .enforce_call, but for ease of # converting / use, the automatic population of data has been added. 
self.restful_api_resource.member_key = 'argument' member_from_driver = self._driver_simulation_get_method self.restful_api_resource.get_member_from_driver = member_from_driver argument_id = uuid.uuid4().hex with self.test_client() as c: c.get( '{}/argument/{}'.format( self.restful_api_url_prefix, argument_id ) ) extracted = self.enforcer._extract_member_target_data( member_target_type=None, member_target=None ) self.assertDictEqual( extracted['target'], self.restful_api_resource().get(argument_id), ) def test_view_args_populated_in_policy_dict(self): # Setup the "resource" object and make a call that has view arguments # (substituted values in the URL). Make sure to use an policy enforcer # that properly checks (substitutes in) a value that is not in "target" # path but in the main policy dict path. def _enforce_mock_func(credentials, action, target, do_raise=True): if 'argument_id' not in target: raise exception.ForbiddenAction(action=action) self.useFixture( fixtures.MockPatchObject( self.enforcer, '_enforce', _enforce_mock_func ) ) argument_id = uuid.uuid4().hex # Check with a call that will populate view_args. with self.test_client() as c: path = '/v3/auth/tokens' body = self._auth_json() r = c.post( path, json=body, follow_redirects=True, expected_status_code=201, ) token_id = r.headers['X-Subject-Token'] c.get( '{}/argument/{}'.format( self.restful_api_url_prefix, argument_id ), headers={'X-Auth-Token': token_id}, ) # Use any valid policy as _enforce is mockpatched out self.enforcer.enforce_call(action='example:allowed') c.get( '%s/argument' % self.restful_api_url_prefix, headers={'X-Auth-Token': token_id}, ) self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:allowed', ) def test_extract_member_target_data_supplied_target(self): # Test extract member target data with member_target and # member_target_type supplied. 
member_type = uuid.uuid4().hex member_target = {uuid.uuid4().hex: {uuid.uuid4().hex}} extracted = self.enforcer._extract_member_target_data( member_target_type=member_type, member_target=member_target ) self.assertDictEqual( {'target': {member_type: member_target}}, extracted ) def test_extract_member_target_data_bad_input(self): # Test Extract Member Target Data with only "member_target" and only # "member_target_type" and ensure empty dict is returned. self.assertEqual( {}, self.enforcer._extract_member_target_data( member_target=None, member_target_type=uuid.uuid4().hex ), ) self.assertEqual( {}, self.enforcer._extract_member_target_data( member_target={}, member_target_type=None ), ) def test_call_build_enforcement_target(self): assertIn = self.assertIn assertEq = self.assertEqual ref_uuid = uuid.uuid4().hex def _enforce_mock_func(credentials, action, target, do_raise=True): assertIn('target.domain.id', target) assertEq(target['target.domain.id'], ref_uuid) def _build_enforcement_target(): return {'domain': {'id': ref_uuid}} self.useFixture( fixtures.MockPatchObject( self.enforcer, '_enforce', _enforce_mock_func ) ) argument_id = uuid.uuid4().hex with self.test_client() as c: path = '/v3/auth/tokens' body = self._auth_json() r = c.post( path, json=body, follow_redirects=True, expected_status_code=201, ) token_id = r.headers['X-Subject-Token'] c.get( '{}/argument/{}'.format( self.restful_api_url_prefix, argument_id ), headers={'X-Auth-Token': token_id}, ) self.enforcer.enforce_call( action='example:allowed', build_target=_build_enforcement_target, ) def test_policy_enforcer_action_decorator(self): # Create a method that has an action pre-registered action = 'example:allowed' @self.flask_blueprint.route('') @self.enforcer.policy_enforcer_action(action) def nothing_interesting(): return 'OK', 200 self._register_blueprint_to_app() with self.test_client() as c: c.get('%s' % self.url_prefix) self.assertEqual( action, getattr(flask.g, self.enforcer.ACTION_STORE_ATTR) 
) def test_policy_enforcer_action_invalid_action_decorator(self): # If the "action" is not a registered policy enforcement point, check # that a ValueError is raised. def _decorator_fails(): # Create a method that has an action pre-registered, but the # action is bogus action = uuid.uuid4().hex @self.flask_blueprint.route('') @self.enforcer.policy_enforcer_action(action) def nothing_interesting(): return 'OK', 200 self.assertRaises(ValueError, _decorator_fails) def test_enforce_call_invalid_action(self): self.assertRaises( exception.Forbidden, self.enforcer.enforce_call, action=uuid.uuid4().hex, ) def test_enforce_call_not_is_authenticated(self): with self.test_client() as c: c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex) ) # Patch the enforcer to return an empty oslo context. with mock.patch.object( self.enforcer, '_get_oslo_req_context', return_value=None ): self.assertRaises( exception.Unauthorized, self.enforcer.enforce_call, action='example:allowed', ) # Explicitly set "authenticated" on the context to false. ctx = self.enforcer._get_oslo_req_context() ctx.authenticated = False self.assertRaises( exception.Unauthorized, self.enforcer.enforce_call, action='example:allowed', ) def test_enforce_call_explicit_target_attr(self): token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # check the enforcer properly handles explicitly passed in targets # no subject-token processing is done in this case. # # TODO(morgan): confirm if subject-token-processing can/should # occur in this form without causing issues. 
c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={ 'X-Auth-Token': token_id, 'X-Subject-Token': token_id, }, ) target = {'myuser': {'id': self.user_req_admin['id']}} self.enforcer.enforce_call( action='example:target', target_attr=target ) # Ensure extracting the subject-token data is not happening. self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:subject_token', target_attr=target, ) def test_enforce_call_with_subject_token_data(self): token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # Check that the enforcer passes if user_id and subject token # user_id are the same. example:deprecated should also pass # since it is open enforcement. c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={ 'X-Auth-Token': token_id, 'X-Subject-Token': token_id, }, ) self.enforcer.enforce_call(action='example:subject_token') def test_enforce_call_with_member_target_type_and_member_target(self): token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # check the enforcer properly handles passed in member_target_type # and member_target. This form still extracts data from the subject # token. 
c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={ 'X-Auth-Token': token_id, 'X-Subject-Token': token_id, }, ) target_type = 'myuser' target = {'id': self.user_req_admin['id']} self.enforcer.enforce_call( action='example:target', member_target_type=target_type, member_target=target, ) # Ensure we're still extracting the subject-token data self.enforcer.enforce_call(action='example:subject_token') def test_enforce_call_inferred_member_target_data(self): # Check that inferred "get" works as expected for the member target # setup the restful resource for an inferred "get" self.restful_api_resource.member_key = 'argument' member_from_driver = self._driver_simulation_get_method self.restful_api_resource.get_member_from_driver = member_from_driver token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # check the enforcer properly handles inferred member data get # This form still extracts data from the subject token. 
c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={ 'X-Auth-Token': token_id, 'X-Subject-Token': token_id, }, ) self.enforcer.enforce_call(action='example:inferred_member_data') # Ensure we're still extracting the subject-token data self.enforcer.enforce_call(action='example:subject_token') def test_enforce_call_with_filter_values(self): token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # Check that the enforcer passes if a filter is supplied *and* # the filter name is passed to enforce_call c.get( '%s/argument/%s?user=%s' % ( self.restful_api_url_prefix, uuid.uuid4().hex, self.user_req_admin['id'], ), headers={'X-Auth-Token': token_id}, ) self.enforcer.enforce_call( action='example:with_filter', filters=['user'] ) # With No Filters passed into enforce_call self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:with_filter', ) # With No Filters in the PATH c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:with_filter', filters=['user'], ) # With no filters in the path and no filters passed to enforce_call c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:with_filter', ) def test_enforce_call_with_pre_instantiated_enforcer(self): token_path = '/v3/auth/tokens' auth_json = self._auth_json() enforcer = rbac_enforcer.enforcer.RBACEnforcer() with self.test_client() as c: r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') # Check the enforcer behaves as expected with a pre-instantiated # enforcer 
passed into .enforce_call() c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) self.enforcer.enforce_call( action='example:allowed', enforcer=enforcer ) self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:denied', enforcer=enforcer, ) def test_enforce_call_sets_enforcement_attr(self): # Ensure calls to enforce_call set the value on flask.g that indicates # enforce_call has actually been called token_path = '/v3/auth/tokens' auth_json = self._auth_json() with self.test_client() as c: # setup/initial call. Note that the request must hit the flask # app to have access to g (without an explicit app-context push) r = c.post(token_path, json=auth_json, expected_status_code=201) token_id = r.headers.get('X-Subject-Token') c.get( '%s/argument/%s' % (self.restful_api_url_prefix, uuid.uuid4().hex), headers={'X-Auth-Token': token_id}, ) # Ensure the attribute is not set self.assertFalse( hasattr( flask.g, rbac_enforcer.enforcer._ENFORCEMENT_CHECK_ATTR ) ) # Set the value to false, like the resource have done automatically setattr( flask.g, rbac_enforcer.enforcer._ENFORCEMENT_CHECK_ATTR, False ) # Enforce self.enforcer.enforce_call(action='example:allowed') # Verify the attribute has been set to true. self.assertEqual( getattr( flask.g, rbac_enforcer.enforcer._ENFORCEMENT_CHECK_ATTR ), True, ) # Reset Attribute and check that attribute is still set even if # enforcement results in forbidden. 
setattr( flask.g, rbac_enforcer.enforcer._ENFORCEMENT_CHECK_ATTR, False ) self.assertRaises( exception.ForbiddenAction, self.enforcer.enforce_call, action='example:denied', ) self.assertEqual( getattr( flask.g, rbac_enforcer.enforcer._ENFORCEMENT_CHECK_ATTR ), True, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/common/test_resource_options_common.py0000664000175000017500000000626000000000000027324 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import resource_options from keystone.tests import unit class TestResourceOptionObjects(unit.BaseTestCase): def test_option_init_validation(self): # option_name must be a string self.assertRaises( TypeError, resource_options.ResourceOption, 'test', 1234 ) # option_id must be a string self.assertRaises( TypeError, resource_options.ResourceOption, 1234, 'testing' ) # option_id must be 4 characters self.assertRaises( ValueError, resource_options.ResourceOption, 'testing', 'testing' ) resource_options.ResourceOption('test', 'testing') def test_duplicate_option_cases(self): option_id_str_valid = 'test' registry = resource_options.ResourceOptionRegistry(option_id_str_valid) option_name_unique = uuid.uuid4().hex option = resource_options.ResourceOption( option_id_str_valid, option_name_unique ) option_dup_id = resource_options.ResourceOption( option_id_str_valid, uuid.uuid4().hex ) option_dup_name = resource_options.ResourceOption( uuid.uuid4().hex[:4], option_name_unique ) registry.register_option(option) self.assertRaises(ValueError, registry.register_option, option_dup_id) self.assertRaises( ValueError, registry.register_option, option_dup_name ) self.assertIs(1, len(registry.options)) registry.register_option(option) self.assertIs(1, len(registry.options)) def test_registry(self): option = resource_options.ResourceOption( uuid.uuid4().hex[:4], uuid.uuid4().hex ) option2 = resource_options.ResourceOption( uuid.uuid4().hex[:4], uuid.uuid4().hex ) registry = resource_options.ResourceOptionRegistry('TEST') registry.register_option(option) self.assertIn(option.option_name, registry.option_names) self.assertIs(1, len(registry.options)) self.assertIn(option.option_id, registry.option_ids) registry.register_option(option2) self.assertIn(option2.option_name, registry.option_names) self.assertIs(2, len(registry.options)) self.assertIn(option2.option_id, registry.option_ids) self.assertIs(option, registry.get_option_by_id(option.option_id)) 
self.assertIs(option2, registry.get_option_by_id(option2.option_id)) self.assertIs(option, registry.get_option_by_name(option.option_name)) self.assertIs( option2, registry.get_option_by_name(option2.option_name) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/common/test_sql_core.py0000664000175000017500000000407000000000000024156 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from sqlalchemy.ext import declarative from keystone.common import sql from keystone.tests import unit from keystone.tests.unit import utils ModelBase = declarative.declarative_base() class TestModel(ModelBase, sql.ModelDictMixin): # type: ignore __tablename__ = 'testmodel' id = sql.Column(sql.String(64), primary_key=True) text = sql.Column(sql.String(64), nullable=False) class TestModelDictMixin(unit.BaseTestCase): def test_creating_a_model_instance_from_a_dict(self): d = {'id': utils.new_uuid(), 'text': utils.new_uuid()} m = TestModel.from_dict(d) self.assertEqual(d['id'], m.id) self.assertEqual(d['text'], m.text) def test_creating_a_dict_from_a_model_instance(self): m = TestModel(id=utils.new_uuid(), text=utils.new_uuid()) d = m.to_dict() self.assertEqual(d['id'], m.id) self.assertEqual(d['text'], m.text) def test_creating_a_model_instance_from_an_invalid_dict(self): d = {'id': utils.new_uuid(), 'text': utils.new_uuid(), 'extra': None} self.assertRaises(TypeError, TestModel.from_dict, d) def test_creating_a_dict_from_a_model_instance_that_has_extra_attrs(self): expected = {'id': utils.new_uuid(), 'text': utils.new_uuid()} m = TestModel(id=expected['id'], text=expected['text']) m.extra = 'this should not be in the dictionary' # NOTE(notmorgan): This is currently explicitly harmless as this does # not actually use SQL-Alchemy. self.assertEqual(expected, m.to_dict()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/common/test_utils.py0000664000175000017500000004343200000000000023514 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid import fixtures import freezegun from oslo_config import fixture as config_fixture from oslo_log import log from oslo_utils import timeutils from keystone.common import fernet_utils from keystone.common import utils as common_utils import keystone.conf from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone.server.flask import application from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import utils CONF = keystone.conf.CONF TZ = utils.TZ class UtilsTestCase(unit.BaseTestCase): OPTIONAL = object() def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) def test_resource_uuid(self): # Basic uuid test, most IDs issued by keystone look like this: value = '536e28c2017e405e89b25a1ed777b952' self.assertEqual(value, common_utils.resource_uuid(value)) def test_resource_64_char_uuid(self): # Exact 64 length string, like ones used by mapping_id backend, are not # valid UUIDs, so they will be UUID5 namespaced value = 'f13de678ac714bb1b7d1e9a007c10db5' * 2 expected_id = uuid.uuid5(common_utils.RESOURCE_ID_NAMESPACE, value).hex self.assertEqual(expected_id, common_utils.resource_uuid(value)) def test_resource_non_ascii_chars(self): # IDs with non-ASCII characters will be UUID5 namespaced value = 'ß' * 32 expected_id = uuid.uuid5(common_utils.RESOURCE_ID_NAMESPACE, value).hex self.assertEqual(expected_id, common_utils.resource_uuid(value)) def test_resource_invalid_id(self): # This input is invalid because it's 
length is more than 64. value = 'x' * 65 self.assertRaises(ValueError, common_utils.resource_uuid, value) def test_hash(self): password = 'right' wrong = 'wrongwrong' # Two wrongs don't make a right hashed = common_utils.hash_password(password) self.assertTrue(common_utils.check_password(password, hashed)) self.assertFalse(common_utils.check_password(wrong, hashed)) def test_verify_normal_password_strict(self): self.config_fixture.config(strict_password_check=False) password = uuid.uuid4().hex verified = common_utils.verify_length_and_trunc_password(password) self.assertEqual(password.encode('utf-8'), verified) def test_that_a_hash_can_not_be_validated_against_a_hash(self): # NOTE(dstanek): Bug 1279849 reported a problem where passwords # were not being hashed if they already looked like a hash. This # would allow someone to hash their password ahead of time # (potentially getting around password requirements, like # length) and then they could auth with their original password. password = uuid.uuid4().hex hashed_password = common_utils.hash_password(password) new_hashed_password = common_utils.hash_password(hashed_password) self.assertFalse( common_utils.check_password(password, new_hashed_password) ) def test_verify_long_password_strict(self): self.config_fixture.config(strict_password_check=False) self.config_fixture.config(group='identity', max_password_length=5) max_length = CONF.identity.max_password_length invalid_password = 'passw0rd' trunc = common_utils.verify_length_and_trunc_password(invalid_password) self.assertEqual(invalid_password.encode('utf-8')[:max_length], trunc) def test_verify_long_password_strict_raises_exception(self): self.config_fixture.config(strict_password_check=True) self.config_fixture.config(group='identity', max_password_length=5) invalid_password = 'passw0rd' self.assertRaises( exception.PasswordVerificationError, common_utils.verify_length_and_trunc_password, invalid_password, ) def 
test_verify_length_and_trunc_password_throws_validation_error(self): class SpecialObject: pass special_object = SpecialObject() invalid_passwords = [True, special_object, 4.3, 5] for invalid_password in invalid_passwords: self.assertRaises( exception.ValidationError, common_utils.verify_length_and_trunc_password, invalid_password, ) def test_hash_long_password_truncation(self): self.config_fixture.config(strict_password_check=False) invalid_length_password = '0' * 9999999 hashed = common_utils.hash_password(invalid_length_password) self.assertTrue( common_utils.check_password(invalid_length_password, hashed) ) def test_hash_long_password_strict(self): self.config_fixture.config(strict_password_check=True) invalid_length_password = '0' * 9999999 self.assertRaises( exception.PasswordVerificationError, common_utils.hash_password, invalid_length_password, ) def test_max_algo_length_truncates_password(self): self.config_fixture.config(strict_password_check=True) self.config_fixture.config( group='identity', password_hash_algorithm='bcrypt' ) self.config_fixture.config(group='identity', max_password_length='96') invalid_length_password = '0' * 96 self.assertRaises( exception.PasswordVerificationError, common_utils.hash_password, invalid_length_password, ) def test_bcrypt_sha256_not_truncate_password(self): self.config_fixture.config(strict_password_check=True) self.config_fixture.config( group='identity', password_hash_algorithm='bcrypt_sha256' ) password = '0' * 128 password_verified = common_utils.verify_length_and_trunc_password( password ) hashed = common_utils.hash_password(password) self.assertTrue(common_utils.check_password(password, hashed)) self.assertEqual(password.encode('utf-8'), password_verified) def _create_test_user(self, password=OPTIONAL): user = {"name": "hthtest"} if password is not self.OPTIONAL: user['password'] = password return user def test_hash_user_password_without_password(self): user = self._create_test_user() hashed = 
common_utils.hash_user_password(user) self.assertEqual(user, hashed) def test_hash_user_password_with_null_password(self): user = self._create_test_user(password=None) hashed = common_utils.hash_user_password(user) self.assertEqual(user, hashed) def test_hash_user_password_with_empty_password(self): password = '' user = self._create_test_user(password=password) user_hashed = common_utils.hash_user_password(user) password_hashed = user_hashed['password'] self.assertTrue(common_utils.check_password(password, password_hashed)) def test_hash_edge_cases(self): hashed = common_utils.hash_password('secret') self.assertFalse(common_utils.check_password('', hashed)) self.assertFalse(common_utils.check_password(None, hashed)) def test_hash_unicode(self): password = 'Comment \xe7a va' wrong = 'Comment ?a va' hashed = common_utils.hash_password(password) self.assertTrue(common_utils.check_password(password, hashed)) self.assertFalse(common_utils.check_password(wrong, hashed)) def test_auth_str_equal(self): self.assertTrue(common_utils.auth_str_equal('abc123', 'abc123')) self.assertFalse(common_utils.auth_str_equal('a', 'aaaaa')) self.assertFalse(common_utils.auth_str_equal('aaaaa', 'a')) self.assertFalse(common_utils.auth_str_equal('ABC123', 'abc123')) def test_url_safe_check(self): base_str = 'i am safe' self.assertFalse(common_utils.is_not_url_safe(base_str)) for i in common_utils.URL_RESERVED_CHARS: self.assertTrue(common_utils.is_not_url_safe(base_str + i)) def test_url_safe_with_unicode_check(self): base_str = 'i am \xe7afe' self.assertFalse(common_utils.is_not_url_safe(base_str)) for i in common_utils.URL_RESERVED_CHARS: self.assertTrue(common_utils.is_not_url_safe(base_str + i)) def test_isotime_returns_microseconds_when_subsecond_is_true(self): time = timeutils.utcnow().replace(microsecond=500000) with freezegun.freeze_time(time): string_time = common_utils.isotime(subsecond=True) expected_string_ending = str(time.second) + '.000000Z' 
self.assertTrue(string_time.endswith(expected_string_ending)) def test_isotime_returns_seconds_when_subsecond_is_false(self): time = timeutils.utcnow().replace(microsecond=500000) with freezegun.freeze_time(time): string_time = common_utils.isotime(subsecond=False) expected_string_ending = str(time.second) + 'Z' self.assertTrue(string_time.endswith(expected_string_ending)) def test_isotime_rounds_microseconds_of_objects_passed_in(self): time = timeutils.utcnow().replace(microsecond=500000) string_time = common_utils.isotime(at=time, subsecond=True) expected_string_ending = str(time.second) + '.000000Z' self.assertTrue(string_time.endswith(expected_string_ending)) def test_isotime_truncates_microseconds_of_objects_passed_in(self): time = timeutils.utcnow().replace(microsecond=500000) string_time = common_utils.isotime(at=time, subsecond=False) expected_string_ending = str(time.second) + 'Z' self.assertTrue(string_time.endswith(expected_string_ending)) def test_get_certificate_subject_dn(self): cert_pem = unit.create_pem_certificate( unit.create_dn( common_name='test', organization_name='dev', locality_name='suzhou', state_or_province_name='jiangsu', country_name='cn', user_id='user_id', domain_component='test.com', email_address='user@test.com', ) ) dn = common_utils.get_certificate_subject_dn(cert_pem) self.assertEqual('test', dn.get('CN')) self.assertEqual('dev', dn.get('O')) self.assertEqual('suzhou', dn.get('L')) self.assertEqual('jiangsu', dn.get('ST')) self.assertEqual('cn', dn.get('C')) self.assertEqual('user_id', dn.get('UID')) self.assertEqual('test.com', dn.get('DC')) self.assertEqual('user@test.com', dn.get('emailAddress')) def test_get_certificate_issuer_dn(self): root_cert, root_key = unit.create_certificate( unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='root', ) ) cert_pem = unit.create_pem_certificate( unit.create_dn( 
common_name='test', organization_name='dev', locality_name='suzhou', state_or_province_name='jiangsu', country_name='cn', user_id='user_id', domain_component='test.com', email_address='user@test.com', ), ca=root_cert, ca_key=root_key, ) dn = common_utils.get_certificate_subject_dn(cert_pem) self.assertEqual('test', dn.get('CN')) self.assertEqual('dev', dn.get('O')) self.assertEqual('suzhou', dn.get('L')) self.assertEqual('jiangsu', dn.get('ST')) self.assertEqual('cn', dn.get('C')) self.assertEqual('user_id', dn.get('UID')) self.assertEqual('test.com', dn.get('DC')) self.assertEqual('user@test.com', dn.get('emailAddress')) dn = common_utils.get_certificate_issuer_dn(cert_pem) self.assertEqual('root', dn.get('CN')) self.assertEqual('fujitsu', dn.get('O')) self.assertEqual('kawasaki', dn.get('L')) self.assertEqual('kanagawa', dn.get('ST')) self.assertEqual('jp', dn.get('C')) self.assertEqual('test', dn.get('OU')) def test_get_certificate_subject_dn_not_pem_format(self): self.assertRaises( exception.ValidationError, common_utils.get_certificate_subject_dn, 'MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC', ) def test_get_certificate_issuer_dn_not_pem_format(self): self.assertRaises( exception.ValidationError, common_utils.get_certificate_issuer_dn, 'MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC', ) def test_get_certificate_thumbprint(self): cert_pem = '''-----BEGIN CERTIFICATE----- MIIEkTCCAnkCFDIzsgpdRGF//5ukMuueXnRxQALhMA0GCSqGSIb3DQEBCwUAMIGC MQswCQYDVQQGEwJjbjEQMA4GA1UECAwHamlhbmdzdTEPMA0GA1UEBwwGc3V6aG91 MQ0wCwYDVQQKDARqZnR0MQwwCgYDVQQLDANkZXYxEzARBgNVBAMMCnJvb3QubG9j YWwxHjAcBgkqhkiG9w0BCQEWD3Rlc3RAcm9vdC5sb2NhbDAeFw0yMjA2MTYwNzM3 NTZaFw0yMjEyMTMwNzM3NTZaMIGGMQswCQYDVQQGEwJjbjEQMA4GA1UECAwHamlh bmdzdTEPMA0GA1UEBwwGc3V6aG91MQ0wCwYDVQQKDARqZnR0MQwwCgYDVQQLDANk ZXYxFTATBgNVBAMMDGNsaWVudC5sb2NhbDEgMB4GCSqGSIb3DQEJARYRdGVzdEBj bGllbnQubG9jYWwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCah1Uz 
2OVbk8zLslxxGV+AR6FTy9b/VoinmB6A0jJA1Zz2D6rsjN2S5xQ5wHIO2WSVX9Ry SonOmeZZqRA9faNJcNNcrBhJICScAhMGHCuli3EUMry/6xK0OYHGgI2X6mcTaIjv tFKHO1BCb5YGdNBa+ff+ncTeVX/PeN3nKjA4xvQb9JZxJTgY0JVhledbaoepFSdW EFW0nbUF+8lj1gCo5E4cAX1eTcUKs43FnWGCJcJT6FB1vP9x8e4h9p0RWbb9GMrU DXKbzF5e28qIiCkYHv2/A/G/J+aeg2K4Cbqy+8908I5BdWZEsJBhWJ0+CEtC3n91 fU6dnAyipO496aa/AgMBAAEwDQYJKoZIhvcNAQELBQADggIBABoOOmLrWNlQzodS n2wfkiF0Lz+pj3FKFPz3sYUYWkAiKXU/6RRu1Md7INRo0MFau4iAN8Raq4JFdbnU HRN9G/UU58ETqi/8cYfOA2+MHHRif1Al9YSvTgHQa6ljZPttGeigOqmGlovPd+7R vLXlKtcr5XBVk9pWPmVpwtAN3bMVlphgEqBO26Ff9J3G5PaNQ6UdpwXC19mRqk6r BUsFBRwy7EeeGNy8DvoHTJfMc2JUbLjesSMOmIkaOGbhe327iRd/GJe4dO91+prE HNWVR/bVoGiUZvSLPqrwU173XbdNd6yMKC+fULICI34eaWDe1zHrg9XdRxtessUx OyJw5bgH09lOs8DSYXjFyx5lDxtERKHaLRgpSNd5foQO/mHiegC2qmdtxqKyOwub V/h6vziDsFZfciwmo6iw3ZpdBvjbYqw32joURQ1IVh1naY6ZzMwq/PsyYVhMYUNB XYPKvm68YfKuYmpwF7Z5Wll4EWm5DTq1dbmjdo+OQsMyiwWepWE0WV7Ng+AEbTqP /akzUXt/AEbbBpZskB6v5q/YOcglWuAQVXs2viguyDvOQVbEB7JKDi4xzlZg3kQP apjt17fip7wQi2jJkwdyAqvrdi/xLhK5+6BSo04lNc8sGZ9wToIoNkgv0cG+BrVU 4cJHNiTQl8bxfSgwemgSYnnyXM4k -----END CERTIFICATE-----''' thumbprint = common_utils.get_certificate_thumbprint(cert_pem) self.assertEqual( 'dMmoJKE9MIJK9VcyahYCb417JDhDfdtTiq_krco8-tk=', thumbprint ) class ServiceHelperTests(unit.BaseTestCase): @application.fail_gracefully def _do_test(self): raise Exception("Test Exc") def test_fail_gracefully(self): self.assertRaises(unit.UnexpectedExit, self._do_test) class FernetUtilsTestCase(unit.BaseTestCase): def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) def test_debug_message_logged_when_loading_fernet_token_keys(self): self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) logging_fixture = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) fernet_utilities = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) 
fernet_utilities.load_keys() expected_debug_message = ( 'Loaded 2 Fernet keys from %(dir)s, but `[fernet_tokens] ' 'max_active_keys = %(max)d`; perhaps there have not been enough ' 'key rotations to reach `max_active_keys` yet?' ) % { 'dir': CONF.fernet_tokens.key_repository, 'max': CONF.fernet_tokens.max_active_keys, } self.assertIn(expected_debug_message, logging_fixture.output) def test_debug_message_not_logged_when_loading_fernet_credential_key(self): self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', CONF.fernet_tokens.max_active_keys, ) ) logging_fixture = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) fernet_utilities = fernet_utils.FernetUtils( CONF.credential.key_repository, credential_fernet.MAX_ACTIVE_KEYS, 'credential', ) fernet_utilities.load_keys() debug_message = ( 'Loaded 2 Fernet keys from %(dir)s, but `[credential] ' 'max_active_keys = %(max)d`; perhaps there have not been enough ' 'key rotations to reach `max_active_keys` yet?' ) % { 'dir': CONF.credential.key_repository, 'max': credential_fernet.MAX_ACTIVE_KEYS, } self.assertNotIn(debug_message, logging_fixture.output) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/config_files/0000775000175000017500000000000000000000000022074 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_ldap.conf0000664000175000017500000000013200000000000025326 0ustar00zuulzuul00000000000000[ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_ldap_pool.conf0000664000175000017500000000207600000000000026370 0ustar00zuulzuul00000000000000[ldap] url = 
fakepool://memory user = cn=Admin password = password backend_entities = ['Tenant', 'User', 'UserRoleAssociation', 'Role', 'Group', 'Domain'] suffix = cn=example,cn=com # Connection pooling specific attributes # Enable LDAP connection pooling. (boolean value) use_pool=true # Connection pool size. (integer value) pool_size=5 # Maximum count of reconnect trials. (integer value) pool_retry_max=2 # Time span in seconds to wait between two reconnect trials. # (floating point value) pool_retry_delay=0.2 # Connector timeout in seconds. Value -1 indicates indefinite # wait for response. (integer value) pool_connection_timeout=-1 # Connection lifetime in seconds. # (integer value) pool_connection_lifetime=600 # Enable LDAP connection pooling for end user authentication. # If use_pool is disabled, then this setting is meaningless # and is not used at all. (boolean value) use_auth_pool=true # End user auth connection pool size. (integer value) auth_pool_size=50 # End user auth connection lifetime in seconds. 
(integer # value) auth_pool_connection_lifetime=60././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_ldap_sql.conf0000664000175000017500000000064700000000000026220 0ustar00zuulzuul00000000000000[database] #For a specific location file based SQLite use: #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 connection_recycle_time = 200 [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_liveldap.conf0000664000175000017500000000041000000000000026205 0ustar00zuulzuul00000000000000[ldap] url = ldap://localhost user = cn=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_multi_ldap_sql.conf0000664000175000017500000000052000000000000027420 0ustar00zuulzuul00000000000000[database] connection = sqlite:// #For a file based sqlite use #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 connection_recycle_time = 200 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/config_files/backend_pool_liveldap.conf0000664000175000017500000000153500000000000027247 0ustar00zuulzuul00000000000000[ldap] url = ldap://localhost user = cn=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail # Connection pooling specific attributes # Enable LDAP connection pooling. (boolean value) use_pool=true # Connection pool size. (integer value) pool_size=5 # Connection lifetime in seconds. # (integer value) pool_connection_lifetime=60 # Enable LDAP connection pooling for end user authentication. # If use_pool is disabled, then this setting is meaningless # and is not used at all. (boolean value) use_auth_pool=true # End user auth connection pool size. (integer value) auth_pool_size=50 # End user auth connection lifetime in seconds. (integer # value) auth_pool_connection_lifetime=300 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_sql.conf0000664000175000017500000000051400000000000025211 0ustar00zuulzuul00000000000000[database] #For a specific location file based SQLite use: #connection = sqlite:////tmp/keystone.db #To Test MySQL: #connection = mysql+pymysql://keystone:keystone@localhost/keystone?charset=utf8 #To Test PostgreSQL: #connection = postgresql://keystone:keystone@localhost/keystone?client_encoding=utf8 connection_recycle_time = 200 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/backend_tls_liveldap.conf0000664000175000017500000000060100000000000027071 0ustar00zuulzuul00000000000000[ldap] url = ldap:// user = dc=Manager,dc=openstack,dc=org password = test suffix = dc=openstack,dc=org group_tree_dn = 
ou=UserGroups,dc=openstack,dc=org user_tree_dn = ou=Users,dc=openstack,dc=org user_enabled_emulation = True user_mail_attribute = mail use_tls = True tls_cacertfile = /etc/keystone/ssl/certs/cacert.pem tls_cacertdir = /etc/keystone/ssl/certs/ tls_req_cert = demand ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/0000775000175000017500000000000000000000000031277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf 22 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain0000664000175000017500000000017200000000000034331 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. [identity] driver = sql././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/0000775000175000017500000000000000000000000027265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf0000664000175000017500000000050600000000000033361 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the default domain for # use with unit tests. # # The domain_name of the default domain is 'Default', hence the # strange mix of upper/lower case in the file name. 
[ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf0000664000175000017500000000035100000000000033323 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. [ldap] url = fake://memory1 user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldap list_limit = 101 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf0000664000175000017500000000045500000000000033331 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the test domain # 'domain2' for use with unit tests. [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=myroot,cn=com group_tree_dn = ou=UserGroups,dc=myroot,dc=org user_tree_dn = ou=Users,dc=myroot,dc=org [identity] driver = ldap././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_extra_sql/0000775000175000017500000000000000000000000027776 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf0000664000175000017500000000017200000000000034036 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the test domain # 'domain2' for use with unit tests. 
[identity] driver = sql././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/0000775000175000017500000000000000000000000030434 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf 22 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.co0000664000175000017500000000050500000000000034203 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the default domain for # use with unit tests. # # The domain_name of the default domain is 'Default', hence the # strange mix of upper/lower case in the file name. [ldap] url = fake://memory user = cn=Admin password = password suffix = cn=example,cn=com [identity] driver = ldap././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf 22 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.co0000664000175000017500000000017200000000000034147 0ustar00zuulzuul00000000000000# The domain-specific configuration file for the test domain # 'domain1' for use with unit tests. 
[identity] driver = sql././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/config_files/test_auth_plugin.conf0000664000175000017500000000026600000000000026325 0ustar00zuulzuul00000000000000[auth] methods = external,password,token,simple_challenge_response,saml2,openid,x509,mapped simple_challenge_response = keystone.tests.unit.test_auth_plugin.SimpleChallengeResponse ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/contrib/0000775000175000017500000000000000000000000021105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/contrib/__init__.py0000664000175000017500000000000000000000000023204 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/contrib/federation/0000775000175000017500000000000000000000000023225 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/contrib/federation/__init__.py0000664000175000017500000000000000000000000025324 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/contrib/federation/test_utils.py0000664000175000017500000012551200000000000026004 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import flask from oslo_config import fixture as config_fixture from oslo_serialization import jsonutils from keystone.auth.plugins import mapped import keystone.conf from keystone import exception from keystone.federation import utils as mapping_utils from keystone.tests import unit from keystone.tests.unit import mapping_fixtures CONF = keystone.conf.CONF FAKE_MAPPING_ID = uuid.uuid4().hex class MappingRuleEngineTests(unit.BaseTestCase): """A class for testing the mapping rule engine.""" def setUp(self): super().setUp() # create dummy app so we can setup a request context for our # tests. self.flask_app = flask.Flask(__name__) self.cleanup_instance('flask_app') def assertValidMappedUserObject( self, mapped_properties, user_type='ephemeral', domain_id=None ): """Check whether mapped properties object has 'user' within. According to today's rules, RuleProcessor does not have to issue user's id or name. What's actually required is user's type. """ self.assertIn( 'user', mapped_properties, message='Missing user object in mapped properties', ) user = mapped_properties['user'] self.assertIn('type', user) self.assertEqual(user_type, user['type']) if domain_id: domain = user['domain'] domain_name_or_id = domain.get('id') or domain.get('name') self.assertEqual(domain_id, domain_name_or_id) def test_rule_engine_any_one_of_and_direct_mapping(self): """Should return user's name and group id EMPLOYEE_GROUP_ID. The ADMIN_ASSERTION should successfully have a match in MAPPING_LARGE. 
They will test the case where `any_one_of` is valid, and there is a direct mapping for the users name. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.ADMIN_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) fn = assertion.get('FirstName') ln = assertion.get('LastName') full_name = f'{fn} {ln}' group_ids = values.get('group_ids') user_name = values.get('user', {}).get('name') self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) self.assertEqual(full_name, user_name) def test_rule_engine_no_regex_match(self): """Should deny authorization, the email of the tester won't match. This will not match since the email in the assertion will fail the regex test. It is set to match any @example.com address. But the incoming value is set to eviltester@example.org. RuleProcessor should raise ValidationError. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.BAD_TESTER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_regex_many_groups(self): """Should return group CONTRACTOR_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_TESTER_REGEX. This will test the case where many groups are in the assertion, and a regex value is used to try and find a match. """ mapping = mapping_fixtures.MAPPING_TESTER_REGEX assertion = mapping_fixtures.TESTER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_any_one_of_many_rules(self): """Should return group CONTRACTOR_GROUP_ID. 
The CONTRACTOR_ASSERTION should successfully have a match in MAPPING_SMALL. This will test the case where many rules must be matched, including an `any_one_of`, and a direct mapping. """ mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.CONTRACTOR_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.CONTRACTOR_GROUP_ID, group_ids) def test_rule_engine_not_any_of_and_direct_mapping(self): """Should return user's name and email. The CUSTOMER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test the case where a requirement has `not_any_of`, and direct mapping to a username, no group. """ mapping = mapping_fixtures.MAPPING_LARGE assertion = mapping_fixtures.CUSTOMER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertEqual( [], group_ids, ) def test_rule_engine_not_any_of_many_rules(self): """Should return group EMPLOYEE_GROUP_ID. The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_SMALL. This will test the case where many remote rules must be matched, including a `not_any_of`. 
""" mapping = mapping_fixtures.MAPPING_SMALL assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.EMPLOYEE_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_pass(self): """Should return group DEVELOPER_GROUP_ID. The DEVELOPER_ASSERTION should successfully have a match in MAPPING_DEVELOPER_REGEX. This will test the case where many remote rules must be matched, including a `not_any_of`, with regex set to True. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) def test_rule_engine_not_any_of_regex_verify_fail(self): """Should deny authorization. The email in the assertion will fail the regex test. It is set to reject any @example.org address, but the incoming value is set to evildeveloper@example.org. RuleProcessor should yield ValidationError. """ mapping = mapping_fixtures.MAPPING_DEVELOPER_REGEX assertion = mapping_fixtures.BAD_DEVELOPER_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def _rule_engine_regex_match_and_many_groups(self, assertion): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. A helper function injecting assertion passed as an argument. Expect DEVELOPER_GROUP_ID and TESTER_GROUP_ID in the results. 
""" mapping = mapping_fixtures.MAPPING_LARGE rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) user_name = assertion.get('UserName') group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertValidMappedUserObject(values) self.assertEqual(user_name, name) self.assertIn(mapping_fixtures.DEVELOPER_GROUP_ID, group_ids) self.assertIn(mapping_fixtures.TESTER_GROUP_ID, group_ids) def test_rule_engine_regex_match_and_many_groups(self): """Should return group DEVELOPER_GROUP_ID and TESTER_GROUP_ID. The TESTER_ASSERTION should successfully have a match in MAPPING_LARGE. This will test a successful regex match for an `any_one_of` evaluation type, and will have many groups returned. """ self._rule_engine_regex_match_and_many_groups( mapping_fixtures.TESTER_ASSERTION ) def test_rule_engine_discards_nonstring_objects(self): """Check whether RuleProcessor discards non string objects. Despite the fact that assertion is malformed and contains non string objects, RuleProcessor should correctly discard them and successfully have a match in MAPPING_LARGE. 
""" self._rule_engine_regex_match_and_many_groups( mapping_fixtures.MALFORMED_TESTER_ASSERTION ) def test_rule_engine_regex_blacklist(self): mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_REGEX assertion = mapping_fixtures.EMPLOYEE_PARTTIME_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped = rp.process(assertion) expected = { 'user': {'type': 'ephemeral'}, 'projects': [], 'group_ids': [], 'group_names': [ { 'name': 'Manager', 'domain': {'id': mapping_fixtures.FEDERATED_DOMAIN}, } ], } self.assertEqual(expected, mapped) def test_rule_engine_regex_whitelist(self): mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_REGEX assertion = mapping_fixtures.EMPLOYEE_PARTTIME_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped = rp.process(assertion) expected = { 'user': {'type': 'ephemeral'}, 'projects': [], 'group_ids': [], 'group_names': [ { 'name': 'Employee', 'domain': {'id': mapping_fixtures.FEDERATED_DOMAIN}, }, { 'name': 'PartTimeEmployee', 'domain': {'id': mapping_fixtures.FEDERATED_DOMAIN}, }, ], } self.assertEqual(expected, mapped) def test_rule_engine_fails_after_discarding_nonstring(self): """Check whether RuleProcessor discards non string objects. Expect RuleProcessor to discard non string object, which is required for a correct rule match. RuleProcessor will result with ValidationError. """ mapping = mapping_fixtures.MAPPING_SMALL rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_MALFORMED_ASSERTION self.assertRaises(exception.ValidationError, rp.process, assertion) def test_using_remote_direct_mapping_that_doesnt_exist_fails(self): """Test for the correct error when referring to a bad remote match. The remote match must exist in a rule when a local section refers to a remote matching using the format (e.g. {0} in a local section). 
""" mapping = mapping_fixtures.MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CUSTOMER_ASSERTION self.assertRaises(exception.DirectMappingError, rp.process, assertion) def test_rule_engine_returns_group_names(self): """Check whether RuleProcessor returns group names with their domains. RuleProcessor should return 'group_names' entry with a list of dictionaries with two entries 'name' and 'domain' identifying group by its name and domain. """ mapping = mapping_fixtures.MAPPING_GROUP_NAMES rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": { "name": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_NAME }, }, mapping_fixtures.TESTER_GROUP_NAME: { "name": mapping_fixtures.TESTER_GROUP_NAME, "domain": {"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID}, }, } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) def test_rule_engine_whitelist_and_direct_groups_mapping(self): """Should return user's groups Developer and Contractor. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_WHITELIST. It will test the case where 'whitelist' correctly filters out Manager and only allows Developer and Contractor. 
""" mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.DEVELOPER_GROUP_NAME: { "name": mapping_fixtures.DEVELOPER_GROUP_NAME, "domain": {"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID}, }, mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": {"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID}, }, } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping(self): """Should return user's group Developer. The EMPLOYEE_ASSERTION_MULTIPLE_GROUPS should successfully have a match in MAPPING_GROUPS_BLACKLIST. It will test the case where 'blacklist' correctly filters out Manager and Developer and only allows Contractor. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": {"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID}, } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_blacklist_and_direct_groups_mapping_multiples(self): """Test matching multiple values before the blacklist. 
Verifies that the local indexes are correct when matching multiple remote values for a field when the field occurs before the blacklist entry in the remote rules. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MULTIPLES assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) reference = { mapping_fixtures.CONTRACTOR_GROUP_NAME: { "name": mapping_fixtures.CONTRACTOR_GROUP_NAME, "domain": {"id": mapping_fixtures.DEVELOPER_GROUP_DOMAIN_ID}, } } for rule in mapped_properties['group_names']: self.assertDictEqual(reference.get(rule.get('name')), rule) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual([], mapped_properties['group_ids']) def test_rule_engine_whitelist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value. This is a variation with a ``whitelist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_blacklist_direct_group_mapping_missing_domain(self): """Test if the local rule is rejected upon missing domain value. This is a variation with a ``blacklist`` filter. """ mapping = mapping_fixtures.MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN assertion = mapping_fixtures.EMPLOYEE_ASSERTION_MULTIPLE_GROUPS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_no_groups_allowed(self): """Should return user mapped to no groups. 
The EMPLOYEE_ASSERTION should successfully have a match in MAPPING_GROUPS_WHITELIST, but 'whitelist' should filter out the group values from the assertion and thus map to no groups. """ mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertListEqual(mapped_properties['group_names'], []) self.assertListEqual(mapped_properties['group_ids'], []) self.assertEqual('tbo', mapped_properties['user']['name']) def test_mapping_federated_domain_specified(self): """Test mapping engine when domain 'ephemeral' is explicitly set. For that, we use mapping rule MAPPING_EPHEMERAL_USER and assertion EMPLOYEE_ASSERTION """ mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_set_ephemeral_domain_to_ephemeral_users(self): """Test auto assigning service domain to ephemeral users. Test that ephemeral users will always become members of federated service domain. The check depends on ``type`` value which must be set to ``ephemeral`` in case of ephemeral user. 
""" mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) def test_local_user_local_domain(self): """Test that local users can have non-service domains assigned.""" mapping = mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject( mapped_properties, user_type='local', domain_id=mapping_fixtures.LOCAL_DOMAIN, ) def test_user_identifications_name(self): """Test various mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has property type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if unique_id is properly set and equal to display_name, as it was not explicitly specified in the mapping. 
""" mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.CONTRACTOR_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) self.assertEqual('jsmith', mapped_properties['user']['name']) resource_api_mock = mock.patch( 'keystone.resource.core.DomainConfigManager' ) idp_domain_id = uuid.uuid4().hex mapped.validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api_mock ) self.assertEqual('jsmith', mapped_properties['user']['id']) self.assertEqual('jsmith', mapped_properties['user']['name']) self.assertEqual( idp_domain_id, mapped_properties['user']['domain']['id'] ) def test_user_identifications_name_and_federated_domain(self): """Test various mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's name is properly mapped from the assertion - Check if the unique_id and display_name are properly set """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.EMPLOYEE_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) resource_api_mock = mock.patch( 'keystone.resource.core.DomainConfigManager' ) idp_domain_id = uuid.uuid4().hex user_domain_id = mapped_properties['user']['domain']['id'] mapped.validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api_mock ) self.assertEqual('tbo', mapped_properties['user']['name']) self.assertEqual( 'abc123%40example.com', mapped_properties['user']['id'] ) self.assertEqual( user_domain_id, mapped_properties['user']['domain']['id'] ) def 
test_user_identification_id(self): """Test various mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has propert type set ('ephemeral') - Check if user's display_name is properly set and equal to unique_id, as it was not explicitly specified in the mapping. """ mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = mapping_fixtures.ADMIN_ASSERTION mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) with self.flask_app.test_request_context(): resource_api_mock = mock.patch( 'keystone.resource.core.DomainConfigManager' ) idp_domain_id = uuid.uuid4().hex mapped.validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api_mock ) self.assertEqual('bob', mapped_properties['user']['name']) self.assertEqual('bob', mapped_properties['user']['id']) self.assertEqual( idp_domain_id, mapped_properties['user']['domain']['id'] ) def test_get_user_unique_id_and_display_name(self): mapping = mapping_fixtures.MAPPING_USER_IDS assertion = mapping_fixtures.ADMIN_ASSERTION FAKE_MAPPING_ID = uuid.uuid4().hex rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) with self.flask_app.test_request_context( environ_base={'REMOTE_USER': 'remote_user'} ): resource_api_mock = mock.patch( 'keystone.resource.core.DomainConfigManager' ) idp_domain_id = uuid.uuid4().hex mapped.validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api_mock ) self.assertEqual('remote_user', mapped_properties['user']['name']) self.assertEqual('bob', mapped_properties['user']['id']) self.assertEqual( idp_domain_id, 
mapped_properties['user']['domain']['id'] ) def test_user_identification_id_and_name(self): """Test various mapping options and how users are identified. This test calls mapped.setup_username() for propagating user object. Test plan: - Check if the user has proper domain ('federated') set - Check if the user has proper type set ('ephemeral') - Check if display_name is properly set from the assertion - Check if unique_id is properly set and equal to value hardcoded in the mapping This test does two iterations with different assertions used as input for the Mapping Engine. Different assertions will be matched with different rules in the ruleset, effectively issuing different user_id (hardcoded values). In the first iteration, the hardcoded user_id is not url-safe and we expect Keystone to make it url safe. In the latter iteration, provided user_id is already url-safe and we expect server not to change it. """ testcases = [ (mapping_fixtures.CUSTOMER_ASSERTION, 'bwilliams'), (mapping_fixtures.EMPLOYEE_ASSERTION, 'tbo'), ] for assertion, exp_user_name in testcases: mapping = mapping_fixtures.MAPPING_USER_IDS rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertValidMappedUserObject(mapped_properties) resource_api_mock = mock.patch( 'keystone.resource.core.DomainConfigManager' ) idp_domain_id = uuid.uuid4().hex user_domain_id = mapped_properties['user']['domain']['id'] mapped.validate_and_prepare_federated_user( mapped_properties, idp_domain_id, resource_api_mock ) self.assertEqual(exp_user_name, mapped_properties['user']['name']) self.assertEqual( 'abc123%40example.com', mapped_properties['user']['id'] ) self.assertEqual( user_domain_id, mapped_properties['user']['domain']['id'] ) def test_whitelist_pass_through(self): mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = 
mapping_fixtures.DEVELOPER_ASSERTION mapped_properties = rp.process(assertion) self.assertValidMappedUserObject(mapped_properties) self.assertEqual('developacct', mapped_properties['user']['name']) self.assertEqual( 'Developer', mapped_properties['group_names'][0]['name'] ) def test_mapping_validation_with_incorrect_local_keys(self): mapping = mapping_fixtures.MAPPING_BAD_LOCAL_SETUP self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_with_user_name_and_domain_name(self): mapping = mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME mapping_utils.validate_mapping_structure(mapping) def test_mapping_validation_with_user_name_and_domain_id(self): mapping = mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID mapping_utils.validate_mapping_structure(mapping) def test_mapping_validation_with_user_id_and_domain_id(self): mapping = mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID mapping_utils.validate_mapping_structure(mapping) def test_mapping_validation_with_group_name_and_domain(self): mapping = mapping_fixtures.MAPPING_GROUP_NAMES mapping_utils.validate_mapping_structure(mapping) def test_mapping_validation_bad_domain(self): mapping = mapping_fixtures.MAPPING_BAD_DOMAIN self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_bad_group(self): mapping = mapping_fixtures.MAPPING_BAD_GROUP self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_with_group_name_without_domain(self): mapping = mapping_fixtures.MAPPING_GROUP_NAME_WITHOUT_DOMAIN self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_with_group_id_and_domain(self): mapping = mapping_fixtures.MAPPING_GROUP_ID_WITH_DOMAIN self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def 
test_mapping_validation_with_bad_local_type_user_in_assertion(self): mapping = mapping_fixtures.MAPPING_BAD_LOCAL_TYPE_USER_IN_ASSERTION self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_no_local(self): mapping = mapping_fixtures.MAPPING_MISSING_LOCAL self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validataion_no_remote(self): mapping = mapping_fixtures.MAPPING_NO_REMOTE self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_mapping_validation_no_type(self): mapping = mapping_fixtures.MAPPING_MISSING_TYPE self.assertRaises( exception.ValidationError, mapping_utils.validate_mapping_structure, mapping, ) def test_type_not_in_assertion(self): """Test that if the remote "type" is not in the assertion it fails.""" mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_PASS_THROUGH rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) assertion = {uuid.uuid4().hex: uuid.uuid4().hex} self.assertRaises(exception.ValidationError, rp.process, assertion) def test_rule_engine_groups_mapping_only_one_group(self): """Test mapping engine when groups is explicitly set. If the groups list has only one group, test if the transformation is done correctly """ mapping = mapping_fixtures.MAPPING_GROUPS_WITH_EMAIL assertion = mapping_fixtures.GROUPS_ASSERTION_ONLY_ONE_GROUP rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('jsmith', mapped_properties['user']['name']) self.assertEqual( 'jill@example.com', mapped_properties['user']['email'] ) self.assertEqual( 'ALL USERS', mapped_properties['group_names'][0]['name'] ) def test_rule_engine_groups_mapping_only_one_numerical_group(self): """Test mapping engine when groups is explicitly set. 
If the groups list has only one group, test if the transformation is done correctly """ mapping = mapping_fixtures.MAPPING_GROUPS_WITH_EMAIL assertion = mapping_fixtures.GROUPS_ASSERTION_ONLY_ONE_NUMERICAL_GROUP rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('jsmith', mapped_properties['user']['name']) self.assertEqual( 'jill@example.com', mapped_properties['user']['email'] ) self.assertEqual('1234', mapped_properties['group_names'][0]['name']) def test_rule_engine_group_ids_mapping_whitelist(self): """Test mapping engine when group_ids is explicitly set. Also test whitelists on group ids """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertCountEqual( ['abc123', 'ghi789', 'klm012'], mapped_properties['group_ids'] ) def test_rule_engine_group_ids_mapping_blacklist(self): """Test mapping engine when group_ids is explicitly set. Also test blacklists on group ids """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_BLACKLIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertCountEqual( ['abc123', 'ghi789', 'klm012'], mapped_properties['group_ids'] ) def test_rule_engine_group_ids_mapping_only_one_group(self): """Test mapping engine when group_ids is explicitly set. 
If the group ids list has only one group, test if the transformation is done correctly """ mapping = mapping_fixtures.MAPPING_GROUPS_IDS_WHITELIST assertion = mapping_fixtures.GROUP_IDS_ASSERTION_ONLY_ONE_GROUP rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) mapped_properties = rp.process(assertion) self.assertIsNotNone(mapped_properties) self.assertEqual('opilotte', mapped_properties['user']['name']) self.assertListEqual([], mapped_properties['group_names']) self.assertCountEqual( ['210mlk', '321cba'], mapped_properties['group_ids'] ) def test_mapping_projects(self): mapping = mapping_fixtures.MAPPING_PROJECTS assertion = mapping_fixtures.EMPLOYEE_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) expected_username = mapping_fixtures.EMPLOYEE_ASSERTION['UserName'] self.assertEqual(expected_username, values['user']['name']) expected_projects = [ {"name": "Production", "roles": [{"name": "observer"}]}, {"name": "Staging", "roles": [{"name": "member"}]}, { "name": "Project for %s" % expected_username, "roles": [{"name": "admin"}], }, ] self.assertEqual(expected_projects, values['projects']) def test_rule_engine_for_groups_and_domain(self): """Should return user's groups and group domain. The GROUP_DOMAIN_ASSERTION should successfully have a match in MAPPING_GROUPS_DOMAIN_OF_USER. This will test the case where a groups with its domain will exist`, and return user's groups and group domain. 
""" mapping = mapping_fixtures.MAPPING_GROUPS_DOMAIN_OF_USER assertion = mapping_fixtures.GROUPS_DOMAIN_ASSERTION rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) self.assertValidMappedUserObject(values) user_name = assertion.get('openstack_user') user_groups = ['group1', 'group2'] # since we know the input assertion groups = values.get('group_names', {}) group_list = [g.get('name') for g in groups] group_ids = values.get('group_ids') name = values.get('user', {}).get('name') self.assertEqual(user_name, name) self.assertEqual(user_groups, group_list) self.assertEqual( [], group_ids, ) class TestUnicodeAssertionData(unit.BaseTestCase): """Ensure that unicode data in the assertion headers works. Bug #1525250 reported that something was not getting correctly encoded and/or decoded when assertion data contained non-ASCII characters. This test class mimics what happens in a real HTTP request. """ def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config(group='federation', assertion_prefix='PFX') def _pull_mapping_rules_from_the_database(self): # NOTE(dstanek): In a live system. The rules are dumped into JSON bytes # before being # stored in the database. Upon retrieval the bytes are # loaded and the resulting dictionary is full of unicode text strings. # Most of tests in this file incorrectly assume the mapping fixture # dictionary is the same as what it would look like coming out of the # database. The string, when coming out of the database, are all text. return jsonutils.loads( jsonutils.dumps(mapping_fixtures.MAPPING_UNICODE) ) def _pull_assertion_from_the_request_headers(self): # NOTE(dstanek): In a live system the bytes for the assertion are # pulled from the HTTP headers. These bytes may be decodable as # ISO-8859-1 according to Section 3.2.4 of RFC 7230. Let's assume # that our web server plugins are correctly encoding the data. 
# Create a dummy application app = flask.Flask(__name__) with app.test_request_context( path='/path', environ_overrides=mapping_fixtures.UNICODE_NAME_ASSERTION, ): data = mapping_utils.get_assertion_params_from_env() # NOTE(dstanek): keystone.auth.plugins.mapped return dict(data) def test_unicode(self): mapping = self._pull_mapping_rules_from_the_database() assertion = self._pull_assertion_from_the_request_headers() rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, mapping['rules']) values = rp.process(assertion) fn = assertion.get('PFX_FirstName') ln = assertion.get('PFX_LastName') full_name = f'{fn} {ln}' user_name = values.get('user', {}).get('name') self.assertEqual(full_name, user_name) class TestMappingLocals(unit.BaseTestCase): mapping_split = { 'rules': [ { 'local': [ { 'user': {'name': '{0}'}, }, {'group': {'id': 'd34db33f'}}, ], 'remote': [{'type': 'idp_username'}], } ] } mapping_combined = { 'rules': [ { 'local': [ {'user': {'name': '{0}'}, 'group': {'id': 'd34db33f'}} ], 'remote': [{'type': 'idp_username'}], } ] } mapping_with_duplicate = { 'rules': [ { 'local': [ {'user': {'name': 'test_{0}'}}, {'user': {'name': '{0}'}}, ], 'remote': [{'type': 'idp_username'}], } ] } assertion = {'idp_username': 'a_user'} def process(self, rules): rp = mapping_utils.RuleProcessor(FAKE_MAPPING_ID, rules) return rp.process(self.assertion) def test_local_list_gets_squashed_into_a_single_dictionary(self): expected = { 'user': {'name': 'a_user', 'type': 'ephemeral'}, 'projects': [], 'group_ids': ['d34db33f'], 'group_names': [], } mapped_split = self.process(self.mapping_split['rules']) mapped_combined = self.process(self.mapping_combined['rules']) self.assertEqual(expected, mapped_split) self.assertEqual(mapped_split, mapped_combined) def test_when_local_list_gets_squashed_first_dict_wins(self): expected = { 'user': {'name': 'test_a_user', 'type': 'ephemeral'}, 'projects': [], 'group_ids': [], 'group_names': [], } mapped = self.process(self.mapping_with_duplicate['rules']) 
self.assertEqual(expected, mapped) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/core.py0000664000175000017500000011304400000000000020752 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import atexit import base64 import contextlib import datetime import functools import hashlib import http.client import json import os import secrets import shutil import socket import sys import unittest import uuid from cryptography.hazmat.primitives.asymmetric import rsa from cryptography.hazmat.primitives import hashes from cryptography.hazmat.primitives.serialization import Encoding from cryptography import x509 import fixtures import flask from flask import testing as flask_testing import ldap from oslo_config import fixture as config_fixture from oslo_context import context as oslo_context from oslo_context import fixture as oslo_ctx_fixture from oslo_log import fixture as log_fixture from oslo_log import log from oslo_utils import timeutils import testtools import keystone.api from keystone.common import context from keystone.common import json_home from keystone.common import provider_api from keystone.common import sql import keystone.conf from keystone import exception from keystone.identity.backends.ldap import common as ks_ldap from keystone import notifications from keystone.resource.backends import base as resource_base 
from keystone.server.flask import application as flask_app from keystone.server.flask import core as keystone_flask from keystone.tests.unit import ksfixtures keystone.conf.configure() keystone.conf.set_config_defaults() PID = str(os.getpid()) TESTSDIR = os.path.dirname(os.path.abspath(__file__)) TESTCONF = os.path.join(TESTSDIR, 'config_files') ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..')) VENDOR = os.path.join(ROOTDIR, 'vendor') ETCDIR = os.path.join(ROOTDIR, 'etc') def _calc_tmpdir(): env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR') if not env_val: return os.path.join(TESTSDIR, 'tmp', PID) return os.path.join(env_val, PID) TMPDIR = _calc_tmpdir() CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs log.register_options(CONF) IN_MEM_DB_CONN_STRING = 'sqlite://' # Strictly matches ISO 8601 timestamps with subsecond precision like: # 2016-06-28T20:48:56.000000Z TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ' TIME_FORMAT_REGEX = r'^\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d{6}Z$' exception._FATAL_EXCEPTION_FORMAT_ERRORS = True os.makedirs(TMPDIR) atexit.register(shutil.rmtree, TMPDIR) class dirs: @staticmethod def root(*p): return os.path.join(ROOTDIR, *p) @staticmethod def etc(*p): return os.path.join(ETCDIR, *p) @staticmethod def tests(*p): return os.path.join(TESTSDIR, *p) @staticmethod def tmp(*p): return os.path.join(TMPDIR, *p) @staticmethod def tests_conf(*p): return os.path.join(TESTCONF, *p) @atexit.register def remove_test_databases(): db = dirs.tmp('test.db') if os.path.exists(db): os.unlink(db) pristine = dirs.tmp('test.db.pristine') if os.path.exists(pristine): os.unlink(pristine) def skip_if_cache_disabled(*sections): """Skip a test if caching is disabled, this is a decorator. Caching can be disabled either globally or for a specific section. In the code fragment:: @skip_if_cache_is_disabled('assignment', 'token') def test_method(*args): ... 
The method test_method would be skipped if caching is disabled globally via the `enabled` option in the `cache` section of the configuration or if the `caching` option is set to false in either `assignment` or `token` sections of the configuration. This decorator can be used with no arguments to only check global caching. If a specified configuration section does not define the `caching` option, this decorator makes the caching enabled if `enabled` option in the `cache` section of the configuration is true. """ def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): if not CONF.cache.enabled: raise unittest.SkipTest('Cache globally disabled.') for s in sections: conf_sec = getattr(CONF, s, None) if conf_sec is not None: if not getattr(conf_sec, 'caching', True): raise unittest.SkipTest('%s caching disabled.' % s) return f(*args, **kwargs) return inner return wrapper def skip_if_cache_is_enabled(*sections): def wrapper(f): @functools.wraps(f) def inner(*args, **kwargs): if CONF.cache.enabled: for s in sections: conf_sec = getattr(CONF, s, None) if conf_sec is not None: if getattr(conf_sec, 'caching', True): raise unittest.SkipTest('%s caching enabled.' 
% s) return f(*args, **kwargs) return inner return wrapper def skip_if_no_multiple_domains_support(f): """Decorator to skip tests for identity drivers limited to one domain.""" @functools.wraps(f) def wrapper(*args, **kwargs): test_obj = args[0] if not test_obj.identity_api.multiple_domains_supported: raise unittest.SkipTest('No multiple domains support') return f(*args, **kwargs) return wrapper class UnexpectedExit(Exception): pass def new_region_ref(parent_region_id=None, **kwargs): ref = { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'parent_region_id': parent_region_id, } ref.update(kwargs) return ref def new_service_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'type': uuid.uuid4().hex, } ref.update(kwargs) return ref NEEDS_REGION_ID = object() def new_endpoint_ref( service_id, interface='public', region_id=NEEDS_REGION_ID, **kwargs ): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'interface': interface, 'service_id': service_id, 'url': 'https://' + uuid.uuid4().hex + '.com', } if region_id is NEEDS_REGION_ID: ref['region_id'] = uuid.uuid4().hex elif region_id is None and kwargs.get('region') is not None: # pre-3.2 form endpoints are not supported by this function raise NotImplementedError("use new_endpoint_ref_with_region") else: ref['region_id'] = region_id ref.update(kwargs) return ref def new_endpoint_group_ref(filters, **kwargs): ref = { 'id': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'filters': filters, 'name': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_endpoint_ref_with_region( service_id, region, interface='public', **kwargs ): """Define an endpoint_ref having a pre-3.2 form. Contains the deprecated 'region' instead of 'region_id'. 
""" ref = new_endpoint_ref( service_id, interface, region=region, region_id='invalid', **kwargs ) del ref['region_id'] return ref def new_domain_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'tags': [], 'options': {}, } ref.update(kwargs) return ref def new_project_ref(domain_id=None, is_domain=False, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, 'domain_id': domain_id, 'is_domain': is_domain, 'tags': [], 'options': {}, } # NOTE(henry-nash): We don't include parent_id in the initial list above # since specifying it is optional depending on where the project sits in # the hierarchy (and a parent_id of None has meaning - i.e. it's a top # level project). ref.update(kwargs) return ref def new_user_ref(domain_id, project_id=None, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'enabled': True, 'domain_id': domain_id, 'email': uuid.uuid4().hex, 'password': uuid.uuid4().hex, } if project_id: ref['default_project_id'] = project_id ref.update(kwargs) return ref def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs): ref = { 'idp_id': idp_id or 'ORG_IDP', 'protocol_id': protocol_id or 'saml2', 'unique_id': uuid.uuid4().hex, 'display_name': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_mapping_ref(mapping_id=None, rules=None, **kwargs): ref = {'id': mapping_id or uuid.uuid4().hex, 'rules': rules or []} ref.update(kwargs) return ref def new_protocol_ref(protocol_id=None, idp_id=None, mapping_id=None, **kwargs): ref = { 'id': protocol_id or 'saml2', 'idp_id': idp_id or 'ORG_IDP', 'mapping_id': mapping_id or uuid.uuid4().hex, } ref.update(kwargs) return ref def new_identity_provider_ref(idp_id=None, **kwargs): ref = { 'id': idp_id or 'ORG_IDP', 'enabled': True, 'description': '', } ref.update(kwargs) return ref def new_service_provider_ref(**kwargs): ref = { 'auth_url': 'https://' + 
uuid.uuid4().hex + '.com', 'enabled': True, 'description': uuid.uuid4().hex, 'sp_url': 'https://' + uuid.uuid4().hex + '.com', 'relay_state_prefix': CONF.saml.relay_state_prefix, } ref.update(kwargs) return ref def new_group_ref(domain_id, **kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'domain_id': domain_id, } ref.update(kwargs) return ref def new_credential_ref(user_id, project_id=None, type='cert', **kwargs): ref = { 'id': uuid.uuid4().hex, 'user_id': user_id, 'type': type, } if project_id: ref['project_id'] = project_id if 'blob' not in kwargs: ref['blob'] = uuid.uuid4().hex ref.update(kwargs) return ref def new_cert_credential(user_id, project_id=None, blob=None, **kwargs): if blob is None: blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex} credential = new_credential_ref( user_id=user_id, project_id=project_id, blob=json.dumps(blob), type='cert', **kwargs, ) return blob, credential def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs): if blob is None: blob = { 'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'trust_id': None, } if 'id' not in kwargs: access = blob['access'].encode('utf-8') kwargs['id'] = hashlib.sha256(access).hexdigest() credential = new_credential_ref( user_id=user_id, project_id=project_id, blob=json.dumps(blob), type='ec2', **kwargs, ) return blob, credential def new_totp_credential(user_id, project_id=None, blob=None): if not blob: # NOTE(notmorgan): 20 bytes of data from secrets.token_bytes for # a totp secret. 
blob = base64.b32encode(secrets.token_bytes(20)).decode('utf-8') credential = new_credential_ref( user_id=user_id, project_id=project_id, blob=blob, type='totp' ) return credential def create_dn( common_name=None, locality_name=None, state_or_province_name=None, organization_name=None, organizational_unit_name=None, country_name=None, street_address=None, domain_component=None, user_id=None, email_address=None, ): oid = x509.NameOID attr = x509.NameAttribute dn = [] if common_name: dn.append(attr(oid.COMMON_NAME, common_name)) if locality_name: dn.append(attr(oid.LOCALITY_NAME, locality_name)) if state_or_province_name: dn.append(attr(oid.STATE_OR_PROVINCE_NAME, state_or_province_name)) if organization_name: dn.append(attr(oid.ORGANIZATION_NAME, organization_name)) if organizational_unit_name: dn.append(attr(oid.ORGANIZATIONAL_UNIT_NAME, organizational_unit_name)) if country_name: dn.append(attr(oid.COUNTRY_NAME, country_name)) if street_address: dn.append(attr(oid.STREET_ADDRESS, street_address)) if domain_component: dn.append(attr(oid.DOMAIN_COMPONENT, domain_component)) if user_id: dn.append(attr(oid.USER_ID, user_id)) if email_address: dn.append(attr(oid.EMAIL_ADDRESS, email_address)) return x509.Name(dn) def update_dn(dn1, dn2): dn1_attrs = {attr.oid: attr for attr in dn1} dn2_attrs = {attr.oid: attr for attr in dn2} dn1_attrs.update(dn2_attrs) return x509.Name([attr for attr in dn1_attrs.values()]) def create_certificate(subject_dn, ca=None, ca_key=None): private_key = rsa.generate_private_key( public_exponent=65537, key_size=2048, ) issuer = ca.subject if ca else subject_dn if not ca_key: ca_key = private_key today = datetime.datetime.today() cert = x509.CertificateBuilder( issuer_name=issuer, subject_name=subject_dn, public_key=private_key.public_key(), serial_number=x509.random_serial_number(), not_valid_before=today, not_valid_after=today + datetime.timedelta(365, 0, 0), ).sign(ca_key, hashes.SHA256()) return cert, private_key def 
create_pem_certificate(subject_dn, ca=None, ca_key=None): cert, _ = create_certificate(subject_dn, ca=ca, ca_key=ca_key) return cert.public_bytes(Encoding.PEM).decode('ascii') def new_application_credential_ref( roles=None, name=None, expires=None, secret=None ): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, } if roles: ref['roles'] = roles if secret: ref['secret'] = secret if isinstance(expires, str): ref['expires_at'] = expires elif isinstance(expires, dict): ref['expires_at'] = ( timeutils.utcnow() + datetime.timedelta(**expires) ).strftime(TIME_FORMAT) elif expires is None: pass else: raise NotImplementedError('Unexpected value for "expires"') return ref def new_role_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'domain_id': None, 'options': {}, } ref.update(kwargs) return ref def new_policy_ref(**kwargs): ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, 'enabled': True, # Store serialized JSON data as the blob to mimic real world usage. 
'blob': json.dumps( { 'data': uuid.uuid4().hex, } ), 'type': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_domain_config_ref(**kwargs): ref = { "identity": {"driver": "ldap"}, "ldap": { "url": "ldap://myldap.com:389/", "user_tree_dn": "ou=Users,dc=my_new_root,dc=org", }, } ref.update(kwargs) return ref def new_trust_ref( trustor_user_id, trustee_user_id, project_id=None, impersonation=None, expires=None, role_ids=None, role_names=None, remaining_uses=None, allow_redelegation=False, redelegation_count=None, **kwargs, ): ref = { 'id': uuid.uuid4().hex, 'trustor_user_id': trustor_user_id, 'trustee_user_id': trustee_user_id, 'impersonation': impersonation or False, 'project_id': project_id, 'remaining_uses': remaining_uses, 'allow_redelegation': allow_redelegation, } if isinstance(redelegation_count, int): ref.update(redelegation_count=redelegation_count) if isinstance(expires, str): ref['expires_at'] = expires elif isinstance(expires, dict): ref['expires_at'] = ( timeutils.utcnow() + datetime.timedelta(**expires) ).strftime(TIME_FORMAT) elif expires is None: pass else: raise NotImplementedError('Unexpected value for "expires"') role_ids = role_ids or [] role_names = role_names or [] if role_ids or role_names: ref['roles'] = [] for role_id in role_ids: ref['roles'].append({'id': role_id}) for role_name in role_names: ref['roles'].append({'name': role_name}) ref.update(kwargs) return ref def new_registered_limit_ref(**kwargs): ref = { 'service_id': uuid.uuid4().hex, 'resource_name': uuid.uuid4().hex, 'default_limit': 10, 'description': uuid.uuid4().hex, } ref.update(kwargs) return ref def new_limit_ref(**kwargs): ref = { 'service_id': uuid.uuid4().hex, 'resource_name': uuid.uuid4().hex, 'resource_limit': 10, 'description': uuid.uuid4().hex, } ref.update(kwargs) return ref def create_user(api, domain_id, **kwargs): """Create a user via the API. Keep the created password. The password is saved and restored when api.create_user() is called. 
Only use this routine if there is a requirement for the user object to have a valid password after api.create_user() is called. """ user = new_user_ref(domain_id=domain_id, **kwargs) password = user['password'] user = api.create_user(user) user['password'] = password return user def _assert_expected_status(f): """Add `expected_status_code` as an argument to the test_client methods. `expected_status_code` must be passed as a kwarg. """ TEAPOT_HTTP_STATUS = 418 _default_expected_responses = { 'get': http.client.OK, 'head': http.client.OK, 'post': http.client.CREATED, 'put': http.client.NO_CONTENT, 'patch': http.client.OK, 'delete': http.client.NO_CONTENT, } @functools.wraps(f) def inner(*args, **kwargs): # Get the "expected_status_code" kwarg if supplied. If not supplied use # the `_default_expected_response` mapping, or fall through to # "HTTP OK" if the method is somehow unknown. expected_status_code = kwargs.pop( 'expected_status_code', _default_expected_responses.get( f.__name__.lower(), http.client.OK ), ) response = f(*args, **kwargs) # Logic to verify the response object is sane. Expand as needed if response.status_code == TEAPOT_HTTP_STATUS: # NOTE(morgan): We use 418 internally during tests to indicate # an un-routed HTTP call was made. This allows us to avoid # misinterpreting HTTP 404 from Flask and HTTP 404 from a # resource that is not found (e.g. USER NOT FOUND) programmatically raise AssertionError("I AM A TEAPOT(418): %s" % response.data) if response.status_code != expected_status_code: raise AssertionError( 'Expected HTTP Status does not match observed HTTP ' 'Status: %(expected)s != %(observed)s (%(data)s)' % { 'expected': expected_status_code, 'observed': response.status_code, 'data': response.data, } ) # return the original response object return response return inner class KeystoneFlaskTestClient(flask_testing.FlaskClient): """Subclass of flask.testing.FlaskClient implementing assertions. 
Implements custom "expected" HTTP Status assertion for GET/HEAD/PUT/PATCH/DELETE. """ @_assert_expected_status def get(self, *args, **kwargs): return super().get(*args, **kwargs) @_assert_expected_status def head(self, *args, **kwargs): return super().head(*args, **kwargs) @_assert_expected_status def post(self, *args, **kwargs): return super().post(*args, **kwargs) @_assert_expected_status def patch(self, *args, **kwargs): return super().patch(*args, **kwargs) @_assert_expected_status def put(self, *args, **kwargs): return super().put(*args, **kwargs) @_assert_expected_status def delete(self, *args, **kwargs): return super().delete(*args, **kwargs) class BaseTestCase(testtools.TestCase): """Light weight base test class. This is a placeholder that will eventually go away once the setup/teardown in TestCase is properly trimmed down to the bare essentials. This is really just a play to speed up the tests by eliminating unnecessary work. """ def setUp(self): super().setUp() self.useFixture(fixtures.NestedTempfile()) self.useFixture(fixtures.TempHomeDir()) self.useFixture( fixtures.MockPatchObject(sys, 'exit', side_effect=UnexpectedExit) ) self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.stdlog = self.useFixture(ksfixtures.StandardLogging()) self.useFixture(ksfixtures.WarningsFixture()) # Ensure we have an empty threadlocal context at the start of each # test. self.assertIsNone(oslo_context.get_current()) self.useFixture(oslo_ctx_fixture.ClearRequestContext()) orig_debug_level = ldap.get_option(ldap.OPT_DEBUG_LEVEL) self.addCleanup( ldap.set_option, ldap.OPT_DEBUG_LEVEL, orig_debug_level ) orig_tls_cacertfile = ldap.get_option(ldap.OPT_X_TLS_CACERTFILE) if orig_tls_cacertfile is None: orig_tls_cacertfile = '' self.addCleanup( ldap.set_option, ldap.OPT_X_TLS_CACERTFILE, orig_tls_cacertfile ) orig_tls_cacertdir = ldap.get_option(ldap.OPT_X_TLS_CACERTDIR) # Setting orig_tls_cacertdir to None is not allowed. 
if orig_tls_cacertdir is None: orig_tls_cacertdir = '' self.addCleanup( ldap.set_option, ldap.OPT_X_TLS_CACERTDIR, orig_tls_cacertdir ) orig_tls_require_cert = ldap.get_option(ldap.OPT_X_TLS_REQUIRE_CERT) self.addCleanup( ldap.set_option, ldap.OPT_X_TLS_REQUIRE_CERT, orig_tls_require_cert ) self.addCleanup(ks_ldap.PooledLDAPHandler.connection_pools.clear) def cleanup_instance(self, *names): """Create a function suitable for use with self.addCleanup. :returns: a callable that uses a closure to delete instance attributes """ def cleanup(): for name in names: # TODO(dstanek): remove this 'if' statement once # load_backend in test_backend_ldap is only called once # per test if hasattr(self, name): delattr(self, name) return cleanup def skip_if_env_not_set(self, env_var): if not os.environ.get(env_var): self.skipTest('Env variable %s is not set.' % env_var) def skip_test_overrides(self, *args, **kwargs): if self._check_for_method_in_parents(self._testMethodName): return super().skipTest(*args, **kwargs) raise Exception( '%r is not a previously defined test method' % self._testMethodName ) def _check_for_method_in_parents(self, name): # skip first to get to parents for cls in self.__class__.__mro__[1:]: if hasattr(cls, name): return True return False def loadapp(self, name='public'): app = flask_app.application_factory(name) app.testing = True app.test_client_class = KeystoneFlaskTestClient # NOTE(morgan): any unexpected 404s, not handled by the routed apis, # is a hard error and should not pass testing. def page_not_found_teapot(e): content = ( 'TEST PROGRAMMING ERROR - Reached a 404 from an unrouted (`%s`' ') path. Be sure the test is requesting the right resource ' 'and that all blueprints are registered with the flask app.' 
% flask.request.url ) return content, 418 app.register_error_handler(404, page_not_found_teapot) self.test_client = app.test_client self.test_request_context = app.test_request_context self.cleanup_instance('test_request_context') self.cleanup_instance('test_client') return keystone_flask.setup_app_middleware(app) class TestCase(BaseTestCase): def config_files(self): return [] def _policy_fixture(self): return ksfixtures.Policy(self.config_fixture) @contextlib.contextmanager def make_request(self, path='/', **kwargs): # standup a fake app and request context with a passed in/known # environment. is_admin = kwargs.pop('is_admin', False) environ = kwargs.setdefault('environ', {}) query_string = kwargs.pop('query_string', None) if query_string: # Make sure query string is properly added to the context path = f'{path}?{query_string}' if not environ.get(context.REQUEST_CONTEXT_ENV): environ[context.REQUEST_CONTEXT_ENV] = context.RequestContext( is_admin=is_admin, authenticated=kwargs.pop('authenticated', True), ) # Create a dummy flask app to work with app = flask.Flask(__name__) with app.test_request_context(path=path, environ_overrides=environ): yield def config_overrides(self): # NOTE(morganfainberg): enforce config_overrides can only ever be # called a single time. assert self.__config_overrides_called is False self.__config_overrides_called = True signing_certfile = 'examples/pki/certs/signing_cert.pem' signing_keyfile = 'examples/pki/private/signing_key.pem' self.useFixture(self._policy_fixture()) self.config_fixture.config( # TODO(morganfainberg): Make Cache Testing a separate test case # in tempest, and move it out of the base unit tests. 
group='cache', backend='dogpile.cache.memory', enabled=True, proxies=['oslo_cache.testing.CacheIsolatingProxy'], ) self.config_fixture.config( group='catalog', driver='sql', template_file=dirs.tests('default_catalog.templates'), ) self.config_fixture.config( group='saml', certfile=signing_certfile, keyfile=signing_keyfile ) self.config_fixture.config( default_log_levels=[ 'amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'routes.middleware=INFO', 'stevedore.extension=INFO', 'keystone.notifications=INFO', 'keystone.identity.backends.ldap.common=INFO', ] ) # NOTE(notmorgan): Set password rounds low here to ensure speedy # tests. This is explicitly set because the tests here are not testing # the integrity of the password hashing, just that the correct form # of hashing has been used. Note that 4 is the lowest for bcrypt # allowed in the `[identity] password_hash_rounds` setting self.config_fixture.config(group='identity', password_hash_rounds=4) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_receipts', CONF.fernet_receipts.max_active_keys, ) ) def _assert_config_overrides_called(self): assert self.__config_overrides_called is True def setUp(self): super().setUp() self.__config_overrides_called = False self.__load_backends_called = False self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.addCleanup(delattr, self, 'config_fixture') self.config(self.config_files()) # NOTE(morganfainberg): mock the auth plugin setup to use the config # fixture which automatically unregisters options when performing # cleanup. 
def mocked_register_auth_plugin_opt(conf, opt): self.config_fixture.register_opt(opt, group='auth') self.useFixture( fixtures.MockPatchObject( keystone.conf.auth, '_register_auth_plugin_opt', new=mocked_register_auth_plugin_opt, ) ) self.config_overrides() # explicitly load auth configuration keystone.conf.auth.setup_authentication() # NOTE(morganfainberg): ensure config_overrides has been called. self.addCleanup(self._assert_config_overrides_called) self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) # NOTE(morganfainberg): This code is a copy from the oslo-incubator # log module. This is not in a function or otherwise available to use # without having a CONF object to setup logging. This should help to # reduce the log size by limiting what we log (similar to how Keystone # would run under mod_wsgi). for pair in CONF.default_log_levels: mod, _sep, level_name = pair.partition('=') logger = log.getLogger(mod) logger.logger.setLevel(level_name) self.useFixture(ksfixtures.Cache()) # Clear the registry of providers so that providers from previous # tests aren't used. self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances) # Clear the registry of JSON Home Resources self.addCleanup(json_home.JsonHomeResources._reset) # Ensure Notification subscriptions and resource types are empty self.addCleanup(notifications.clear_subscribers) self.addCleanup(notifications.reset_notifier) def config(self, config_files): sql.initialize() CONF(args=[], project='keystone', default_config_files=config_files) def load_backends(self): """Initialize each manager and assigns them to an attribute.""" # TODO(morgan): Ensure our tests only ever call load_backends # a single time via this method. for now just clear the registry # if we are reloading. provider_api.ProviderAPIs._clear_registry_instances() self.useFixture(ksfixtures.BackendLoader(self)) def load_fixtures(self, fixtures): """Hacky basic and naive fixture loading based on a python module. 
Expects that the various APIs into the various services are already defined on `self`. """ # NOTE(dstanek): create a list of attribute names to be removed # from this instance during cleanup fixtures_to_cleanup = [] # TODO(termie): doing something from json, probably based on Django's # loaddata will be much preferred. if ( hasattr(self, 'identity_api') and hasattr(self, 'assignment_api') and hasattr(self, 'resource_api') ): try: PROVIDERS.resource_api.create_domain( resource_base.NULL_DOMAIN_ID, fixtures.ROOT_DOMAIN ) except exception.Conflict: # the root domain already exists, skip now. pass for domain in fixtures.DOMAINS: rv = PROVIDERS.resource_api.create_domain(domain['id'], domain) attrname = 'domain_%s' % domain['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for project in fixtures.PROJECTS: project_attr_name = 'project_%s' % project['name'].lower() rv = PROVIDERS.resource_api.create_project( project['id'], project ) setattr(self, project_attr_name, rv) fixtures_to_cleanup.append(project_attr_name) for role in fixtures.ROLES: rv = PROVIDERS.role_api.create_role(role['id'], role) attrname = 'role_%s' % role['name'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for user in fixtures.USERS: user_copy = user.copy() projects = user_copy.pop('projects') # For users, the manager layer will generate the ID user_copy = PROVIDERS.identity_api.create_user(user_copy) # Our tests expect that the password is still in the user # record so that they can reference it, so put it back into # the dict returned. user_copy['password'] = user['password'] # fixtures.ROLES[2] is the _member_ role. for project_id in projects: PROVIDERS.assignment_api.add_role_to_user_and_project( user_copy['id'], project_id, fixtures.ROLES[2]['id'] ) # Use the ID from the fixture as the attribute name, so # that our tests can easily reference each user dict, while # the ID in the dict will be the real public ID. 
attrname = 'user_%s' % user['name'] setattr(self, attrname, user_copy) fixtures_to_cleanup.append(attrname) for role_assignment in fixtures.ROLE_ASSIGNMENTS: role_id = role_assignment['role_id'] user = role_assignment['user'] project_id = role_assignment['project_id'] user_id = getattr(self, 'user_%s' % user)['id'] PROVIDERS.assignment_api.add_role_to_user_and_project( user_id, project_id, role_id ) self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup)) def assertCloseEnoughForGovernmentWork(self, a, b, delta=3): """Assert that two datetimes are nearly equal within a small delta. :param delta: Maximum allowable time delta, defined in seconds. """ if a == b: # Short-circuit if the values are the same. return msg = f'{a} != {b} within {delta} delta' self.assertLessEqual(abs(a - b).seconds, delta, msg) def assertTimestampEqual(self, expected, value): # Compare two timestamps but ignore the microseconds part # of the expected timestamp. Keystone does not track microseconds and # is working to eliminate microseconds from it's datetimes used. expected = timeutils.parse_isotime(expected).replace(microsecond=0) value = timeutils.parse_isotime(value).replace(microsecond=0) self.assertEqual(expected, value, f"{expected} != {value}") def assertNotEmpty(self, iterable): self.assertGreater(len(iterable), 0) def assertUserDictEqual(self, expected, observed, message=''): """Assert that a user dict is equal to another user dict. User dictionaries have some variable values that should be ignored in the comparison. This method is a helper that strips those elements out when comparing the user dictionary. This normalized these differences that should not change the comparison. """ # NOTE(notmorgan): An empty option list is the same as no options being # specified in the user_ref. This removes options if it is empty in # observed if options is not specified in the expected value. 
if ( 'options' in observed and not observed['options'] and 'options' not in expected ): observed = observed.copy() del observed['options'] self.assertDictEqual(expected, observed, message) @property def ipv6_enabled(self): if socket.has_ipv6: sock = None try: sock = socket.socket(socket.AF_INET6) # NOTE(Mouad): Try to bind to IPv6 loopback ip address. sock.bind(("::1", 0)) return True except OSError: pass finally: if sock: sock.close() return False def skip_if_no_ipv6(self): if not self.ipv6_enabled: raise self.skipTest("IPv6 is not enabled in the system") class SQLDriverOverrides: """A mixin for consolidating sql-specific test overrides.""" def config_overrides(self): super().config_overrides() # SQL specific driver overrides self.config_fixture.config(group='catalog', driver='sql') self.config_fixture.config(group='identity', driver='sql') self.config_fixture.config(group='policy', driver='sql') self.config_fixture.config(group='trust', driver='sql') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/credential/0000775000175000017500000000000000000000000021557 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/credential/__init__.py0000664000175000017500000000000000000000000023656 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/credential/test_backend_sql.py0000664000175000017500000001047300000000000025443 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from oslo_config import fixture as config_fixture from keystone.common import provider_api from keystone.credential.backends import sql as credential_sql from keystone.credential.providers import fernet as credential_provider from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database PROVIDERS = provider_api.ProviderAPIs class SqlTests(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() # populate the engine with tables & fixtures self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files class SqlCredential(SqlTests): def _create_credential_with_user_id(self, user_id=None): if not user_id: user_id = uuid.uuid4().hex credential = unit.new_credential_ref( user_id=user_id, extra=uuid.uuid4().hex, type=uuid.uuid4().hex ) PROVIDERS.credential_api.create_credential( credential['id'], credential ) return credential def _validate_credential_list( self, retrieved_credentials, expected_credentials ): self.assertEqual(len(expected_credentials), len(retrieved_credentials)) retrieved_ids = [c['id'] for c in retrieved_credentials] for cred in expected_credentials: self.assertIn(cred['id'], retrieved_ids) def setUp(self): super().setUp() self.useFixture( 
ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_provider.MAX_ACTIVE_KEYS, ) ) self.credentials = [] self.user_credentials = [] # setup 3 credentials with random user ids for _ in range(3): cred = self._create_credential_with_user_id() self.user_credentials.append(cred) self.credentials.append(cred) # setup 3 credentials with specific user ids for _ in range(3): cred = self._create_credential_with_user_id(self.user_foo['id']) self.user_credentials.append(cred) self.credentials.append(cred) def test_backend_credential_sql_hints_none(self): credentials = PROVIDERS.credential_api.list_credentials(hints=None) self._validate_credential_list(credentials, self.user_credentials) def test_backend_credential_sql_no_hints(self): credentials = PROVIDERS.credential_api.list_credentials() self._validate_credential_list(credentials, self.user_credentials) def test_backend_credential_sql_encrypted_string(self): cred_dict = { 'id': uuid.uuid4().hex, 'type': uuid.uuid4().hex, 'hash': uuid.uuid4().hex, 'encrypted_blob': b'randomdata', } ref = credential_sql.CredentialModel.from_dict(cred_dict) # Make sure CredentialModel is handing over a text string # to the database. To avoid encoding issues self.assertIsInstance(ref.encrypted_blob, str) def test_credential_limits(self): config_fixture_ = self.user = self.useFixture(config_fixture.Config()) config_fixture_.config(group='credential', user_limit=4) self._create_credential_with_user_id(self.user_foo['id']) self.assertRaises( exception.CredentialLimitExceeded, self._create_credential_with_user_id, self.user_foo['id'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/credential/test_fernet_provider.py0000664000175000017500000000717700000000000026401 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import uuid import fixtures from oslo_log import log from keystone.common import fernet_utils from keystone.credential.providers import fernet as credential_fernet from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database class TestFernetCredentialProvider(unit.TestCase): def setUp(self): super().setUp() self.provider = credential_fernet.Provider() self.useFixture(database.Database()) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) def test_valid_data_encryption(self): blob = uuid.uuid4().hex encrypted_blob, primary_key_hash = self.provider.encrypt(blob) decrypted_blob = self.provider.decrypt(encrypted_blob) self.assertNotEqual(blob, encrypted_blob) self.assertEqual(blob, decrypted_blob) self.assertIsNotNone(primary_key_hash) class TestFernetCredentialProviderWithNullKey(unit.TestCase): def setUp(self): super().setUp() self.provider = credential_fernet.Provider() self.useFixture(database.Database()) # Only do this to set the key_repository location in configuration. To # test the null key path, we need to make it so that the key repository # doesn't actually exist. If you're running the tests locally and have # bootstrapped a credential key repository in # `/etc/keystone/credential-keys` this will fail unless we override the # default. 
self.config_fixture.config( group='credential', key_repository=self.useFixture(fixtures.TempDir()).path, ) def test_encryption_with_null_key(self): null_key = fernet_utils.NULL_KEY # NOTE(lhinds) This is marked as #nosec since bandit will see SHA1 # which is marked insecure. Keystone uses SHA1 in this case as part of # HMAC-SHA1 which is currently not insecure but will still get # caught when scanning with bandit. null_key_hash = hashlib.sha1(null_key).hexdigest() # nosec blob = uuid.uuid4().hex encrypted_blob, primary_key_hash = self.provider.encrypt(blob) self.assertEqual(null_key_hash, primary_key_hash) self.assertNotEqual(blob, encrypted_blob) decrypted_blob = self.provider.decrypt(encrypted_blob) self.assertEqual(blob, decrypted_blob) def test_warning_is_logged_when_encrypting_with_null_key(self): blob = uuid.uuid4().hex logging_fixture = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) expected_output = ( 'Encrypting credentials with the null key. Please properly ' 'encrypt credentials using `keystone-manage credential_setup`, ' '`keystone-manage credential_migrate`, and `keystone-manage ' 'credential_rotate`' ) encrypted_blob, primary_key_hash = self.provider.encrypt(blob) self.assertIn(expected_output, logging_fixture.output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/default_catalog.templates0000664000175000017500000000141000000000000024477 0ustar00zuulzuul00000000000000# config for templated.Catalog, using camelCase because I don't want to do # translations for keystone compat catalog.RegionOne.identity.publicURL = http://localhost:5000/v3 catalog.RegionOne.identity.adminURL = http://localhost:35357/v3 catalog.RegionOne.identity.internalURL = http://localhost:35357/v3 catalog.RegionOne.identity.name = 'Identity Service' catalog.RegionOne.identity.id = 1 # fake compute service for now to help novaclient tests work catalog.RegionOne.compute.publicURL = 
http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.adminURL = http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.internalURL = http://localhost:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.name = 'Compute Service' catalog.RegionOne.compute.id = 2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/default_catalog_multi_region.templates0000664000175000017500000000263300000000000027264 0ustar00zuulzuul00000000000000# config for templated.Catalog, using camelCase because I don't want to do # translations for keystone compat catalog.RegionOne.identity.publicURL = http://region-one:5000/v3 catalog.RegionOne.identity.adminURL = http://region-one:35357/v3 catalog.RegionOne.identity.internalURL = http://region-one:35357/v3 catalog.RegionOne.identity.name = 'Identity Service' catalog.RegionOne.identity.id = 1 # fake compute service for now to help novaclient tests work catalog.RegionOne.compute.publicURL = http://region-one:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.adminURL = http://region-one:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.internalURL = http://region-one:8774/v1.1/$(tenant_id)s catalog.RegionOne.compute.name = 'Compute Service' catalog.RegionOne.compute.id = 2 # second region for multi-region testing catalog.RegionTwo.identity.publicURL = http://region-two:5000/v3 catalog.RegionTwo.identity.adminURL = http://region-two:35357/v3 catalog.RegionTwo.identity.internalURL = http://region-two:35357/v3 catalog.RegionTwo.identity.name = 'Identity Service' catalog.RegionTwo.identity.id = 1 catalog.RegionTwo.compute.publicURL = http://region-two:8774/v1.1/$(tenant_id)s catalog.RegionTwo.compute.adminURL = http://region-two:8774/v1.1/$(tenant_id)s catalog.RegionTwo.compute.internalURL = http://region-two:8774/v1.1/$(tenant_id)s catalog.RegionTwo.compute.name = 'Compute Service' catalog.RegionTwo.compute.id = 2 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/default_fixtures.py0000664000175000017500000001225200000000000023376 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(dolph): please try to avoid additional fixtures if possible; test suite # performance may be negatively affected. import uuid BAR_PROJECT_ID = uuid.uuid4().hex BAZ_PROJECT_ID = uuid.uuid4().hex MTU_PROJECT_ID = uuid.uuid4().hex SERVICE_PROJECT_ID = uuid.uuid4().hex DEFAULT_DOMAIN_ID = 'default' ADMIN_ROLE_ID = uuid.uuid4().hex MEMBER_ROLE_ID = uuid.uuid4().hex OTHER_ROLE_ID = uuid.uuid4().hex PROJECTS = [ { 'id': BAR_PROJECT_ID, 'name': 'BAR', 'domain_id': DEFAULT_DOMAIN_ID, 'description': 'description', 'enabled': True, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, 'tags': [], 'options': {}, }, { 'id': BAZ_PROJECT_ID, 'name': 'BAZ', 'domain_id': DEFAULT_DOMAIN_ID, 'description': 'description', 'enabled': True, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, 'tags': [], 'options': {}, }, { 'id': MTU_PROJECT_ID, 'name': 'MTU', 'description': 'description', 'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID, 'parent_id': DEFAULT_DOMAIN_ID, 'is_domain': False, 'tags': [], 'options': {}, }, { 'id': SERVICE_PROJECT_ID, 'name': 'service', 'description': 'description', 'enabled': True, 'domain_id': DEFAULT_DOMAIN_ID, 'parent_id': DEFAULT_DOMAIN_ID, 
'is_domain': False, 'tags': [], 'options': {}, }, ] # NOTE(ja): a role of keystone_admin is done in setUp USERS = [ # NOTE(morganfainberg): Admin user for replacing admin_token_auth { 'id': uuid.uuid4().hex, 'name': 'req_admin', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'password', 'projects': [], 'enabled': True, 'options': {}, }, { 'id': uuid.uuid4().hex, 'name': 'foo', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'foo2', 'projects': [BAR_PROJECT_ID], 'enabled': True, 'email': 'foo@bar.com', 'options': {}, }, { 'id': uuid.uuid4().hex, 'name': 'two', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'two2', 'enabled': True, 'default_project_id': BAZ_PROJECT_ID, 'projects': [BAZ_PROJECT_ID], 'email': 'two@three.com', 'options': {}, }, { 'id': uuid.uuid4().hex, 'name': 'badguy', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'bad', 'enabled': False, 'default_project_id': BAZ_PROJECT_ID, 'projects': [BAZ_PROJECT_ID], 'email': 'bad@guy.com', 'options': {}, }, { 'id': uuid.uuid4().hex, 'name': 'sna', 'domain_id': DEFAULT_DOMAIN_ID, 'password': 'snafu', 'enabled': True, 'projects': [BAR_PROJECT_ID], 'email': 'sna@snl.coom', 'options': {}, }, ] ROLES = [ { 'id': ADMIN_ROLE_ID, 'name': 'admin', 'domain_id': None, }, { 'id': MEMBER_ROLE_ID, 'name': 'member', 'domain_id': None, }, { 'id': '9fe2ff9ee4384b1894a90878d3e92bab', 'name': '_member_', 'domain_id': None, }, { 'id': OTHER_ROLE_ID, 'name': 'other', 'domain_id': None, }, { 'id': uuid.uuid4().hex, 'name': 'browser', 'domain_id': None, }, { 'id': uuid.uuid4().hex, 'name': 'writer', 'domain_id': None, }, { 'id': uuid.uuid4().hex, 'name': 'service', 'domain_id': None, }, ] # NOTE(morganfainberg): Admin assignment for replacing admin_token_auth ROLE_ASSIGNMENTS = [ { 'user': 'req_admin', 'project_id': SERVICE_PROJECT_ID, 'role_id': ADMIN_ROLE_ID, }, ] # TODO(wxy): We should add the root domain ``<>`` as well # when the FKs is enabled for the test. Merge ROOT_DOMAIN into DOMAINS once all # test enable FKs. 
ROOT_DOMAIN = { 'enabled': True, 'id': '<>', 'name': '<>', } DOMAINS = [ { 'description': ('The default domain'), 'enabled': True, 'id': DEFAULT_DOMAIN_ID, 'name': 'Default', } ] SERVICES = [ { 'id': uuid.uuid4().hex, 'type': 'type_one', 'enabled': True, 'extra': { 'description': 'This is a service for test.', 'name': 'service_one', }, } ] REGIONS = [{'id': 'region_one'}, {'id': 'region_two'}] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/endpoint_policy/0000775000175000017500000000000000000000000022644 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/endpoint_policy/__init__.py0000664000175000017500000000000000000000000024743 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/endpoint_policy/backends/0000775000175000017500000000000000000000000024416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/endpoint_policy/backends/__init__.py0000664000175000017500000000000000000000000026515 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/endpoint_policy/backends/test_base.py0000664000175000017500000001437700000000000026755 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone import exception class DriverTestCase: """Test cases to validate the endpoint policy driver behavior.""" @property def driver(self): raise exception.NotImplemented() def create_association(self, **kwargs): association = { 'policy_id': uuid.uuid4().hex, 'endpoint_id': None, 'service_id': None, 'region_id': None, } association.update(kwargs) self.driver.create_policy_association(**association) return association def test_create_policy_association(self): association = self.create_association(endpoint_id=uuid.uuid4().hex) self.driver.check_policy_association(**association) association = self.create_association( service_id=uuid.uuid4().hex, region_id=uuid.uuid4().hex ) self.driver.check_policy_association(**association) association = self.create_association(service_id=uuid.uuid4().hex) self.driver.check_policy_association(**association) def test_recreate_policy_association(self): # Creating a policy association to a target that already has a policy # associated to it will cause the original policy to be overridden original_association = self.create_association( service_id=uuid.uuid4().hex ) override_association = original_association.copy() override_association['policy_id'] = uuid.uuid4().hex self.driver.create_policy_association(**override_association) self.driver.check_policy_association(**override_association) self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **original_association ) def test_check_policy_association(self): association = self.create_association( service_id=uuid.uuid4().hex, 
region_id=uuid.uuid4().hex ) self.driver.check_policy_association(**association) # An association is uniquely identified by its target. Omitting any # attribute (region_id in this case) will result in a different check association.pop('region_id') self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) def test_delete_policy_association(self): association = self.create_association(endpoint_id=uuid.uuid4().hex) self.driver.delete_policy_association(**association) self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) def test_get_policy_association(self): association = self.create_association(service_id=uuid.uuid4().hex) # Extract the policy_id from the association and query it by the target policy_id = association.pop('policy_id') association_ref = self.driver.get_policy_association(**association) self.assertEqual({'policy_id': (policy_id,)}, association_ref) def test_list_associations_for_policy(self): policy_id = uuid.uuid4().hex first = self.create_association( endpoint_id=uuid.uuid4().hex, policy_id=policy_id ) second = self.create_association( service_id=uuid.uuid4().hex, policy_id=policy_id ) associations = self.driver.list_associations_for_policy(policy_id) self.assertCountEqual([first, second], associations) def test_delete_association_by_endpoint(self): endpoint_id = uuid.uuid4().hex associations = [ self.create_association(endpoint_id=endpoint_id), self.create_association(endpoint_id=endpoint_id), ] self.driver.delete_association_by_endpoint(endpoint_id) for association in associations: self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) def test_delete_association_by_service(self): service_id = uuid.uuid4().hex associations = [ self.create_association(service_id=service_id), self.create_association(service_id=service_id), ] self.driver.delete_association_by_service(service_id) for 
association in associations: self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) def test_delete_association_by_region(self): region_id = uuid.uuid4().hex first = self.create_association( service_id=uuid.uuid4().hex, region_id=region_id ) second = self.create_association( service_id=uuid.uuid4().hex, region_id=region_id ) self.driver.delete_association_by_region(region_id) for association in [first, second]: self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) def test_delete_association_by_policy(self): policy_id = uuid.uuid4().hex first = self.create_association( endpoint_id=uuid.uuid4().hex, policy_id=policy_id ) second = self.create_association( service_id=uuid.uuid4().hex, policy_id=policy_id ) self.driver.delete_association_by_policy(policy_id) for association in [first, second]: self.assertRaises( exception.PolicyAssociationNotFound, self.driver.check_policy_association, **association ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/endpoint_policy/backends/test_sql.py0000664000175000017500000000304700000000000026632 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone.endpoint_policy.backends import sql as sql_driver from keystone.tests import unit from keystone.tests.unit.backend import core_sql from keystone.tests.unit.endpoint_policy.backends import test_base from keystone.tests.unit.ksfixtures import database class SQLModelTestCase(core_sql.BaseBackendSqlModels): """Test cases to validate the table structure.""" def test_policy_association_model(self): cols = ( ('id', sql.String, 64), ('policy_id', sql.String, 64), ('endpoint_id', sql.String, 64), ('service_id', sql.String, 64), ('region_id', sql.String, 64), ) self.assertExpectedSchema('policy_association', cols) class SQLDriverTestCase(test_base.DriverTestCase, unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self._driver = sql_driver.EndpointPolicy() @property def driver(self): return self._driver ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.562113 keystone-26.0.0/keystone/tests/unit/external/0000775000175000017500000000000000000000000021267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/external/README.rst0000664000175000017500000000103000000000000022750 0ustar00zuulzuul00000000000000This directory contains interface tests for external libraries. The goal is not to test every possible path through a library's code and get 100% coverage. It's to give us a level of confidence that their general interface remains the same through version upgrades. This gives us a place to put these tests without having to litter our own tests with assertions that are not directly related to the code under test. The expectations for the external library are all in one place so it makes it easier for us to find out what they are. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/external/__init__.py0000664000175000017500000000000000000000000023366 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/external/test_timeutils.py0000664000175000017500000000223100000000000024715 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import keystone.tests.unit as tests class TestTimeUtils(tests.BaseTestCase): def test_parsing_date_strings_returns_a_datetime(self): example_date_str = '2015-09-23T04:45:37.196621Z' dt = datetime.datetime.strptime(example_date_str, tests.TIME_FORMAT) self.assertIsInstance(dt, datetime.datetime) def test_parsing_invalid_date_strings_raises_a_ValueError(self): example_date_str = '' simple_format = '%Y' self.assertRaises( ValueError, datetime.datetime.strptime, example_date_str, simple_format, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/fakeldap.py0000664000175000017500000005631700000000000021602 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake LDAP server for test harness. This class does very little error checking, and knows nothing about ldap class definitions. It implements the minimum emulation of the python ldap library to work with keystone. """ import random import re import shelve import ldap from oslo_log import log import keystone.conf from keystone import exception from keystone.identity.backends.ldap import common SCOPE_NAMES = { ldap.SCOPE_BASE: 'SCOPE_BASE', ldap.SCOPE_ONELEVEL: 'SCOPE_ONELEVEL', ldap.SCOPE_SUBTREE: 'SCOPE_SUBTREE', } LOG = log.getLogger(__name__) CONF = keystone.conf.CONF def _internal_attr(attr_name, value_or_values): def normalize_value(value): return common.utf8_decode(value) def normalize_dn(dn): # Capitalize the attribute names as an LDAP server might. # NOTE(blk-u): Special case for this tested value, used with # test_user_id_comma. The call to str2dn here isn't always correct # here, because `dn` is escaped for an LDAP filter. str2dn() normally # works only because there's no special characters in `dn`. if dn == 'cn=Doe\\5c, John,ou=Users,cn=example,cn=com': return 'CN=Doe\\, John,OU=Users,CN=example,CN=com' # NOTE(blk-u): Another special case for this tested value. When a # roleOccupant has an escaped comma, it gets converted to \2C. 
if dn == 'cn=Doe\\, John,ou=Users,cn=example,cn=com': return 'CN=Doe\\2C John,OU=Users,CN=example,CN=com' try: dn = ldap.dn.str2dn(dn) except ldap.DECODING_ERROR: # NOTE(amakarov): In case of IDs instead of DNs in group members # they must be handled as regular values. return normalize_value(dn) norm = [] for part in dn: name, val, i = part[0] name = name.upper() norm.append([(name, val, i)]) return ldap.dn.dn2str(norm) if attr_name in ('member', 'roleOccupant'): attr_fn = normalize_dn else: attr_fn = normalize_value if isinstance(value_or_values, list): return [attr_fn(x) for x in value_or_values] return [attr_fn(value_or_values)] def _match_query(query, attrs, attrs_checked): """Match an ldap query to an attribute dictionary. The characters &, |, and ! are supported in the query. No syntax checking is performed, so malformed queries will not work correctly. """ # cut off the parentheses inner = query[1:-1] if inner.startswith(('&', '|')): if inner[0] == '&': matchfn = all else: matchfn = any # cut off the & or | groups = _paren_groups(inner[1:]) return matchfn( _match_query(group, attrs, attrs_checked) for group in groups ) if inner.startswith('!'): # cut off the ! and the nested parentheses return not _match_query(query[2:-1], attrs, attrs_checked) (k, _sep, v) = inner.partition('=') attrs_checked.add(k.lower()) return _match(k, v, attrs) def _paren_groups(source): """Split a string into parenthesized groups.""" count = 0 start = 0 result = [] for pos in range(len(source)): if source[pos] == '(': if count == 0: start = pos count += 1 if source[pos] == ')': count -= 1 if count == 0: result.append(source[start : pos + 1]) return result def _match(key, value, attrs): """Match a given key and value against an attribute list.""" def match_with_wildcards(norm_val, val_list): # Case insensitive checking with wildcards if norm_val.startswith('*'): if norm_val.endswith('*'): # Is the string anywhere in the target? 
for x in val_list: if norm_val[1:-1] in x: return True else: # Is the string at the end of the target? for x in val_list: if norm_val[1:] == x[len(x) - len(norm_val) + 1 :]: return True elif norm_val.endswith('*'): # Is the string at the start of the target? for x in val_list: if norm_val[:-1] == x[: len(norm_val) - 1]: return True else: # Is the string an exact match? for x in val_list: if check_value == x: return True return False if key not in attrs: return False # This is a pure wild card search, so the answer must be yes! if value == '*': return True if key == 'serviceId': # For serviceId, the backend is returning a list of numbers. # Make sure we convert them to strings first before comparing # them. str_sids = [str(x) for x in attrs[key]] return str(value) in str_sids if key != 'objectclass': check_value = _internal_attr(key, value)[0].lower() norm_values = list( _internal_attr(key, x)[0].lower() for x in attrs[key] ) return match_with_wildcards(check_value, norm_values) # It is an objectclass check, so check subclasses values = _subs(value) for v in values: if v in attrs[key]: return True return False def _subs(value): """Return a list of subclass strings. The strings represent the ldap objectclass plus any subclasses that inherit from it. Fakeldap doesn't know about the ldap object structure, so subclasses need to be defined manually in the dictionary below. """ subs = { 'groupOfNames': [ 'keystoneProject', 'keystoneRole', 'keystoneProjectRole', ] } if value in subs: return [value] + subs[value] return [value] server_fail = False class FakeShelve(dict): def sync(self): pass FakeShelves = {} PendingRequests = {} class FakeLdap(common.LDAPHandler): """Emulate the python-ldap API. The python-ldap API requires all strings to be UTF-8 encoded with the exception of [1]. This is assured by the caller of this interface (i.e. KeystoneLDAPHandler). 
However, internally this emulation MUST process and store strings in a canonical form which permits operations on characters. Encoded strings do not provide the ability to operate on characters. Therefore this emulation accepts UTF-8 encoded strings, decodes them to unicode for operations internal to this emulation, and encodes them back to UTF-8 when returning values from the emulation. [1] Some fields (DNs, RDNs, attribute names, queries) are represented as text in python-ldap for Python 3, and for Python 2 when bytes_mode=False. For more details see: http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode """ __prefix = 'ldap:' def __init__(self, conn=None): super().__init__(conn=conn) self._ldap_options = {ldap.OPT_DEREF: ldap.DEREF_NEVER} def connect( self, url, page_size=0, alias_dereferencing=None, use_tls=False, tls_cacertfile=None, tls_cacertdir=None, tls_req_cert='demand', chase_referrals=None, debug_level=None, use_pool=None, pool_size=None, pool_retry_max=None, pool_retry_delay=None, pool_conn_timeout=None, pool_conn_lifetime=None, conn_timeout=None, ): if url.startswith('fake://memory'): if url not in FakeShelves: FakeShelves[url] = FakeShelve() self.db = FakeShelves[url] else: self.db = shelve.open(url[7:]) using_ldaps = url.lower().startswith("ldaps") if use_tls and using_ldaps: raise AssertionError('Invalid TLS / LDAPS combination') if use_tls: if tls_cacertfile: ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, tls_cacertfile) elif tls_cacertdir: ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, tls_cacertdir) if tls_req_cert in list(common.LDAP_TLS_CERTS.values()): ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, tls_req_cert) else: raise ValueError( "invalid TLS_REQUIRE_CERT tls_req_cert=%s", tls_req_cert ) if alias_dereferencing is not None: self.set_option(ldap.OPT_DEREF, alias_dereferencing) self.page_size = page_size self.use_pool = use_pool self.pool_size = pool_size self.pool_retry_max = pool_retry_max self.pool_retry_delay = pool_retry_delay 
self.pool_conn_timeout = pool_conn_timeout self.pool_conn_lifetime = pool_conn_lifetime self.conn_timeout = conn_timeout def _dn_to_id_attr(self, dn): return ldap.dn.str2dn(dn)[0][0][0] def _dn_to_id_value(self, dn): return ldap.dn.str2dn(dn)[0][0][1] def key(self, dn): return f'{self.__prefix}{dn}' def simple_bind_s( self, who='', cred='', serverctrls=None, clientctrls=None ): """Provide for compatibility but this method is ignored.""" if server_fail: raise ldap.SERVER_DOWN whos = ['cn=Admin', CONF.ldap.user] if who in whos and cred in ['password', CONF.ldap.password]: self.connected = True self.who = who self.cred = cred return attrs = self.db.get(self.key(who)) if not attrs: LOG.debug('who=%s not found, binding anonymously', who) db_password = '' if attrs: try: db_password = attrs['userPassword'][0] except (KeyError, IndexError): LOG.debug('bind fail: password for who=%s not found', who) raise ldap.INAPPROPRIATE_AUTH if cred != db_password: LOG.debug('bind fail: password for who=%s does not match', who) raise ldap.INVALID_CREDENTIALS def unbind_s(self): """Provide for compatibility but this method is ignored.""" self.connected = False self.who = None self.cred = None if server_fail: raise ldap.SERVER_DOWN def add_s(self, dn, modlist): """Add an object with the specified attributes at dn.""" if server_fail: raise ldap.SERVER_DOWN id_attr_in_modlist = False id_attr = self._dn_to_id_attr(dn) id_value = self._dn_to_id_value(dn) # The LDAP API raises a TypeError if attr name is None. for k, dummy_v in modlist: if k is None: raise TypeError( 'must be string, not None. 
modlist=%s' % modlist ) if k == id_attr: for val in dummy_v: if common.utf8_decode(val) == id_value: id_attr_in_modlist = True if not id_attr_in_modlist: LOG.debug( 'id_attribute=%(attr)s missing, attributes=%(attrs)s', {'attr': id_attr, 'attrs': modlist}, ) raise ldap.NAMING_VIOLATION key = self.key(dn) LOG.debug( 'add item: dn=%(dn)s, attrs=%(attrs)s', {'dn': dn, 'attrs': modlist}, ) if key in self.db: LOG.debug('add item failed: dn=%s is already in store.', dn) raise ldap.ALREADY_EXISTS(dn) self.db[key] = {k: _internal_attr(k, v) for k, v in modlist} self.db.sync() def delete_s(self, dn): """Remove the ldap object at specified dn.""" return self.delete_ext_s(dn, serverctrls=[]) def _getChildren(self, dn): return [ k for k, v in self.db.items() if re.match(f'{re.escape(self.__prefix)}.*,{re.escape(dn)}', k) ] def delete_ext_s(self, dn, serverctrls, clientctrls=None): """Remove the ldap object at specified dn.""" if server_fail: raise ldap.SERVER_DOWN try: key = self.key(dn) LOG.debug('FakeLdap delete item: dn=%s', dn) del self.db[key] except KeyError: LOG.debug('delete item failed: dn=%s not found.', dn) raise ldap.NO_SUCH_OBJECT self.db.sync() def modify_s(self, dn, modlist): """Modify the object at dn using the attribute list. 
:param dn: an LDAP DN :param modlist: a list of tuples in the following form: ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ if server_fail: raise ldap.SERVER_DOWN key = self.key(dn) LOG.debug( 'modify item: dn=%(dn)s attrs=%(attrs)s', {'dn': dn, 'attrs': modlist}, ) try: entry = self.db[key] except KeyError: LOG.debug('modify item failed: dn=%s not found.', dn) raise ldap.NO_SUCH_OBJECT for cmd, k, v in modlist: values = entry.setdefault(k, []) if cmd == ldap.MOD_ADD: v = _internal_attr(k, v) for x in v: if x in values: raise ldap.TYPE_OR_VALUE_EXISTS values += v elif cmd == ldap.MOD_REPLACE: values[:] = _internal_attr(k, v) elif cmd == ldap.MOD_DELETE: if v is None: if not values: LOG.debug( 'modify item failed: ' 'item has no attribute "%s" to delete', k, ) raise ldap.NO_SUCH_ATTRIBUTE values[:] = [] else: for val in _internal_attr(k, v): try: values.remove(val) except ValueError: LOG.debug( 'modify item failed: ' 'item has no attribute "%(k)s" with ' 'value "%(v)s" to delete', {'k': k, 'v': val}, ) raise ldap.NO_SUCH_ATTRIBUTE else: LOG.debug('modify item failed: unknown command %s', cmd) raise NotImplementedError( 'modify_s action %s not implemented' % cmd ) self.db[key] = entry self.db.sync() def search_s( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, ): """Search for all matching objects under base using the query. Args: base -- dn to search under scope -- search scope (base, subtree, onelevel) filterstr -- filter objects by attrlist -- attrs to return. 
Returns all attrs if not specified """ if server_fail: raise ldap.SERVER_DOWN if (not filterstr) and (scope != ldap.SCOPE_BASE): raise AssertionError( 'Search without filter on onelevel or subtree scope' ) if scope == ldap.SCOPE_BASE: try: item_dict = self.db[self.key(base)] except KeyError: LOG.debug('search fail: dn not found for SCOPE_BASE') raise ldap.NO_SUCH_OBJECT results = [(base, item_dict)] elif scope == ldap.SCOPE_SUBTREE: # FIXME - LDAP search with SUBTREE scope must return the base # entry, but the code below does _not_. Unfortunately, there are # several tests that depend on this broken behavior, and fail # when the base entry is returned in the search results. The # fix is easy here, just initialize results as above for # the SCOPE_BASE case. # https://bugs.launchpad.net/keystone/+bug/1368772 try: item_dict = self.db[self.key(base)] except KeyError: LOG.debug('search fail: dn not found for SCOPE_SUBTREE') raise ldap.NO_SUCH_OBJECT results = [(base, item_dict)] extraresults = [ (k[len(self.__prefix) :], v) for k, v in self.db.items() if re.match( '{}.*,{}'.format( re.escape(self.__prefix), re.escape(base) ), k, ) ] results.extend(extraresults) elif scope == ldap.SCOPE_ONELEVEL: def get_entries(): base_dn = ldap.dn.str2dn(base) base_len = len(base_dn) for k, v in self.db.items(): if not k.startswith(self.__prefix): continue k_dn_str = k[len(self.__prefix) :] k_dn = ldap.dn.str2dn(k_dn_str) if len(k_dn) != base_len + 1: continue if k_dn[-base_len:] != base_dn: continue yield (k_dn_str, v) results = list(get_entries()) else: # openldap client/server raises PROTOCOL_ERROR for unexpected scope raise ldap.PROTOCOL_ERROR objects = [] for dn, attrs in results: # filter the objects by filterstr id_attr, id_val, _ = ldap.dn.str2dn(dn)[0][0] match_attrs = attrs.copy() match_attrs[id_attr] = [id_val] attrs_checked = set() if not filterstr or _match_query( filterstr, match_attrs, attrs_checked ): if ( filterstr and (scope != ldap.SCOPE_BASE) and ('objectclass' not 
in attrs_checked) ): raise AssertionError('No objectClass in search filter') # filter the attributes by attrlist attrs = { k: v for k, v in attrs.items() if not attrlist or k in attrlist } objects.append((dn, attrs)) return objects def set_option(self, option, invalue): self._ldap_options[option] = invalue def get_option(self, option): value = self._ldap_options.get(option) return value def search_ext( self, base, scope, filterstr='(objectClass=*)', attrlist=None, attrsonly=0, serverctrls=None, clientctrls=None, timeout=-1, sizelimit=0, ): if clientctrls is not None or timeout != -1 or sizelimit != 0: raise exception.NotImplemented() # only passing a single server control is supported by this fake ldap if serverctrls and len(serverctrls) > 1: raise exception.NotImplemented() # search_ext is async and returns an identifier used for # retrieving the results via result3(). This will be emulated by # storing the request in a variable with random integer key and # performing the real lookup in result3() msgid = random.randint(0, 1000) PendingRequests[msgid] = ( base, scope, filterstr, attrlist, attrsonly, serverctrls, ) return msgid def result3( self, msgid=ldap.RES_ANY, all=1, timeout=None, resp_ctrl_classes=None ): """Execute async request. Only msgid param is supported. Request info is fetched from global variable `PendingRequests` by msgid, executed using search_s and limited if requested. """ if all != 1 or timeout is not None or resp_ctrl_classes is not None: raise exception.NotImplemented() params = PendingRequests[msgid] # search_s accepts a subset of parameters of search_ext, # that's why we use only the first 5. results = self.search_s(*params[:5]) # extract limit from serverctrl serverctrls = params[5] ctrl = serverctrls[0] if ctrl.size: rdata = results[: ctrl.size] else: rdata = results # real result3 returns various service info -- rtype, rmsgid, # serverctrls. 
Now this info is not used, so all this info is None rtype = None rmsgid = None serverctrls = None return (rtype, rdata, rmsgid, serverctrls) class FakeLdapPool(FakeLdap): """Emulate the python-ldap API with pooled connections. This class is used as connector class in PooledLDAPHandler. """ def __init__(self, uri, retry_max=None, retry_delay=None, conn=None): super().__init__(conn=conn) self.url = uri self._uri = uri self.connected = None self.conn = self self._connection_time = 5 # any number greater than 0 def get_lifetime(self): return self._connection_time def simple_bind_s( self, who=None, cred=None, serverctrls=None, clientctrls=None ): if self.url.startswith('fakepool://memory'): if self.url not in FakeShelves: FakeShelves[self.url] = FakeShelve() self.db = FakeShelves[self.url] else: self.db = shelve.open(self.url[11:]) if not who: who = 'cn=Admin' if not cred: cred = 'password' super().simple_bind_s( who=who, cred=cred, serverctrls=serverctrls, clientctrls=clientctrls, ) def unbind_ext_s(self): """Added to extend FakeLdap as connector class.""" pass class FakeLdapNoSubtreeDelete(FakeLdap): """FakeLdap subclass that does not support subtree delete. Same as FakeLdap except delete will throw the LDAP error ldap.NOT_ALLOWED_ON_NONLEAF if there is an attempt to delete an entry that has children. 
""" def delete_ext_s(self, dn, serverctrls, clientctrls=None): """Remove the ldap object at specified dn.""" if server_fail: raise ldap.SERVER_DOWN try: children = self._getChildren(dn) if children: raise ldap.NOT_ALLOWED_ON_NONLEAF except KeyError: LOG.debug('delete item failed: dn=%s not found.', dn) raise ldap.NO_SUCH_OBJECT super().delete_ext_s(dn, serverctrls, clientctrls) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.566113 keystone-26.0.0/keystone/tests/unit/federation/0000775000175000017500000000000000000000000021565 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/federation/__init__.py0000664000175000017500000000000000000000000023664 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/federation/test_core.py0000664000175000017500000001120700000000000024127 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import provider_api from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import mapping_fixtures PROVIDERS = provider_api.ProviderAPIs class TestFederationProtocol(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) self.idp = { 'id': uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex, } PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) self.mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER self.mapping['id'] = uuid.uuid4().hex PROVIDERS.federation_api.create_mapping( self.mapping['id'], self.mapping ) def test_create_protocol(self): protocol = {'id': uuid.uuid4().hex, 'mapping_id': self.mapping['id']} protocol_ret = PROVIDERS.federation_api.create_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual(protocol['id'], protocol_ret['id']) def test_create_protocol_with_invalid_mapping_id(self): protocol = {'id': uuid.uuid4().hex, 'mapping_id': uuid.uuid4().hex} self.assertRaises( exception.ValidationError, PROVIDERS.federation_api.create_protocol, self.idp['id'], protocol['id'], protocol, ) def test_create_protocol_with_remote_id_attribute(self): protocol = { 'id': uuid.uuid4().hex, 'mapping_id': self.mapping['id'], 'remote_id_attribute': uuid.uuid4().hex, } protocol_ret = PROVIDERS.federation_api.create_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual( protocol['remote_id_attribute'], protocol_ret['remote_id_attribute'], ) def test_update_protocol(self): protocol = {'id': uuid.uuid4().hex, 'mapping_id': self.mapping['id']} protocol_ret = PROVIDERS.federation_api.create_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual(protocol['id'], protocol_ret['id']) 
new_mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER new_mapping['id'] = uuid.uuid4().hex PROVIDERS.federation_api.create_mapping(new_mapping['id'], new_mapping) protocol['mapping_id'] = new_mapping['id'] protocol_ret = PROVIDERS.federation_api.update_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual(protocol['id'], protocol_ret['id']) self.assertEqual(new_mapping['id'], protocol_ret['mapping_id']) def test_update_protocol_with_invalid_mapping_id(self): protocol = {'id': uuid.uuid4().hex, 'mapping_id': self.mapping['id']} protocol_ret = PROVIDERS.federation_api.create_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual(protocol['id'], protocol_ret['id']) protocol['mapping_id'] = uuid.uuid4().hex self.assertRaises( exception.ValidationError, PROVIDERS.federation_api.update_protocol, self.idp['id'], protocol['id'], protocol, ) def test_update_protocol_with_remote_id_attribute(self): protocol = {'id': uuid.uuid4().hex, 'mapping_id': self.mapping['id']} protocol_ret = PROVIDERS.federation_api.create_protocol( self.idp['id'], protocol['id'], protocol ) new_remote_id_attribute = uuid.uuid4().hex protocol['remote_id_attribute'] = new_remote_id_attribute protocol_ret = PROVIDERS.federation_api.update_protocol( self.idp['id'], protocol['id'], protocol ) self.assertEqual( protocol['remote_id_attribute'], protocol_ret['remote_id_attribute'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/federation/test_utils.py0000664000175000017500000002443100000000000024342 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone.exception import ValidationError from keystone.federation import utils from keystone.tests import unit class TestFederationUtils(unit.TestCase): def setUp(self): super().setUp() self.mapping_id_mock = uuid.uuid4().hex self.domain_id_mock = uuid.uuid4().hex self.domain_mock = {'id': self.domain_id_mock} self.attribute_mapping_schema_1_0 = { "id": self.mapping_id_mock, "schema_version": '1.0', "rules": [ { "remote": [ {"type": "OIDC-preferred_username"}, {"type": "OIDC-email"}, {"type": "OIDC-openstack-user-domain"}, {"type": "OIDC-openstack-default-project"}, { "type": "OIDC-openstack-user-status", "any_one_of": ["local"], }, ], "local": [ { "domain": {"name": "{2}"}, "user": { "domain": {"name": "{2}"}, "type": "local", "name": "{0}", "email": "{1}", }, "projects": [ {"name": "{3}", "roles": [{"name": "member"}]} ], } ], } ], } self.attribute_mapping_schema_2_0 = copy.deepcopy( self.attribute_mapping_schema_1_0 ) self.attribute_mapping_schema_2_0['schema_version'] = '2.0' self.attribute_mapping_schema_2_0['rules'][0]['local'][0]["projects"][ 0 ]['domain'] = {"name": "{some_place_holder}"} self.rule_processor = utils.RuleProcessor( self.mapping_id_mock, self.attribute_mapping_schema_1_0 ) self.rule_processor_schema_2_0 = ( utils.RuleProcessorToHonorDomainOption( self.mapping_id_mock, self.attribute_mapping_schema_2_0 ) ) def test_validate_mapping_structure_schema1_0(self): utils.validate_mapping_structure(self.attribute_mapping_schema_1_0) def test_validate_mapping_structure_schema2_0(self): 
utils.validate_mapping_structure(self.attribute_mapping_schema_2_0) def test_normalize_user_no_type_set(self): user = {} self.rule_processor.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.EPHEMERAL, user['type']) def test_normalize_user_unexpected_type(self): user = {'type': "weird-type"} self.assertRaises( ValidationError, self.rule_processor.normalize_user, user, self.domain_mock, ) def test_normalize_user_type_local(self): user = {'type': utils.UserType.LOCAL} self.rule_processor.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.LOCAL, user['type']) def test_normalize_user_type_ephemeral(self): user = {'type': utils.UserType.EPHEMERAL} self.rule_processor.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.EPHEMERAL, user['type']) def test_extract_groups(self): group1 = {'name': "group1", 'domain': self.domain_id_mock} group_by_domain = {self.domain_id_mock: [group1]} result = utils.RuleProcessor( self.mapping_id_mock, self.attribute_mapping_schema_1_0 ).extract_groups(group_by_domain) self.assertEqual([group1], list(result)) def test_process_group_by_name_domain_with_name_only(self): domain = {'name': "domain1"} group1 = {'name': "group1", 'domain': domain} group_by_domain = {} result = self.rule_processor.process_group_by_name( group1, group_by_domain ) self.assertEqual([group1], list(result)) self.assertEqual([domain["name"]], list(group_by_domain.keys())) def test_process_group_by_name_domain_with_id_only(self): group1 = {'name': "group1", 'domain': self.domain_mock} group_by_domain = {} result = self.rule_processor.process_group_by_name( group1, group_by_domain ) self.assertEqual([group1], list(result)) self.assertEqual([self.domain_id_mock], list(group_by_domain.keys())) def test_process_group_by_name_domain_with_id_and_name(self): self.domain_mock['name'] = "domain1" group1 = {'name': "group1", 'domain': self.domain_mock} group_by_domain = {} result = 
self.rule_processor.process_group_by_name( group1, group_by_domain ) self.assertEqual([group1], list(result)) self.assertEqual(["domain1"], list(group_by_domain.keys())) def test_process_group_by_name_groups_same_domain(self): group1 = {'name': "group1", 'domain': self.domain_mock} group2 = {'name': "group2", 'domain': self.domain_mock} group_by_domain = {self.domain_id_mock: [group1]} result = self.rule_processor.process_group_by_name( group2, group_by_domain ) self.assertEqual([group1, group2], list(result)) self.assertEqual([self.domain_id_mock], list(group_by_domain.keys())) def test_process_group_by_name_groups_different_domain(self): domain = {'name': "domain1"} group1 = {'name': "group1", 'domain': domain} group2 = {'name': "group2", 'domain': self.domain_mock} group_by_domain = {"domain1": [group1]} result = self.rule_processor.process_group_by_name( group2, group_by_domain ) self.assertEqual([group1, group2], list(result)) self.assertEqual( ["domain1", self.domain_id_mock], list(group_by_domain.keys()) ) def test_rule_processor_extract_projects_schema1_0_no_projects(self): result = self.rule_processor.extract_projects({}) self.assertEqual([], result) def test_rule_processor_extract_projects_schema1_0(self): projects_list = [{'name': "project1", 'domain': self.domain_mock}] identity_values = {'projects': projects_list} result = self.rule_processor.extract_projects(identity_values) self.assertEqual(projects_list, result) def test_rule_processor_extract_projects_schema2_0_no_projects(self): result = self.rule_processor_schema_2_0.extract_projects({}) self.assertEqual([], result) def test_rule_processor_extract_projects_schema2_0_domain_in_project(self): projects_list = [{'name': "project1", 'domain': self.domain_mock}] identity_values = {'projects': projects_list} result = self.rule_processor_schema_2_0.extract_projects( identity_values ) self.assertEqual(projects_list, result) def test_rule_processor_extract_projects_schema2_0_no_domain(self): projects_list 
= [{'name': "project1"}] identity_values = {'projects': projects_list} result = self.rule_processor_schema_2_0.extract_projects( identity_values ) self.assertEqual(projects_list, result) def test_rule_processor_extract_projects_schema2_0_no_domain_project(self): project = {'name': "project1"} identity_values = { 'projects': [project.copy()], 'domain': self.domain_mock, } result = self.rule_processor_schema_2_0.extract_projects( identity_values ) expected_project = project.copy() expected_project['domain'] = self.domain_mock self.assertEqual([expected_project], result) def test_normalize_user_no_type_set_schema_2_0(self): user = {} self.rule_processor_schema_2_0.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.EPHEMERAL, user['type']) def test_normalize_user_unexpected_type_schema_2_0(self): user = {'type': "weird-type"} self.assertRaises( ValidationError, self.rule_processor_schema_2_0.normalize_user, user, self.domain_mock, ) def test_normalize_user_type_local_schema_2_0(self): user = {'type': utils.UserType.LOCAL} self.rule_processor_schema_2_0.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.LOCAL, user['type']) def test_normalize_user_type_ephemeral_schema_2_0(self): user = {'type': utils.UserType.EPHEMERAL} self.rule_processor_schema_2_0.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.EPHEMERAL, user['type']) def test_normalize_user_no_domain_schema_2_0(self): user = {} self.rule_processor_schema_2_0.normalize_user(user, self.domain_mock) self.assertEqual(utils.UserType.EPHEMERAL, user['type']) self.assertEqual(self.domain_mock, user.get("domain")) def test_create_attribute_mapping_rules_processor_default(self): result = utils.create_attribute_mapping_rules_processor( self.attribute_mapping_schema_1_0 ) self.assertIsInstance(result, utils.RuleProcessor) def test_create_attribute_mapping_rules_processor_schema1_0(self): result = utils.create_attribute_mapping_rules_processor( 
self.attribute_mapping_schema_1_0 ) self.assertIsInstance(result, utils.RuleProcessor) def test_create_attribute_mapping_rules_processor_schema2_0(self): result = utils.create_attribute_mapping_rules_processor( self.attribute_mapping_schema_2_0 ) self.assertIsInstance(result, utils.RuleProcessorToHonorDomainOption) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/federation_fixtures.py0000664000175000017500000000203200000000000024065 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
IDP_ENTITY_ID = 'https://localhost/v3/OS-FEDERATION/saml2/idp' IDP_SSO_ENDPOINT = 'https://localhost/v3/OS-FEDERATION/saml2/SSO' # Organization info IDP_ORGANIZATION_NAME = 'ACME INC' IDP_ORGANIZATION_DISPLAY_NAME = 'ACME' IDP_ORGANIZATION_URL = 'https://acme.example.com' # Contact info IDP_CONTACT_COMPANY = 'ACME Sub' IDP_CONTACT_GIVEN_NAME = 'Joe' IDP_CONTACT_SURNAME = 'Hacker' IDP_CONTACT_EMAIL = 'joe@acme.example.com' IDP_CONTACT_TELEPHONE_NUMBER = '1234567890' IDP_CONTACT_TYPE = 'technical' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/filtering.py0000664000175000017500000001166700000000000022015 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import provider_api import keystone.conf from keystone import exception PROVIDERS = provider_api.ProviderAPIs CONF = keystone.conf.CONF class FilterTests: # Provide support for checking if a batch of list items all # exist within a contiguous range in a total list def _match_with_list( self, this_batch, total_list, batch_size=None, list_start=None, list_end=None, ): if batch_size is None: batch_size = len(this_batch) if list_start is None: list_start = 0 if list_end is None: list_end = len(total_list) for batch_item in range(0, batch_size): found = False for list_item in range(list_start, list_end): if this_batch[batch_item]['id'] == total_list[list_item]['id']: found = True self.assertTrue(found) def _create_entity(self, entity_type): """Find the create_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called create_ and returns the first one. """ f = getattr(PROVIDERS.identity_api, 'create_%s' % entity_type, None) if f is None: f = getattr( PROVIDERS.resource_api, 'create_%s' % entity_type, None ) if f is None: f = getattr(PROVIDERS.assignment_api, 'create_%s' % entity_type) return f def _delete_entity(self, entity_type): """Find the delete_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called delete_ and returns the first one. """ f = getattr(PROVIDERS.identity_api, 'delete_%s' % entity_type, None) if f is None: f = getattr( PROVIDERS.resource_api, 'delete_%s' % entity_type, None ) if f is None: f = getattr(PROVIDERS.assignment_api, 'delete_%s' % entity_type) return f def _list_entities(self, entity_type): """Find the list_ method. Searches through the [identity_api, resource_api, assignment_api] managers for a method called list_ and returns the first one. 
""" f = getattr(PROVIDERS.identity_api, 'list_%ss' % entity_type, None) if f is None: f = getattr(PROVIDERS.resource_api, 'list_%ss' % entity_type, None) if f is None: f = getattr(PROVIDERS.assignment_api, 'list_%ss' % entity_type) return f def _create_one_entity(self, entity_type, domain_id, name): new_entity = {'name': name, 'domain_id': domain_id} if entity_type in ['user', 'group']: # The manager layer creates the ID for users and groups new_entity = self._create_entity(entity_type)(new_entity) else: new_entity['id'] = uuid.uuid4().hex self._create_entity(entity_type)(new_entity['id'], new_entity) return new_entity def _create_test_data( self, entity_type, number, domain_id=None, name_dict=None ): """Create entity test data. :param entity_type: type of entity to create, e.g. 'user', group' etc. :param number: number of entities to create, :param domain_id: if not defined, all users will be created in the default domain. :param name_dict: optional dict containing entity number and name pairs """ entity_list = [] if domain_id is None: domain_id = CONF.identity.default_domain_id name_dict = name_dict or {} for x in range(number): # If this index has a name defined in the name_dict, then use it name = name_dict.get(x, uuid.uuid4().hex) new_entity = self._create_one_entity(entity_type, domain_id, name) entity_list.append(new_entity) return entity_list def _delete_test_data(self, entity_type, entity_list): for entity in entity_list: try: self._delete_entity(entity_type)(entity['id']) except exception.Forbidden: # Note(knikolla): Some identity backends such as LDAP are # read only break ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.566113 keystone-26.0.0/keystone/tests/unit/identity/0000775000175000017500000000000000000000000021276 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/identity/__init__.py0000664000175000017500000000000000000000000023375 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.566113 keystone-26.0.0/keystone/tests/unit/identity/backends/0000775000175000017500000000000000000000000023050 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/__init__.py0000664000175000017500000000000000000000000025147 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/fake_driver.py0000664000175000017500000000644100000000000025710 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fake driver to test out-of-tree drivers handling.""" from oslo_config import cfg from keystone import exception from keystone.identity.backends import base class FooDriver(base.IdentityDriverBase): """Fake out-of-tree driver. 
It does not make much sense to inherit from BaseClass, but in certain places across the code methods are invoked """ @classmethod def register_opts(cls, conf): grp = cfg.OptGroup("foo") opts = [cfg.StrOpt("opt1")] conf.register_group(grp) conf.register_opts(opts, group=grp) def authenticate(self, user_id, password): raise exception.NotImplemented() # pragma: no cover def create_user(self, user_id, user): raise exception.NotImplemented() # pragma: no cover def list_users(self, hints): raise exception.NotImplemented() # pragma: no cover def unset_default_project_id(self, project_id): raise exception.NotImplemented() # pragma: no cover def list_users_in_group(self, group_id, hints): raise exception.NotImplemented() # pragma: no cover def get_user(self, user_id): raise exception.NotImplemented() # pragma: no cover def update_user(self, user_id, user): raise exception.NotImplemented() # pragma: no cover def change_password(self, user_id, new_password): raise exception.NotImplemented() # pragma: no cover def add_user_to_group(self, user_id, group_id): raise exception.NotImplemented() # pragma: no cover def check_user_in_group(self, user_id, group_id): raise exception.NotImplemented() # pragma: no cover def remove_user_from_group(self, user_id, group_id): raise exception.NotImplemented() # pragma: no cover def delete_user(self, user_id): raise exception.NotImplemented() # pragma: no cover def get_user_by_name(self, user_name, domain_id): raise exception.NotImplemented() # pragma: no cover def reset_last_active(self): raise exception.NotImplemented() # pragma: no cover def create_group(self, group_id, group): raise exception.NotImplemented() # pragma: no cover def list_groups(self, hints): raise exception.NotImplemented() # pragma: no cover def list_groups_for_user(self, user_id, hints): raise exception.NotImplemented() # pragma: no cover def get_group(self, group_id): raise exception.NotImplemented() # pragma: no cover def get_group_by_name(self, group_name, domain_id): 
raise exception.NotImplemented() # pragma: no cover def update_group(self, group_id, group): raise exception.NotImplemented() # pragma: no cover def delete_group(self, group_id): raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/test_base.py0000664000175000017500000005036500000000000025404 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone.common import driver_hints from keystone import exception class IdentityDriverTests: driver = None # subclasses must override driver to the actual driver. # subclasses that don't allow name updates must set this to False. allows_name_update = True # subclasses that don't allow self-service password changes must set this # to False. allows_self_service_change_password = True # Subclasses must override this to indicate whether it's domain-aware or # not. expected_is_domain_aware = True # Subclasses must override this to the expected default assignment driver. expected_default_assignment_driver = 'sql' # Subclasses must override this to the expected is_sql value. expected_is_sql = False # Subclasses must override this to the expected expected_generates_uuids # value. expected_generates_uuids = True def create_user(self, domain_id=None, **kwargs): """Get a user for the test. 
Subclasses can override this to provide their own way to provide a user for the test. By default, driver.create_user is used. For drivers that don't support create_user, this may go directly to the backend, or maybe it gets a user from a set of pre-created users. """ user_id = uuid.uuid4().hex user = { 'id': user_id, 'name': uuid.uuid4().hex, 'enabled': True, } if self.driver.is_domain_aware(): user['domain_id'] = domain_id or uuid.uuid4().hex user.update(kwargs) return self.driver.create_user(user_id, user) def create_group(self, domain_id=None): """Get a group for the test. Similar to :meth:`~.create_user`, subclasses can override this to provide their own way to provide a group for the test. """ group_id = uuid.uuid4().hex group = { 'id': group_id, 'name': uuid.uuid4().hex, } if self.driver.is_domain_aware(): group['domain_id'] = domain_id or uuid.uuid4().hex return self.driver.create_group(group_id, group) def test_is_domain_aware(self): self.assertIs( self.expected_is_domain_aware, self.driver.is_domain_aware() ) def test_is_sql(self): self.assertIs(self.expected_is_sql, self.driver.is_sql) def test_generates_uuids(self): self.assertIs( self.expected_generates_uuids, self.driver.generates_uuids() ) def test_create_user(self): # Don't use self.create_user since this needs to test the driver # interface and create_user might not use the driver. 
user_id = uuid.uuid4().hex user = {'id': user_id, 'name': uuid.uuid4().hex, 'enabled': True} if self.driver.is_domain_aware(): user['domain_id'] = uuid.uuid4().hex ret = self.driver.create_user(user_id, user) self.assertEqual(user_id, ret['id']) def test_create_user_all_attributes(self): user_id = uuid.uuid4().hex user = { 'id': user_id, 'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex, 'enabled': True, 'default_project_id': uuid.uuid4().hex, 'password_expires_at': None, 'options': {}, } if self.driver.is_domain_aware(): user['domain_id'] = uuid.uuid4().hex ret = self.driver.create_user(user_id, user) exp_user = user.copy() del exp_user['password'] self.assertEqual(exp_user, ret) def test_create_user_same_id_exc(self): user_id = uuid.uuid4().hex user = { 'id': user_id, 'name': uuid.uuid4().hex, 'enabled': True, } if self.driver.is_domain_aware(): user['domain_id'] = uuid.uuid4().hex self.driver.create_user(user_id, user) self.assertRaises( exception.Conflict, self.driver.create_user, user_id, user ) def test_create_user_same_name_and_domain_exc(self): user1_id = uuid.uuid4().hex name = uuid.uuid4().hex domain_id = uuid.uuid4().hex user = { 'id': user1_id, 'name': name, 'enabled': True, } if self.driver.is_domain_aware(): user['domain_id'] = domain_id self.driver.create_user(user1_id, user) user2_id = uuid.uuid4().hex user = { 'id': user2_id, 'name': name, 'enabled': True, } if self.driver.is_domain_aware(): user['domain_id'] = domain_id self.assertRaises( exception.Conflict, self.driver.create_user, user2_id, user ) def test_list_users_no_users(self): hints = driver_hints.Hints() self.assertEqual([], self.driver.list_users(hints)) def test_list_users_when_users(self): user = self.create_user() hints = driver_hints.Hints() users = self.driver.list_users(hints) self.assertEqual([user['id']], [u['id'] for u in users]) def test_get_user(self): user = self.create_user() actual_user = self.driver.get_user(user['id']) self.assertEqual(user['id'], actual_user['id']) 
def test_get_user_no_user_exc(self): self.assertRaises( exception.UserNotFound, self.driver.get_user, uuid.uuid4().hex ) def test_get_user_by_name(self): domain_id = uuid.uuid4().hex user = self.create_user(domain_id=domain_id) actual_user = self.driver.get_user_by_name(user['name'], domain_id) self.assertEqual(user['id'], actual_user['id']) def test_get_user_by_name_no_user_exc(self): # When the user doesn't exist, UserNotFound is raised. self.assertRaises( exception.UserNotFound, self.driver.get_user_by_name, user_name=uuid.uuid4().hex, domain_id=uuid.uuid4().hex, ) def test_update_user(self): user = self.create_user() user_mod = {'enabled': False} actual_user = self.driver.update_user(user['id'], user_mod) self.assertEqual(user['id'], actual_user['id']) self.assertIs(False, actual_user['enabled']) def test_update_user_remove_optional_attribute(self): # When the attribute has a value of None it's supposed to be removed. user = self.create_user(default_project_id=uuid.uuid4().hex) self.assertIn('default_project_id', user) user_mod = {'default_project_id': None} actual_user = self.driver.update_user(user['id'], user_mod) self.assertNotIn('default_project_id', actual_user) def test_update_user_same_name_exc(self): # For drivers that allow name update, if the name of a user is changed # to the same as another user in the same domain, Conflict is raised. 
if not self.allows_name_update: self.skipTest("Backend doesn't allow name update.") domain_id = uuid.uuid4().hex user1 = self.create_user(domain_id=domain_id) user2 = self.create_user(domain_id=domain_id) user_mod = {'name': user2['name']} self.assertRaises( exception.Conflict, self.driver.update_user, user1['id'], user_mod ) def test_update_user_no_user_exc(self): user_id = uuid.uuid4().hex user_mod = {'enabled': False} self.assertRaises( exception.UserNotFound, self.driver.update_user, user_id, user_mod ) def test_update_user_name_not_allowed_exc(self): # For drivers that do not allow name update, attempting to change the # name causes an exception. if self.allows_name_update: self.skipTest("Backend allows name update.") user = self.create_user() user_mod = {'name': uuid.uuid4().hex} self.assertRaises( exception.Conflict, self.driver.update_user, user['id'], user_mod ) def test_change_password(self): if not self.allows_self_service_change_password: self.skipTest("Backend doesn't allow change password.") # create user password = uuid.uuid4().hex domain_id = uuid.uuid4().hex user = self.create_user(domain_id=domain_id, password=password) # change password new_password = uuid.uuid4().hex self.driver.change_password(user['id'], new_password) self.driver.authenticate(user['id'], new_password) def test_delete_user(self): user = self.create_user() self.driver.delete_user(user['id']) self.assertRaises( exception.UserNotFound, self.driver.get_user, user['id'] ) def test_delete_user_no_user_exc(self): # When the user doesn't exist, UserNotFound is raised. 
self.assertRaises( exception.UserNotFound, self.driver.delete_user, user_id=uuid.uuid4().hex, ) def test_create_group(self): group_id = uuid.uuid4().hex group = { 'id': group_id, 'name': uuid.uuid4().hex, } if self.driver.is_domain_aware(): group['domain_id'] = uuid.uuid4().hex new_group = self.driver.create_group(group_id, group) self.assertEqual(group_id, new_group['id']) def test_create_group_all_attrs(self): group_id = uuid.uuid4().hex group = { 'id': group_id, 'name': uuid.uuid4().hex, 'description': uuid.uuid4().hex, } if self.driver.is_domain_aware(): group['domain_id'] = uuid.uuid4().hex new_group = self.driver.create_group(group_id, group) self.assertEqual(group, new_group) def test_create_group_duplicate_exc(self): group1_id = uuid.uuid4().hex name = uuid.uuid4().hex domain = uuid.uuid4().hex group1 = { 'id': group1_id, 'name': name, } if self.driver.is_domain_aware(): group1['domain_id'] = domain self.driver.create_group(group1_id, group1) group2_id = uuid.uuid4().hex group2 = { 'id': group2_id, 'name': name, } if self.driver.is_domain_aware(): group2['domain_id'] = domain self.assertRaises( exception.Conflict, self.driver.create_group, group2_id, group2 ) def test_get_group(self): group = self.create_group() actual_group = self.driver.get_group(group['id']) self.assertEqual(group['id'], actual_group['id']) def test_get_group_no_group_exc(self): # When the group doesn't exist, get_group raises GroupNotFound. self.assertRaises( exception.GroupNotFound, self.driver.get_group, group_id=uuid.uuid4().hex, ) def test_get_group_by_name(self): domain_id = uuid.uuid4().hex group = self.create_group(domain_id=domain_id) actual_group = self.driver.get_group_by_name(group['name'], domain_id) self.assertEqual(group['id'], actual_group['id']) def test_get_group_by_name_no_user_exc(self): # When the group doesn't exist, get_group raises GroupNotFound. 
self.assertRaises( exception.GroupNotFound, self.driver.get_group_by_name, group_name=uuid.uuid4().hex, domain_id=uuid.uuid4().hex, ) def test_update_group(self): group = self.create_group() new_description = uuid.uuid4().hex group_mod = {'description': new_description} actual_group = self.driver.update_group(group['id'], group_mod) self.assertEqual(new_description, actual_group['description']) def test_update_group_no_group(self): # When the group doesn't exist, GroupNotFound is raised. group_mod = {'description': uuid.uuid4().hex} self.assertRaises( exception.GroupNotFound, self.driver.update_group, group_id=uuid.uuid4().hex, group=group_mod, ) def test_update_group_name_already_exists(self): # For drivers that support renaming, when the group is renamed to a # name that already exists, Conflict is raised. if not self.allows_name_update: self.skipTest("driver doesn't allow name update") domain_id = uuid.uuid4().hex group1 = self.create_group(domain_id=domain_id) group2 = self.create_group(domain_id=domain_id) group_mod = {'name': group1['name']} self.assertRaises( exception.Conflict, self.driver.update_group, group2['id'], group_mod, ) def test_update_group_name_not_allowed(self): # For drivers that do not support renaming, when the group is attempted # to be renamed ValidationError is raised. 
if self.allows_name_update: self.skipTest("driver allows name update") group = self.create_group() group_mod = {'name': uuid.uuid4().hex} self.assertRaises( exception.ValidationError, self.driver.update_group, group['id'], group_mod, ) def test_delete_group(self): group = self.create_group() self.driver.delete_group(group['id']) self.assertRaises( exception.GroupNotFound, self.driver.get_group, group['id'] ) def test_delete_group_doesnt_exist_exc(self): self.assertRaises( exception.GroupNotFound, self.driver.delete_group, group_id=uuid.uuid4().hex, ) def test_list_groups_no_groups(self): groups = self.driver.list_groups(driver_hints.Hints()) self.assertEqual([], groups) def test_list_groups_one_group(self): group = self.create_group() groups = self.driver.list_groups(driver_hints.Hints()) self.assertEqual(group['id'], groups[0]['id']) def test_add_user_to_group(self): user = self.create_user() group = self.create_group() self.driver.add_user_to_group(user['id'], group['id']) # No assert since if doesn't raise, then successful. self.driver.check_user_in_group(user['id'], group['id']) def test_add_user_to_group_no_user_exc(self): group = self.create_group() user_id = uuid.uuid4().hex self.assertRaises( exception.UserNotFound, self.driver.add_user_to_group, user_id, group['id'], ) def test_add_user_to_group_no_group_exc(self): user = self.create_user() group_id = uuid.uuid4().hex self.assertRaises( exception.GroupNotFound, self.driver.add_user_to_group, user['id'], group_id, ) def test_check_user_in_group(self): user = self.create_user() group = self.create_group() self.driver.add_user_to_group(user['id'], group['id']) # No assert since if doesn't raise, then successful. 
self.driver.check_user_in_group(user['id'], group['id']) def test_check_user_in_group_user_not_in_group_exc(self): user = self.create_user() group = self.create_group() self.assertRaises( exception.NotFound, self.driver.check_user_in_group, user['id'], group['id'], ) def test_check_user_in_group_user_doesnt_exist_exc(self): # When the user doesn't exist, UserNotFound is raised. group = self.create_group() user_id = uuid.uuid4().hex self.assertRaises( exception.UserNotFound, self.driver.check_user_in_group, user_id, group['id'], ) def test_check_user_in_group_group_doesnt_exist_exc(self): # When the group doesn't exist, UserNotFound is raised. user = self.create_user() group_id = uuid.uuid4().hex self.assertRaises( exception.GroupNotFound, self.driver.check_user_in_group, user['id'], group_id, ) def test_list_users_in_group_no_users(self): group = self.create_group() users = self.driver.list_users_in_group( group['id'], driver_hints.Hints() ) self.assertEqual([], users) def test_list_users_in_group_user(self): group = self.create_group() user = self.create_user() self.driver.add_user_to_group(user['id'], group['id']) users = self.driver.list_users_in_group( group['id'], driver_hints.Hints() ) self.assertEqual([user['id']], [u['id'] for u in users]) def test_list_users_in_group_no_group(self): group_id = uuid.uuid4().hex self.assertRaises( exception.GroupNotFound, self.driver.list_users_in_group, group_id, driver_hints.Hints(), ) def test_list_groups_for_user_no_groups(self): user = self.create_user() groups = self.driver.list_groups_for_user( user['id'], driver_hints.Hints() ) self.assertEqual([], groups) def test_list_groups_for_user_group(self): user = self.create_user() group = self.create_group() self.driver.add_user_to_group(user['id'], group['id']) groups = self.driver.list_groups_for_user( user['id'], driver_hints.Hints() ) self.assertEqual([group['id']], [g['id'] for g in groups]) def test_list_groups_for_user_no_user(self): user_id = uuid.uuid4().hex 
self.assertRaises( exception.UserNotFound, self.driver.list_groups_for_user, user_id, driver_hints.Hints(), ) def test_remove_user_from_group(self): user = self.create_user() group = self.create_group() self.driver.add_user_to_group(user['id'], group['id']) self.driver.remove_user_from_group(user['id'], group['id']) self.assertRaises( exception.NotFound, self.driver.check_user_in_group, user['id'], group['id'], ) def test_remove_user_from_group_not_in_group(self): user = self.create_user() group = self.create_group() # FIXME(blk-u): ldap is returning UserNotFound rather than NotFound, # fix this. self.assertRaises( exception.NotFound, self.driver.remove_user_from_group, user['id'], group['id'], ) def test_remove_user_from_group_no_user(self): group = self.create_group() user_id = uuid.uuid4().hex self.assertRaises( exception.UserNotFound, self.driver.remove_user_from_group, user_id, group['id'], ) def test_remove_user_from_group_no_group(self): user = self.create_user() group_id = uuid.uuid4().hex self.assertRaises( exception.GroupNotFound, self.driver.remove_user_from_group, user['id'], group_id, ) def test_authenticate(self): password = uuid.uuid4().hex user = self.create_user(password=password) actual_user = self.driver.authenticate(user['id'], password) self.assertEqual(user['id'], actual_user['id']) def test_authenticate_wrong_password(self): user = self.create_user(password=uuid.uuid4().hex) password = uuid.uuid4().hex self.assertRaises( AssertionError, self.driver.authenticate, user['id'], password ) def test_authenticate_no_user(self): user_id = uuid.uuid4().hex password = uuid.uuid4().hex self.assertRaises( AssertionError, self.driver.authenticate, user_id, password ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/test_ldap.py0000664000175000017500000000454400000000000025410 0ustar00zuulzuul00000000000000# Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import fixture as config_fixture from keystone.identity.backends import ldap from keystone.tests.unit import core from keystone.tests.unit.identity.backends import test_base from keystone.tests.unit.ksfixtures import ldapdb class TestIdentityDriver(core.BaseTestCase, test_base.IdentityDriverTests): allows_name_update = False allows_self_service_change_password = False expected_is_domain_aware = False expected_default_assignment_driver = 'sql' expected_is_sql = False expected_generates_uuids = False def setUp(self): super().setUp() config_fixture_ = self.useFixture(config_fixture.Config()) config_fixture_.config( group='ldap', url='fake://memory', user='cn=Admin', password='password', suffix='cn=example,cn=com', ) self.useFixture(ldapdb.LDAPDatabase()) self.driver = ldap.Identity() def test_delete_user(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_user_no_user_exc(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_group(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_group_doesnt_exist_exc(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_remove_user_from_group(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_remove_user_from_group_not_in_group(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_remove_user_from_group_no_user(self): self.skip_test_overrides('N/A: LDAP has no write 
support') def test_remove_user_from_group_no_group(self): self.skip_test_overrides('N/A: LDAP has no write support') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/test_ldap_common.py0000664000175000017500000006403700000000000026763 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import tempfile from unittest import mock import uuid import fixtures import ldap.dn from oslo_config import fixture as config_fixture from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception as ks_exception from keystone.identity.backends.ldap import common as common_ldap from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import fakeldap from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class DnCompareTest(unit.BaseTestCase): """Test for the DN comparison functions in keystone.common.ldap.core.""" def test_prep(self): # prep_case_insensitive returns the string with spaces at the front and # end if it's already lowercase and no insignificant characters. 
value = 'lowercase value' self.assertEqual(value, common_ldap.prep_case_insensitive(value)) def test_prep_lowercase(self): # prep_case_insensitive returns the string with spaces at the front and # end and lowercases the value. value = 'UPPERCASE VALUE' exp_value = value.lower() self.assertEqual(exp_value, common_ldap.prep_case_insensitive(value)) def test_prep_insignificant(self): # prep_case_insensitive remove insignificant spaces. value = 'before after' exp_value = 'before after' self.assertEqual(exp_value, common_ldap.prep_case_insensitive(value)) def test_prep_insignificant_pre_post(self): # prep_case_insensitive remove insignificant spaces. value = ' value ' exp_value = 'value' self.assertEqual(exp_value, common_ldap.prep_case_insensitive(value)) def test_ava_equal_same(self): # is_ava_value_equal returns True if the two values are the same. value = 'val1' self.assertTrue(common_ldap.is_ava_value_equal('cn', value, value)) def test_ava_equal_complex(self): # is_ava_value_equal returns True if the two values are the same using # a value that's got different capitalization and insignificant chars. val1 = 'before after' val2 = ' BEFORE afTer ' self.assertTrue(common_ldap.is_ava_value_equal('cn', val1, val2)) def test_ava_different(self): # is_ava_value_equal returns False if the values aren't the same. self.assertFalse(common_ldap.is_ava_value_equal('cn', 'val1', 'val2')) def test_rdn_same(self): # is_rdn_equal returns True if the two values are the same. rdn = ldap.dn.str2dn('cn=val1')[0] self.assertTrue(common_ldap.is_rdn_equal(rdn, rdn)) def test_rdn_diff_length(self): # is_rdn_equal returns False if the RDNs have a different number of # AVAs. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] self.assertFalse(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_same_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same. 
rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=CN1+ou=OU1')[0] self.assertTrue(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_order(self): # is_rdn_equal returns True if the RDNs have the same number of AVAs # and the values are the same, even if in a different order rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('ou=OU1+cn=CN1')[0] self.assertTrue(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_multi_ava_diff_type(self): # is_rdn_equal returns False if the RDNs have the same number of AVAs # and the attribute types are different. rdn1 = ldap.dn.str2dn('cn=cn1+ou=ou1')[0] rdn2 = ldap.dn.str2dn('cn=cn1+sn=sn1')[0] self.assertFalse(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_case_diff(self): # is_rdn_equal returns True for same RDNs even when attr type case is # different. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('CN=cn1')[0] self.assertTrue(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_rdn_attr_type_alias(self): # is_rdn_equal returns False for same RDNs even when attr type alias is # used. Note that this is a limitation since an LDAP server should # consider them equal. rdn1 = ldap.dn.str2dn('cn=cn1')[0] rdn2 = ldap.dn.str2dn('2.5.4.3=cn1')[0] self.assertFalse(common_ldap.is_rdn_equal(rdn1, rdn2)) def test_dn_same(self): # is_dn_equal returns True if the DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertTrue(common_ldap.is_dn_equal(dn, dn)) def test_dn_equal_unicode(self): # is_dn_equal can accept unicode dn = 'cn=fäké,ou=OpenStack' self.assertTrue(common_ldap.is_dn_equal(dn, dn)) def test_dn_diff_length(self): # is_dn_equal returns False if the DNs don't have the same number of # RDNs dn1 = 'cn=Babs Jansen,ou=OpenStack' dn2 = 'cn=Babs Jansen,ou=OpenStack,dc=example.com' self.assertFalse(common_ldap.is_dn_equal(dn1, dn2)) def test_dn_equal_rdns(self): # is_dn_equal returns True if the DNs have the same number of RDNs # and each RDN is the same. 
dn1 = 'cn=Babs Jansen,ou=OpenStack+cn=OpenSource' dn2 = 'CN=Babs Jansen,cn=OpenSource+ou=OpenStack' self.assertTrue(common_ldap.is_dn_equal(dn1, dn2)) def test_dn_parsed_dns(self): # is_dn_equal can also accept parsed DNs. dn_str1 = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack+cn=OpenSource') dn_str2 = ldap.dn.str2dn('CN=Babs Jansen,cn=OpenSource+ou=OpenStack') self.assertTrue(common_ldap.is_dn_equal(dn_str1, dn_str2)) def test_startswith_under_child(self): # dn_startswith returns True if descendant_dn is a child of dn. child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertTrue(common_ldap.dn_startswith(child, parent)) def test_startswith_parent(self): # dn_startswith returns False if descendant_dn is a parent of dn. child = 'cn=Babs Jansen,ou=OpenStack' parent = 'ou=OpenStack' self.assertFalse(common_ldap.dn_startswith(parent, child)) def test_startswith_same(self): # dn_startswith returns False if DNs are the same. dn = 'cn=Babs Jansen,ou=OpenStack' self.assertFalse(common_ldap.dn_startswith(dn, dn)) def test_startswith_not_parent(self): # dn_startswith returns False if descendant_dn is not under the dn child = 'cn=Babs Jansen,ou=OpenStack' parent = 'dc=example.com' self.assertFalse(common_ldap.dn_startswith(child, parent)) def test_startswith_descendant(self): # dn_startswith returns True if descendant_dn is a descendant of dn. descendant = 'cn=Babs Jansen,ou=Keystone,ou=OpenStack,dc=example.com' dn = 'ou=OpenStack,dc=example.com' self.assertTrue(common_ldap.dn_startswith(descendant, dn)) descendant = 'uid=12345,ou=Users,dc=example,dc=com' dn = 'ou=Users,dc=example,dc=com' self.assertTrue(common_ldap.dn_startswith(descendant, dn)) def test_startswith_parsed_dns(self): # dn_startswith also accepts parsed DNs. descendant = ldap.dn.str2dn('cn=Babs Jansen,ou=OpenStack') dn = ldap.dn.str2dn('ou=OpenStack') self.assertTrue(common_ldap.dn_startswith(descendant, dn)) def test_startswith_unicode(self): # dn_startswith accepts unicode. 
child = 'cn=fäké,ou=OpenStäck' parent = 'ou=OpenStäck' self.assertTrue(common_ldap.dn_startswith(child, parent)) class LDAPDeleteTreeTest(unit.TestCase): def setUp(self): super().setUp() self.useFixture( ldapdb.LDAPDatabase(dbclass=fakeldap.FakeLdapNoSubtreeDelete) ) self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files class MultiURLTests(unit.TestCase): """Test for setting multiple LDAP URLs.""" @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_multiple_urls_with_comma_no_conn_pool(self, mock_ldap_bind): urls = 'ldap://localhost,ldap://backup.localhost' self.config_fixture.config(group='ldap', url=urls, use_pool=False) base_ldap = common_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() self.assertEqual(urls, ldap_connection.conn.conn._uri) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_multiple_urls_with_comma_with_conn_pool(self, mock_ldap_bind): urls = 'ldap://localhost,ldap://backup.localhost' self.config_fixture.config(group='ldap', url=urls, use_pool=True) base_ldap = common_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() self.assertEqual(urls, ldap_connection.conn.conn_pool.uri) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_multiple_urls_with_comma_randomized(self, mock_ldap_bind): urls = ( 'ldap://localhost1,ldap://localhost2,' 'ldap://localhost3,ldap://localhost4,' 'ldap://localhost5,ldap://localhost6,' 'ldap://localhost7,ldap://localhost8,' 'ldap://localhost9,ldap://localhost0' ) self.config_fixture.config(group='ldap', url=urls, randomize_urls=True) base_ldap = common_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() # 
Sanity check self.assertEqual(len(urls.split(',')), 10) # Check that the list is split into the same number of URIs self.assertEqual( len(urls.split(',')), len(ldap_connection.conn.conn_pool.uri.split(',')), ) # Check that the list is randomized self.assertNotEqual( urls.split(','), ldap_connection.conn.conn_pool.uri.split(',') ) # Check that the list contains the same URIs self.assertEqual( set(urls.split(',')), set(ldap_connection.conn.conn_pool.uri.split(',')), ) class LDAPConnectionTimeoutTest(unit.TestCase): """Test for Network Connection timeout on LDAP URL connection.""" @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_connectivity_timeout_no_conn_pool(self, mock_ldap_bind): url = 'ldap://localhost' conn_timeout = 1 # 1 second self.config_fixture.config( group='ldap', url=url, connection_timeout=conn_timeout, use_pool=False, ) base_ldap = common_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() self.assertIsInstance( ldap_connection.conn, common_ldap.PythonLDAPHandler ) # Ensure that the Network Timeout option is set. # Also ensure that the URL is set. # # We will not verify if an LDAP bind returns the timeout # exception as that would fall under the realm of # integration testing. If the LDAP option is set properly, # and we get back a valid connection URI then that should # suffice for this unit test. 
self.assertEqual( conn_timeout, ldap.get_option(ldap.OPT_NETWORK_TIMEOUT) ) self.assertEqual(url, ldap_connection.conn.conn._uri) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_connectivity_timeout_with_conn_pool(self, mock_ldap_bind): url = 'ldap://localhost' conn_timeout = 1 # 1 second self.config_fixture.config( group='ldap', url=url, pool_connection_timeout=conn_timeout, use_pool=True, pool_retry_max=1, ) base_ldap = common_ldap.BaseLdap(CONF) ldap_connection = base_ldap.get_connection() self.assertIsInstance( ldap_connection.conn, common_ldap.PooledLDAPHandler ) # Ensure that the Network Timeout option is set. # Also ensure that the URL is set. # # We will not verify if an LDAP bind returns the timeout # exception as that would fall under the realm of # integration testing. If the LDAP option is set properly, # and we get back a valid connection URI then that should # suffice for this unit test. self.assertEqual( conn_timeout, ldap.get_option(ldap.OPT_NETWORK_TIMEOUT) ) self.assertEqual(url, ldap_connection.conn.conn_pool.uri) class SslTlsTest(unit.BaseTestCase): """Test for the SSL/TLS functionality in keystone.common.ldap.core.""" def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') @mock.patch.object(ldap.ldapobject.LDAPObject, 'start_tls_s') def _init_ldap_connection(self, config, mock_ldap_one, mock_ldap_two): # Attempt to connect to initialize python-ldap. base_ldap = common_ldap.BaseLdap(config) base_ldap.get_connection() def test_certfile_trust_tls(self): # We need this to actually exist, so we create a tempfile. (handle, certfile) = tempfile.mkstemp() self.addCleanup(os.unlink, certfile) self.addCleanup(os.close, handle) self.config_fixture.config( group='ldap', url='ldap://localhost', use_tls=True, tls_cacertfile=certfile, ) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. 
self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)) def test_certdir_trust_tls(self): # We need this to actually exist, so we create a tempdir. certdir = self.useFixture(fixtures.TempDir()).path self.config_fixture.config( group='ldap', url='ldap://localhost', use_tls=True, tls_cacertdir=certdir, ) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)) def test_certfile_trust_ldaps(self): # We need this to actually exist, so we create a tempfile. (handle, certfile) = tempfile.mkstemp() self.addCleanup(os.unlink, certfile) self.addCleanup(os.close, handle) self.config_fixture.config( group='ldap', url='ldaps://localhost', use_tls=False, tls_cacertfile=certfile, ) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. self.assertEqual(certfile, ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)) def test_certdir_trust_ldaps(self): # We need this to actually exist, so we create a tempdir. certdir = self.useFixture(fixtures.TempDir()).path self.config_fixture.config( group='ldap', url='ldaps://localhost', use_tls=False, tls_cacertdir=certdir, ) self._init_ldap_connection(CONF) # Ensure the cert trust option is set. 
self.assertEqual(certdir, ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)) class LDAPPagedResultsTest(unit.TestCase): """Test the paged results functionality in keystone.common.ldap.core.""" def setUp(self): super().setUp() self.useFixture(ldapdb.LDAPDatabase()) self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files @mock.patch.object(fakeldap.FakeLdap, 'search_ext') @mock.patch.object(fakeldap.FakeLdap, 'result3') def test_paged_results_control_api(self, mock_result3, mock_search_ext): mock_result3.return_value = ('', [], 1, []) self.config_fixture.config(group='ldap', page_size=1) conn = PROVIDERS.identity_api.user.get_connection() conn._paged_search_s( 'dc=example,dc=test', ldap.SCOPE_SUBTREE, 'objectclass=*', ['mail', 'userPassword'], ) # verify search_ext() args - attrlist is tricky due to ordering args, _ = mock_search_ext.call_args self.assertEqual(('dc=example,dc=test', 2, 'objectclass=*'), args[0:3]) attrlist = sorted([attr for attr in args[3] if attr]) self.assertEqual(['mail', 'userPassword'], attrlist) class CommonLdapTestCase(unit.BaseTestCase): """These test cases call functions in keystone.common.ldap.""" def test_binary_attribute_values(self): result = [ ( 'cn=junk,dc=example,dc=com', { 'cn': ['junk'], 'sn': [uuid.uuid4().hex], 'mail': [uuid.uuid4().hex], 'binary_attr': [b'\x00\xFF\x00\xFF'], }, ), ] py_result = common_ldap.convert_ldap_result(result) # The attribute containing the binary value should # not be present in the converted result. 
self.assertNotIn('binary_attr', py_result[0][1]) def test_utf8_conversion(self): value_unicode = 'fäké1' value_utf8 = value_unicode.encode('utf-8') result_utf8 = common_ldap.utf8_encode(value_unicode) self.assertEqual(value_utf8, result_utf8) result_utf8 = common_ldap.utf8_encode(value_utf8) self.assertEqual(value_utf8, result_utf8) result_unicode = common_ldap.utf8_decode(value_utf8) self.assertEqual(value_unicode, result_unicode) result_unicode = common_ldap.utf8_decode(value_unicode) self.assertEqual(value_unicode, result_unicode) self.assertRaises(TypeError, common_ldap.utf8_encode, 100) result_unicode = common_ldap.utf8_decode(100) self.assertEqual('100', result_unicode) def test_user_id_begins_with_0(self): user_id = '0123456' result = [ ( 'cn=dummy,dc=example,dc=com', {'user_id': [user_id], 'enabled': ['TRUE']}, ), ] py_result = common_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be True self.assertIs(True, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_begins_with_0_and_enabled_bit_mask(self): user_id = '0123456' bitmask = '225' expected_bitmask = 225 result = [ ( 'cn=dummy,dc=example,dc=com', {'user_id': [user_id], 'enabled': [bitmask]}, ), ] py_result = common_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225 self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_bitmask_begins_with_0(self): user_id = '0123456' bitmask = '0225' expected_bitmask = 225 result = [ ( 'cn=dummy,dc=example,dc=com', {'user_id': [user_id], 'enabled': [bitmask]}, ), ] py_result = common_ldap.convert_ldap_result(result) # The user id should be 0123456, and the enabled # flag should be 225, the 0 is dropped. 
self.assertEqual(expected_bitmask, py_result[0][1]['enabled'][0]) self.assertEqual(user_id, py_result[0][1]['user_id'][0]) def test_user_id_and_user_name_with_boolean_string(self): boolean_strings = [ 'TRUE', 'FALSE', 'true', 'false', 'True', 'False', 'TrUe', 'FaLse', ] for user_name in boolean_strings: user_id = uuid.uuid4().hex result = [ ( 'cn=dummy,dc=example,dc=com', {'user_id': [user_id], 'user_name': [user_name]}, ), ] py_result = common_ldap.convert_ldap_result(result) # The user name should still be a string value. self.assertEqual(user_name, py_result[0][1]['user_name'][0]) def test_user_id_attribute_is_uuid_in_byte_form(self): results = [ ( 'cn=alice,dc=example,dc=com', { 'cn': [b'cn=alice'], 'objectGUID': [ b'\xdd\xd8Rt\xee]bA\x8e(\xe39\x0b\xe1\xf8\xe8' ], 'email': [uuid.uuid4().hex], 'sn': [uuid.uuid4().hex], }, ) ] py_result = common_ldap.convert_ldap_result(results) exp_object_guid = '7452d8dd-5dee-4162-8e28-e3390be1f8e8' self.assertEqual(exp_object_guid, py_result[0][1]['objectGUID'][0]) class LDAPFilterQueryCompositionTest(unit.BaseTestCase): """These test cases test LDAP filter generation.""" def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.base_ldap = common_ldap.BaseLdap(self.config_fixture.conf) # The tests need an attribute mapping to use. self.attribute_name = uuid.uuid4().hex self.filter_attribute_name = uuid.uuid4().hex self.base_ldap.attribute_mapping = { self.attribute_name: self.filter_attribute_name } def test_return_query_with_no_hints(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # same string is returned if there are no hints. 
query = uuid.uuid4().hex self.assertEqual( query, self.base_ldap.filter_query(hints=hints, query=query) ) # make sure the default query is an empty string self.assertEqual('', self.base_ldap.filter_query(hints=hints)) def test_filter_with_empty_query_and_hints_set(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter( name=self.attribute_name, value=username, comparator='equals', case_sensitive=False, ) expected_ldap_filter = '(&({}={}))'.format( self.filter_attribute_name, username, ) self.assertEqual( expected_ldap_filter, self.base_ldap.filter_query(hints=hints) ) def test_filter_with_both_query_and_hints_set(self): hints = driver_hints.Hints() # NOTE: doesn't have to be a real query, we just need to make sure the # filter string is concatenated correctly query = uuid.uuid4().hex username = uuid.uuid4().hex expected_result = '(&%(query)s(%(user_name_attr)s=%(username)s))' % ( { 'query': query, 'user_name_attr': self.filter_attribute_name, 'username': username, } ) hints.add_filter(self.attribute_name, username) self.assertEqual( expected_result, self.base_ldap.filter_query(hints=hints, query=query), ) def test_filter_with_hints_and_query_is_none(self): hints = driver_hints.Hints() username = uuid.uuid4().hex hints.add_filter( name=self.attribute_name, value=username, comparator='equals', case_sensitive=False, ) expected_ldap_filter = '(&({}={}))'.format( self.filter_attribute_name, username, ) self.assertEqual( expected_ldap_filter, self.base_ldap.filter_query(hints=hints, query=None), ) class LDAPSizeLimitTest(unit.TestCase): """Test the size limit exceeded handling in keystone.common.ldap.core.""" def setUp(self): super().setUp() self.useFixture(ldapdb.LDAPDatabase()) self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = 
super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files @mock.patch.object(fakeldap.FakeLdap, 'search_s') def test_search_s_sizelimit_exceeded(self, mock_search_s): mock_search_s.side_effect = ldap.SIZELIMIT_EXCEEDED conn = PROVIDERS.identity_api.user.get_connection() self.assertRaises( ks_exception.LDAPSizeLimitExceeded, conn.search_s, 'dc=example,dc=test', ldap.SCOPE_SUBTREE, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/backends/test_sql.py0000664000175000017500000000441400000000000025263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures as db_fixtures from oslotest import base as test_base from keystone.common import sql from keystone.identity.backends import sql as sql_backend from keystone.tests.unit.identity.backends import test_base as id_test_base from keystone.tests.unit.ksfixtures import database class TestIdentityDriver( db_fixtures.OpportunisticDBTestMixin, test_base.BaseTestCase, id_test_base.IdentityDriverTests, ): expected_is_domain_aware = True expected_default_assignment_driver = 'sql' expected_is_sql = True expected_generates_uuids = True def setUp(self): super().setUp() self.engine = enginefacade.writer.get_engine() self.sessionmaker = enginefacade.writer.get_sessionmaker() # Set keystone's connection URL to be the test engine's url. Close # sqlite FK to avoid conflicting with sql upgrade test. database.initialize_sql_session( self.engine.url, enforce_sqlite_fks=False ) # Override keystone's context manager to be oslo.db's global context # manager. 
sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True self.addCleanup( setattr, sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False ) self.addCleanup(sql.cleanup) database._load_sqlalchemy_models() sql.ModelBase.metadata.create_all(bind=self.engine) self.driver = sql_backend.Identity() class MySQLOpportunisticIdentityDriverTestCase(TestIdentityDriver): FIXTURE = db_fixtures.MySQLOpportunisticFixture class PostgreSQLOpportunisticIdentityDriverTestCase(TestIdentityDriver): FIXTURE = db_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.566113 keystone-26.0.0/keystone/tests/unit/identity/shadow_users/0000775000175000017500000000000000000000000024004 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/shadow_users/__init__.py0000664000175000017500000000000000000000000026103 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/shadow_users/test_backend.py0000664000175000017500000002024200000000000027004 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock import uuid from oslo_utils import timeutils from keystone.common import provider_api from keystone.common import sql import keystone.conf from keystone import exception from keystone.identity.backends import sql_model as model from keystone.identity.shadow_backends import sql as shadow_sql from keystone.tests import unit CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class ShadowUsersBackendTests: def test_create_nonlocal_user_unique_constraint(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user_created = PROVIDERS.shadow_users_api.create_nonlocal_user(user) self.assertNotIn('password', user_created) self.assertEqual(user_created['id'], user['id']) self.assertEqual(user_created['domain_id'], user['domain_id']) self.assertEqual(user_created['name'], user['name']) new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_user['name'] = user['name'] self.assertRaises( exception.Conflict, PROVIDERS.shadow_users_api.create_nonlocal_user, new_user, ) def test_create_nonlocal_user_does_not_create_local_user(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_nonlocal_user = PROVIDERS.shadow_users_api.create_nonlocal_user( user ) user_ref = self._get_user_ref(new_nonlocal_user['id']) self.assertIsNone(user_ref.local_user) def test_nonlocal_user_unique_user_id_constraint(self): user_ref = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.shadow_users_api.create_nonlocal_user(user_ref) # attempt to create a nonlocal_user with the same user_id nonlocal_user = { 'domain_id': CONF.identity.default_domain_id, 'name': uuid.uuid4().hex, 'user_id': user['id'], } self.assertRaises( sql.DBDuplicateEntry, self._add_nonlocal_user, nonlocal_user ) def test_get_user(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user.pop('email') user.pop('password') user_created = PROVIDERS.shadow_users_api.create_nonlocal_user(user) 
self.assertEqual(user_created['id'], user['id']) user_found = PROVIDERS.shadow_users_api.get_user(user_created['id']) self.assertCountEqual(user_created, user_found) def test_create_federated_user_unique_constraint(self): user_dict = PROVIDERS.shadow_users_api.create_federated_user( self.domain_id, self.federated_user ) user_dict = PROVIDERS.shadow_users_api.get_user(user_dict["id"]) self.assertIsNotNone(user_dict["id"]) self.assertRaises( exception.Conflict, PROVIDERS.shadow_users_api.create_federated_user, self.domain_id, self.federated_user, ) def test_create_federated_user_domain(self): user = PROVIDERS.shadow_users_api.create_federated_user( self.domain_id, self.federated_user ) self.assertEqual(user['domain_id'], self.domain_id) def test_create_federated_user_email(self): user = PROVIDERS.shadow_users_api.create_federated_user( self.domain_id, self.federated_user, self.email ) self.assertEqual(user['email'], self.email) def test_get_federated_user(self): user_dict_create = PROVIDERS.shadow_users_api.create_federated_user( self.domain_id, self.federated_user ) user_dict_get = PROVIDERS.shadow_users_api.get_federated_user( self.federated_user["idp_id"], self.federated_user["protocol_id"], self.federated_user["unique_id"], ) self.assertCountEqual(user_dict_create, user_dict_get) self.assertEqual(user_dict_create["id"], user_dict_get["id"]) def test_update_federated_user_display_name(self): user_dict_create = PROVIDERS.shadow_users_api.create_federated_user( self.domain_id, self.federated_user ) new_display_name = uuid.uuid4().hex PROVIDERS.shadow_users_api.update_federated_user_display_name( self.federated_user["idp_id"], self.federated_user["protocol_id"], self.federated_user["unique_id"], new_display_name, ) user_ref = PROVIDERS.shadow_users_api._get_federated_user( self.federated_user["idp_id"], self.federated_user["protocol_id"], self.federated_user["unique_id"], ) self.assertEqual( user_ref.federated_users[0].display_name, new_display_name ) 
self.assertEqual(user_dict_create["id"], user_ref.id) def test_set_last_active_at(self): self.config_fixture.config( group='security_compliance', disable_user_account_days_inactive=90 ) now = timeutils.utcnow().date() password = uuid.uuid4().hex user = self._create_user(password) with self.make_request(): user_auth = PROVIDERS.identity_api.authenticate( user_id=user['id'], password=password ) user_ref = self._get_user_ref(user_auth['id']) self.assertGreaterEqual(now, user_ref.last_active_at) def test_set_last_active_at_on_non_existing_user(self): self.config_fixture.config( group='security_compliance', disable_user_account_days_inactive=90 ) password = uuid.uuid4().hex user = self._create_user(password) # the user can be deleted while authentication is running; to imitate # this, set_last_active_at is mocked to delete the user and then run # normally real_last_active_at = shadow_sql.ShadowUsers.set_last_active_at test_self = self def fake_last_active_at(self, user_id): test_self._delete_user(user_id) real_last_active_at(self, user_id) with mock.patch.object( shadow_sql.ShadowUsers, 'set_last_active_at', fake_last_active_at ): with self.make_request(): # the call is expected to just succeed without exceptions PROVIDERS.identity_api.authenticate( user_id=user['id'], password=password ) def test_set_last_active_at_when_config_setting_is_none(self): self.config_fixture.config( group='security_compliance', disable_user_account_days_inactive=None, ) now = timeutils.utcnow().date() password = uuid.uuid4().hex user = self._create_user(password) with self.make_request(): user_auth = PROVIDERS.identity_api.authenticate( user_id=user['id'], password=password ) user_ref = self._get_user_ref(user_auth['id']) self.assertGreaterEqual(now, user_ref.last_active_at) def _add_nonlocal_user(self, nonlocal_user): with sql.session_for_write() as session: nonlocal_user_ref = model.NonLocalUser.from_dict(nonlocal_user) session.add(nonlocal_user_ref) def _create_user(self, password): user 
= { 'name': uuid.uuid4().hex, 'domain_id': self.domain_id, 'enabled': True, 'password': password, } return PROVIDERS.identity_api.create_user(user) def _delete_user(self, user_id): return PROVIDERS.identity_api.delete_user(user_id) def _get_user_ref(self, user_id): with sql.session_for_read() as session: return session.get(model.User, user_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/shadow_users/test_core.py0000664000175000017500000001134100000000000026345 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import uuid from keystone.common import driver_hints from keystone.common import provider_api PROVIDERS = provider_api.ProviderAPIs class ShadowUsersCoreTests: def test_shadow_federated_user(self): federated_user1 = copy.deepcopy(self.federated_user) ShadowUsersCoreTests.normalize_federated_user_properties_for_test( federated_user1, email=self.email ) user = PROVIDERS.identity_api.shadow_federated_user( self.federated_user['idp_id'], self.federated_user['protocol_id'], federated_user1, ) self.assertIsNotNone(user['id']) self.assertEqual(7, len(user.keys())) self.assertIsNotNone(user['name']) self.assertIsNone(user['password_expires_at']) self.assertIsNotNone(user['domain_id']) # NOTE(breton): below, attribute `enabled` is explicitly tested to be # equal True. 
assertTrue should not be used, because it converts # the passed value to bool(). self.assertEqual(True, user['enabled']) self.assertIsNotNone(user['email']) def test_shadow_existing_federated_user(self): federated_user1 = copy.deepcopy(self.federated_user) ShadowUsersCoreTests.normalize_federated_user_properties_for_test( federated_user1, email=self.email ) # introduce the user to keystone for the first time shadow_user1 = PROVIDERS.identity_api.shadow_federated_user( self.federated_user['idp_id'], self.federated_user['protocol_id'], federated_user1, ) self.assertEqual(federated_user1['display_name'], shadow_user1['name']) # shadow the user again, with another name to invalidate the cache # internally, this operation causes request to the driver. It should # not fail. federated_user2 = copy.deepcopy(self.federated_user) federated_user2['display_name'] = uuid.uuid4().hex ShadowUsersCoreTests.normalize_federated_user_properties_for_test( federated_user2, email=self.email ) shadow_user2 = PROVIDERS.identity_api.shadow_federated_user( self.federated_user['idp_id'], self.federated_user['protocol_id'], federated_user2, ) self.assertEqual(federated_user2['display_name'], shadow_user2['name']) self.assertNotEqual(shadow_user1['name'], shadow_user2['name']) # The shadowed users still share the same unique ID. 
self.assertEqual(shadow_user1['id'], shadow_user2['id']) def test_shadow_federated_user_not_creating_a_local_user(self): federated_user1 = copy.deepcopy(self.federated_user) ShadowUsersCoreTests.normalize_federated_user_properties_for_test( federated_user1, email="some_id@mail.provider" ) PROVIDERS.identity_api.shadow_federated_user( federated_user1['idp_id'], federated_user1['protocol_id'], federated_user1, ) hints = driver_hints.Hints() hints.add_filter('name', federated_user1['display_name']) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(1, len(users)) federated_user2 = copy.deepcopy(federated_user1) # Avoid caching federated_user2['name'] = uuid.uuid4().hex federated_user2['id'] = uuid.uuid4().hex federated_user2['email'] = "some_id_2@mail.provider" PROVIDERS.identity_api.shadow_federated_user( federated_user2['idp_id'], federated_user2['protocol_id'], federated_user2, ) hints.add_filter('name', federated_user2['display_name']) users = PROVIDERS.identity_api.list_users(hints=hints) # The number os users must remain 1 self.assertEqual(1, len(users)) @staticmethod def normalize_federated_user_properties_for_test( federated_user, email=None ): federated_user['email'] = email federated_user['id'] = federated_user['unique_id'] federated_user['name'] = federated_user['display_name'] if not federated_user.get('domain'): federated_user['domain'] = {'id': uuid.uuid4().hex} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/test_backend_sql.py0000664000175000017500000012757000000000000025171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid import freezegun from oslo_utils import timeutils import passlib.hash from keystone.common import password_hashing from keystone.common import provider_api from keystone.common import resource_options from keystone.common import sql import keystone.conf from keystone import exception from keystone.identity.backends import base from keystone.identity.backends import resource_options as iro from keystone.identity.backends import sql_model as model from keystone.tests.unit import test_backend_sql CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class UserPasswordCreatedAtIntTests(test_backend_sql.SqlTests): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='security_compliance', password_expires_days=1 ) def test_user_password_created_expired_at_int_matches_created_at(self): with sql.session_for_read() as session: user_ref = PROVIDERS.identity_api._get_user( session, self.user_foo['id'] ) self.assertIsNotNone(user_ref.password_ref._created_at) self.assertIsNotNone(user_ref.password_ref._expires_at) self.assertEqual( user_ref.password_ref._created_at, user_ref.password_ref.created_at_int, ) self.assertEqual( user_ref.password_ref._expires_at, user_ref.password_ref.expires_at_int, ) self.assertEqual( user_ref.password_ref.created_at, user_ref.password_ref.created_at_int, ) self.assertEqual( user_ref.password_ref.expires_at, user_ref.password_ref.expires_at_int, ) class UserPasswordHashingTestsNoCompat(test_backend_sql.SqlTests): def config_overrides(self): super().config_overrides() 
self.config_fixture.config( group='identity', password_hash_algorithm='scrypt' ) def test_configured_algorithm_used(self): with sql.session_for_read() as session: user_ref = PROVIDERS.identity_api._get_user( session, self.user_foo['id'] ) self.assertEqual( passlib.hash.scrypt, password_hashing._get_hasher_from_ident(user_ref.password), ) class UserResourceOptionTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() # RESET STATE OF REGISTRY OPTIONS self.addCleanup(iro.register_user_options) self.addCleanup(iro.USER_OPTIONS_REGISTRY._registered_options.clear) self.option1 = resource_options.ResourceOption('opt1', 'option1') self.option2 = resource_options.ResourceOption('opt2', 'option2') self.cleanup_instance('option1', 'option2') iro.USER_OPTIONS_REGISTRY._registered_options.clear() iro.USER_OPTIONS_REGISTRY.register_option(self.option1) iro.USER_OPTIONS_REGISTRY.register_option(self.option2) def test_user_set_option_in_resource_option(self): user = self._create_user(self._get_user_dict()) opt_value = uuid.uuid4().hex user['options'][self.option1.option_name] = opt_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) raw_ref = self._get_user_ref(user['id']) self.assertIn(self.option1.option_id, raw_ref._resource_option_mapper) self.assertEqual( opt_value, raw_ref._resource_option_mapper[ self.option1.option_id ].option_value, ) api_get_ref = PROVIDERS.identity_api.get_user(user['id']) # Ensure options are properly set in a .get_user call. 
self.assertEqual( opt_value, api_get_ref['options'][self.option1.option_name] ) def test_user_add_update_delete_option_in_resource_option(self): user = self._create_user(self._get_user_dict()) opt_value = uuid.uuid4().hex new_opt_value = uuid.uuid4().hex # Update user to add the new value option user['options'][self.option1.option_name] = opt_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) # Update the option Value and confirm it is updated user['options'][self.option1.option_name] = new_opt_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( new_opt_value, new_ref['options'][self.option1.option_name] ) # Set the option value to None, meaning delete the option user['options'][self.option1.option_name] = None new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertNotIn(self.option1.option_name, new_ref['options']) def test_user_add_delete_resource_option_existing_option_values(self): user = self._create_user(self._get_user_dict()) opt_value = uuid.uuid4().hex opt2_value = uuid.uuid4().hex # Update user to add the new value option user['options'][self.option1.option_name] = opt_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) # Update the option value for option 2 and confirm it is updated and # option1's value remains the same. Option 1 is not specified in the # updated user ref. 
del user['options'][self.option1.option_name] user['options'][self.option2.option_name] = opt2_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) self.assertEqual( opt2_value, new_ref['options'][self.option2.option_name] ) raw_ref = self._get_user_ref(user['id']) self.assertEqual( opt_value, raw_ref._resource_option_mapper[ self.option1.option_id ].option_value, ) self.assertEqual( opt2_value, raw_ref._resource_option_mapper[ self.option2.option_id ].option_value, ) # Set the option value to None, meaning delete the option, ensure # option 2 still remains and has the right value user['options'][self.option1.option_name] = None new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertNotIn(self.option1.option_name, new_ref['options']) self.assertEqual( opt2_value, new_ref['options'][self.option2.option_name] ) raw_ref = self._get_user_ref(user['id']) self.assertNotIn( raw_ref._resource_option_mapper, self.option1.option_id ) self.assertEqual( opt2_value, raw_ref._resource_option_mapper[ self.option2.option_id ].option_value, ) def test_unregistered_resource_option_deleted(self): user = self._create_user(self._get_user_dict()) opt_value = uuid.uuid4().hex opt2_value = uuid.uuid4().hex # Update user to add the new value option user['options'][self.option1.option_name] = opt_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) # Update the option value for option 2 and confirm it is updated and # option1's value remains the same. Option 1 is not specified in the # updated user ref. 
del user['options'][self.option1.option_name] user['options'][self.option2.option_name] = opt2_value new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) self.assertEqual( opt2_value, new_ref['options'][self.option2.option_name] ) raw_ref = self._get_user_ref(user['id']) self.assertEqual( opt_value, raw_ref._resource_option_mapper[ self.option1.option_id ].option_value, ) self.assertEqual( opt2_value, raw_ref._resource_option_mapper[ self.option2.option_id ].option_value, ) # clear registered options and only re-register option1, update user # and confirm option2 is gone from the ref and returned dict iro.USER_OPTIONS_REGISTRY._registered_options.clear() iro.USER_OPTIONS_REGISTRY.register_option(self.option1) user['name'] = uuid.uuid4().hex new_ref = PROVIDERS.identity_api.update_user(user['id'], user) self.assertNotIn(self.option2.option_name, new_ref['options']) self.assertEqual( opt_value, new_ref['options'][self.option1.option_name] ) raw_ref = self._get_user_ref(user['id']) self.assertNotIn( raw_ref._resource_option_mapper, self.option2.option_id ) self.assertEqual( opt_value, raw_ref._resource_option_mapper[ self.option1.option_id ].option_value, ) def _get_user_ref(self, user_id): with sql.session_for_read() as session: return session.get(model.User, user_id) def _create_user(self, user_dict): user_dict['id'] = uuid.uuid4().hex with sql.session_for_write() as session: user_ref = model.User.from_dict(user_dict) session.add(user_ref) return base.filter_user(user_ref.to_dict()) def _get_user_dict(self): user = { 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': uuid.uuid4().hex, } return user class DisableInactiveUserTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.password = uuid.uuid4().hex self.user_dict = self._get_user_dict(self.password) self.max_inactive_days = 90 self.config_fixture.config( 
group='security_compliance', disable_user_account_days_inactive=self.max_inactive_days, ) def test_authenticate_user_disabled_due_to_inactivity(self): # create user and set last_active_at beyond the max last_active_at = timeutils.utcnow() - datetime.timedelta( days=self.max_inactive_days + 1 ) user = self._create_user(self.user_dict, last_active_at.date()) with self.make_request(): self.assertRaises( exception.UserDisabled, PROVIDERS.identity_api.authenticate, user_id=user['id'], password=self.password, ) # verify that the user is actually disabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertFalse(user['enabled']) # set the user to enabled and authenticate user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user = PROVIDERS.identity_api.authenticate( user_id=user['id'], password=self.password ) self.assertTrue(user['enabled']) def test_authenticate_user_not_disabled_due_to_inactivity(self): # create user and set last_active_at just below the max last_active_at = ( timeutils.utcnow() - datetime.timedelta(days=self.max_inactive_days - 1) ).date() user = self._create_user(self.user_dict, last_active_at) with self.make_request(): user = PROVIDERS.identity_api.authenticate( user_id=user['id'], password=self.password ) self.assertTrue(user['enabled']) def test_get_user_disabled_due_to_inactivity(self): user = PROVIDERS.identity_api.create_user(self.user_dict) # set last_active_at just beyond the max last_active_at = ( timeutils.utcnow() - datetime.timedelta(self.max_inactive_days + 1) ).date() self._update_user_last_active_at(user['id'], last_active_at) # get user and verify that the user is actually disabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertFalse(user['enabled']) # set enabled and test user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user['enabled']) def test_get_user_not_disabled_due_to_inactivity(self): 
user = PROVIDERS.identity_api.create_user(self.user_dict) self.assertTrue(user['enabled']) # set last_active_at just below the max last_active_at = ( timeutils.utcnow() - datetime.timedelta(self.max_inactive_days - 1) ).date() self._update_user_last_active_at(user['id'], last_active_at) # get user and verify that the user is still enabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user['enabled']) def test_enabled_after_create_update_user(self): self.config_fixture.config( group='security_compliance', disable_user_account_days_inactive=90 ) # create user without enabled; assert enabled del self.user_dict['enabled'] user = PROVIDERS.identity_api.create_user(self.user_dict) user_ref = self._get_user_ref(user['id']) self.assertTrue(user_ref.enabled) now = timeutils.utcnow().date() self.assertGreaterEqual(now, user_ref.last_active_at) # set enabled and test user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user_ref = self._get_user_ref(user['id']) self.assertTrue(user_ref.enabled) # set disabled and test user['enabled'] = False PROVIDERS.identity_api.update_user(user['id'], user) user_ref = self._get_user_ref(user['id']) self.assertFalse(user_ref.enabled) # re-enable user and test user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user_ref = self._get_user_ref(user['id']) self.assertTrue(user_ref.enabled) def test_ignore_user_inactivity(self): self.user_dict['options'] = {'ignore_user_inactivity': True} user = PROVIDERS.identity_api.create_user(self.user_dict) # set last_active_at just beyond the max last_active_at = ( timeutils.utcnow() - datetime.timedelta(self.max_inactive_days + 1) ).date() self._update_user_last_active_at(user['id'], last_active_at) # get user and verify that the user is not disabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user['enabled']) def test_ignore_user_inactivity_with_user_disabled(self): user = 
PROVIDERS.identity_api.create_user(self.user_dict) # set last_active_at just beyond the max last_active_at = ( timeutils.utcnow() - datetime.timedelta(self.max_inactive_days + 1) ).date() self._update_user_last_active_at(user['id'], last_active_at) # get user and verify that the user is disabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertFalse(user['enabled']) # update disabled user with ignore_user_inactivity to true user['options'] = {'ignore_user_inactivity': True} user = PROVIDERS.identity_api.update_user(user['id'], user) # user is not enabled user = PROVIDERS.identity_api.get_user(user['id']) self.assertFalse(user['enabled']) # Manually set enabled and test user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user['enabled']) def _get_user_dict(self, password): user = { 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': password, } return user def _get_user_ref(self, user_id): with sql.session_for_read() as session: return session.get(model.User, user_id) def _create_user(self, user_dict, last_active_at): user_dict['id'] = uuid.uuid4().hex with sql.session_for_write() as session: user_ref = model.User.from_dict(user_dict) user_ref.last_active_at = last_active_at session.add(user_ref) return base.filter_user(user_ref.to_dict()) def _update_user_last_active_at(self, user_id, last_active_at): with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) user_ref.last_active_at = last_active_at return user_ref class PasswordHistoryValidationTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.max_cnt = 3 self.config_fixture.config( group='security_compliance', unique_last_password_count=self.max_cnt, ) def test_validate_password_history_with_invalid_password(self): password = uuid.uuid4().hex user = self._create_user(password) # Attempt to change to the same 
password with self.make_request(): self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user['id'], original_password=password, new_password=password, ) # Attempt to change to a unique password new_password = uuid.uuid4().hex self.assertValidChangePassword(user['id'], password, new_password) # Attempt to change back to the initial password self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user['id'], original_password=new_password, new_password=password, ) def test_validate_password_history_with_valid_password(self): passwords = [ uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, ] user = self._create_user(passwords[0]) self.assertValidChangePassword(user['id'], passwords[0], passwords[1]) self.assertValidChangePassword(user['id'], passwords[1], passwords[2]) self.assertValidChangePassword(user['id'], passwords[2], passwords[3]) # Now you should be able to change the password to match the initial # password because the password history only contains password elements # 1, 2, 3 self.assertValidChangePassword(user['id'], passwords[3], passwords[0]) def test_validate_password_history_with_valid_password_only_once(self): self.config_fixture.config( group='security_compliance', unique_last_password_count=1 ) passwords = [uuid.uuid4().hex, uuid.uuid4().hex] user = self._create_user(passwords[0]) self.assertValidChangePassword(user['id'], passwords[0], passwords[1]) self.assertValidChangePassword(user['id'], passwords[1], passwords[0]) def test_validate_password_history_but_start_with_password_none(self): passwords = [uuid.uuid4().hex, uuid.uuid4().hex] # Create user and confirm password is None user = self._create_user(None) user_ref = self._get_user_ref(user['id']) self.assertIsNone(user_ref.password) # Admin password reset user['password'] = passwords[0] PROVIDERS.identity_api.update_user(user['id'], user) # Self-service change password 
self.assertValidChangePassword(user['id'], passwords[0], passwords[1]) # Attempt to update with a previous password with self.make_request(): self.assertRaises( exception.PasswordValidationError, PROVIDERS.identity_api.change_password, user_id=user['id'], original_password=passwords[1], new_password=passwords[0], ) def test_disable_password_history_and_repeat_same_password(self): self.config_fixture.config( group='security_compliance', unique_last_password_count=0 ) password = uuid.uuid4().hex user = self._create_user(password) # Repeatedly change password with the same password self.assertValidChangePassword(user['id'], password, password) self.assertValidChangePassword(user['id'], password, password) def test_admin_password_reset_is_not_validated_by_password_history(self): passwords = [uuid.uuid4().hex, uuid.uuid4().hex] user = self._create_user(passwords[0]) # Attempt to change password to a unique password user['password'] = passwords[1] with self.make_request(): PROVIDERS.identity_api.update_user(user['id'], user) PROVIDERS.identity_api.authenticate( user_id=user['id'], password=passwords[1] ) # Attempt to change password with the same password user['password'] = passwords[1] PROVIDERS.identity_api.update_user(user['id'], user) PROVIDERS.identity_api.authenticate( user_id=user['id'], password=passwords[1] ) # Attempt to change password with the initial password user['password'] = passwords[0] PROVIDERS.identity_api.update_user(user['id'], user) PROVIDERS.identity_api.authenticate( user_id=user['id'], password=passwords[0] ) def test_truncate_passwords(self): user = self._create_user(uuid.uuid4().hex) self._add_passwords_to_history(user, n=4) user_ref = self._get_user_ref(user['id']) self.assertEqual( len(user_ref.local_user.passwords), (self.max_cnt + 1) ) def test_truncate_passwords_when_max_is_default(self): self.max_cnt = 1 expected_length = self.max_cnt + 1 self.config_fixture.config( group='security_compliance', unique_last_password_count=self.max_cnt, ) 
user = self._create_user(uuid.uuid4().hex) self._add_passwords_to_history(user, n=4) user_ref = self._get_user_ref(user['id']) self.assertEqual(len(user_ref.local_user.passwords), expected_length) # Start with multiple passwords and then change max_cnt to one self.max_cnt = 4 self.config_fixture.config( group='security_compliance', unique_last_password_count=self.max_cnt, ) self._add_passwords_to_history(user, n=self.max_cnt) user_ref = self._get_user_ref(user['id']) self.assertEqual( len(user_ref.local_user.passwords), (self.max_cnt + 1) ) self.max_cnt = 1 self.config_fixture.config( group='security_compliance', unique_last_password_count=self.max_cnt, ) self._add_passwords_to_history(user, n=1) user_ref = self._get_user_ref(user['id']) self.assertEqual(len(user_ref.local_user.passwords), expected_length) def test_truncate_passwords_when_max_is_default_and_no_password(self): expected_length = 1 self.max_cnt = 1 self.config_fixture.config( group='security_compliance', unique_last_password_count=self.max_cnt, ) user = { 'name': uuid.uuid4().hex, 'domain_id': 'default', 'enabled': True, } user = PROVIDERS.identity_api.create_user(user) self._add_passwords_to_history(user, n=1) user_ref = self._get_user_ref(user['id']) self.assertEqual(len(user_ref.local_user.passwords), expected_length) def _create_user(self, password): user = { 'name': uuid.uuid4().hex, 'domain_id': 'default', 'enabled': True, 'password': password, } return PROVIDERS.identity_api.create_user(user) def assertValidChangePassword(self, user_id, password, new_password): with self.make_request(): PROVIDERS.identity_api.change_password( user_id=user_id, original_password=password, new_password=new_password, ) PROVIDERS.identity_api.authenticate( user_id=user_id, password=new_password ) def _add_passwords_to_history(self, user, n): for _ in range(n): user['password'] = uuid.uuid4().hex PROVIDERS.identity_api.update_user(user['id'], user) def _get_user_ref(self, user_id): with sql.session_for_read() as 
session: return PROVIDERS.identity_api._get_user(session, user_id) class LockingOutUserTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.config_fixture.config( group='security_compliance', lockout_failure_attempts=6 ) self.config_fixture.config( group='security_compliance', lockout_duration=5 ) # create user self.password = uuid.uuid4().hex user_dict = { 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': self.password, } self.user = PROVIDERS.identity_api.create_user(user_dict) def test_locking_out_user_after_max_failed_attempts(self): with self.make_request(): # authenticate with wrong password self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) # authenticate with correct password PROVIDERS.identity_api.authenticate( user_id=self.user['id'], password=self.password ) # test locking out user after max failed attempts self._fail_auth_repeatedly(self.user['id']) self.assertRaises( exception.Unauthorized, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) def test_lock_out_for_ignored_user(self): # mark the user as exempt from failed password attempts # ignore user and reset password, password not expired self.user['options'][iro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name] = True PROVIDERS.identity_api.update_user(self.user['id'], self.user) # fail authentication repeatedly the max number of times self._fail_auth_repeatedly(self.user['id']) # authenticate with wrong password, account should not be locked with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) # authenticate with correct password, account should not be locked PROVIDERS.identity_api.authenticate( user_id=self.user['id'], password=self.password ) def test_set_enabled_unlocks_user(self): with self.make_request(): # lockout user 
self._fail_auth_repeatedly(self.user['id']) self.assertRaises( exception.Unauthorized, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) # set enabled, user should be unlocked self.user['enabled'] = True PROVIDERS.identity_api.update_user(self.user['id'], self.user) user_ret = PROVIDERS.identity_api.authenticate( user_id=self.user['id'], password=self.password ) self.assertTrue(user_ret['enabled']) def test_lockout_duration(self): # freeze time with freezegun.freeze_time(timeutils.utcnow()) as frozen_time: with self.make_request(): # lockout user self._fail_auth_repeatedly(self.user['id']) self.assertRaises( exception.Unauthorized, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) # freeze time past the duration, user should be unlocked and # failed auth count should get reset frozen_time.tick( delta=datetime.timedelta( seconds=CONF.security_compliance.lockout_duration + 1 ) ) PROVIDERS.identity_api.authenticate( user_id=self.user['id'], password=self.password ) # test failed auth count was reset by authenticating with the # wrong password, should raise an assertion error and not # account locked self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) def test_lockout_duration_failed_auth_cnt_resets(self): # freeze time with freezegun.freeze_time(timeutils.utcnow()) as frozen_time: with self.make_request(): # lockout user self._fail_auth_repeatedly(self.user['id']) self.assertRaises( exception.Unauthorized, PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) # freeze time past the duration, failed_auth_cnt should reset frozen_time.tick( delta=datetime.timedelta( seconds=CONF.security_compliance.lockout_duration + 1 ) ) # repeat failed auth the max times self._fail_auth_repeatedly(self.user['id']) # test user account is locked self.assertRaises( exception.Unauthorized, 
PROVIDERS.identity_api.authenticate, user_id=self.user['id'], password=uuid.uuid4().hex, ) def _fail_auth_repeatedly(self, user_id): wrong_password = uuid.uuid4().hex for _ in range(CONF.security_compliance.lockout_failure_attempts): with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=user_id, password=wrong_password, ) class PasswordExpiresValidationTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.password = uuid.uuid4().hex self.user_dict = self._get_test_user_dict(self.password) self.config_fixture.config( group='security_compliance', password_expires_days=90 ) def test_authenticate_with_expired_password(self): # set password created_at so that the password will expire password_created_at = timeutils.utcnow() - datetime.timedelta( days=CONF.security_compliance.password_expires_days + 1 ) user = self._create_user(self.user_dict, password_created_at) # test password is expired with self.make_request(): self.assertRaises( exception.PasswordExpired, PROVIDERS.identity_api.authenticate, user_id=user['id'], password=self.password, ) def test_authenticate_with_non_expired_password(self): # set password created_at so that the password will not expire password_created_at = timeutils.utcnow() - datetime.timedelta( days=CONF.security_compliance.password_expires_days - 1 ) user = self._create_user(self.user_dict, password_created_at) # test password is not expired with self.make_request(): PROVIDERS.identity_api.authenticate( user_id=user['id'], password=self.password ) def test_authenticate_with_expired_password_for_ignore_user_option(self): # set user to have the 'ignore_password_expiry' option set to False self.user_dict.setdefault('options', {})[ iro.IGNORE_PASSWORD_EXPIRY_OPT.option_name ] = False # set password created_at so that the password will expire password_created_at = timeutils.utcnow() - datetime.timedelta( days=CONF.security_compliance.password_expires_days + 1 ) user = 
self._create_user(self.user_dict, password_created_at) with self.make_request(): self.assertRaises( exception.PasswordExpired, PROVIDERS.identity_api.authenticate, user_id=user['id'], password=self.password, ) # update user to explicitly have the expiry option to True user['options'][iro.IGNORE_PASSWORD_EXPIRY_OPT.option_name] = True user = PROVIDERS.identity_api.update_user(user['id'], user) # test password is not expired due to ignore option PROVIDERS.identity_api.authenticate( user_id=user['id'], password=self.password ) def _get_test_user_dict(self, password): test_user_dict = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': password, } return test_user_dict def _create_user(self, user_dict, password_created_at): # Bypass business logic and go straight for the identity driver # (SQL in this case) driver = PROVIDERS.identity_api.driver driver.create_user(user_dict['id'], user_dict) with sql.session_for_write() as session: user_ref = session.get(model.User, user_dict['id']) user_ref.password_ref.created_at = password_created_at user_ref.password_ref.expires_at = ( user_ref._get_password_expires_at(password_created_at) ) return base.filter_user(user_ref.to_dict()) class MinimumPasswordAgeTests(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.config_fixture.config( group='security_compliance', minimum_password_age=1 ) self.initial_password = uuid.uuid4().hex self.user = self._create_new_user(self.initial_password) def test_user_cannot_change_password_before_min_age(self): # user can change password after create new_password = uuid.uuid4().hex self.assertValidChangePassword( self.user['id'], self.initial_password, new_password ) # user cannot change password before min age with self.make_request(): self.assertRaises( exception.PasswordAgeValidationError, PROVIDERS.identity_api.change_password, user_id=self.user['id'], original_password=new_password, 
new_password=uuid.uuid4().hex, ) def test_user_can_change_password_after_min_age(self): # user can change password after create new_password = uuid.uuid4().hex self.assertValidChangePassword( self.user['id'], self.initial_password, new_password ) # set password_created_at so that the min password age has past password_created_at = timeutils.utcnow() - datetime.timedelta( days=CONF.security_compliance.minimum_password_age + 1 ) self._update_password_created_at(self.user['id'], password_created_at) # user can change their password after min password age has past self.assertValidChangePassword( self.user['id'], new_password, uuid.uuid4().hex ) def test_user_can_change_password_after_admin_reset(self): # user can change password after create new_password = uuid.uuid4().hex self.assertValidChangePassword( self.user['id'], self.initial_password, new_password ) # user cannot change password before min age with self.make_request(): self.assertRaises( exception.PasswordAgeValidationError, PROVIDERS.identity_api.change_password, user_id=self.user['id'], original_password=new_password, new_password=uuid.uuid4().hex, ) # admin reset new_password = uuid.uuid4().hex self.user['password'] = new_password PROVIDERS.identity_api.update_user(self.user['id'], self.user) # user can change password after admin reset self.assertValidChangePassword( self.user['id'], new_password, uuid.uuid4().hex ) def assertValidChangePassword(self, user_id, password, new_password): with self.make_request(): PROVIDERS.identity_api.change_password( user_id=user_id, original_password=password, new_password=new_password, ) PROVIDERS.identity_api.authenticate( user_id=user_id, password=new_password ) def _create_new_user(self, password): user = { 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': password, } return PROVIDERS.identity_api.create_user(user) def _update_password_created_at(self, user_id, password_create_at): # User instance has an attribute 
password_ref. This attribute is used # in authentication. It always points to the last created password. The # order of passwords is determined by `created_at` field. # By changing `created_at`, this method interferes with password_ref # behaviour, making it return not last value. That's why all passwords # except the latest, need to have `created_at` slightly less than # the latest password. with sql.session_for_write() as session: user_ref = session.get(model.User, user_id) latest_password = user_ref.password_ref slightly_less = datetime.timedelta(minutes=1) for password_ref in user_ref.local_user.passwords: password_ref.created_at = password_create_at - slightly_less latest_password.created_at = password_create_at class ChangePasswordRequiredAfterFirstUse(test_backend_sql.SqlTests): def _create_user(self, password, change_password_upon_first_use): self.config_fixture.config( group='security_compliance', change_password_upon_first_use=change_password_upon_first_use, ) user_dict = { 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, 'enabled': True, 'password': password, } return PROVIDERS.identity_api.create_user(user_dict) def assertPasswordIsExpired(self, user_id, password): with self.make_request(): self.assertRaises( exception.PasswordExpired, PROVIDERS.identity_api.authenticate, user_id=user_id, password=password, ) def assertPasswordIsNotExpired(self, user_id, password): with self.make_request(): PROVIDERS.identity_api.authenticate( user_id=user_id, password=password ) def test_password_expired_after_create(self): # create user, password expired initial_password = uuid.uuid4().hex user = self._create_user(initial_password, True) self.assertPasswordIsExpired(user['id'], initial_password) # change password (self-service), password not expired new_password = uuid.uuid4().hex with self.make_request(): PROVIDERS.identity_api.change_password( user['id'], initial_password, new_password ) self.assertPasswordIsNotExpired(user['id'], 
new_password) def test_password_expired_after_reset(self): # create user with feature disabled, password not expired initial_password = uuid.uuid4().hex user = self._create_user(initial_password, False) self.assertPasswordIsNotExpired(user['id'], initial_password) # enable change_password_upon_first_use self.config_fixture.config( group='security_compliance', change_password_upon_first_use=True ) # admin reset, password expired admin_password = uuid.uuid4().hex user['password'] = admin_password PROVIDERS.identity_api.update_user(user['id'], user) self.assertPasswordIsExpired(user['id'], admin_password) # change password (self-service), password not expired new_password = uuid.uuid4().hex with self.make_request(): PROVIDERS.identity_api.change_password( user['id'], admin_password, new_password ) self.assertPasswordIsNotExpired(user['id'], new_password) def test_password_not_expired_when_feature_disabled(self): # create user with feature disabled initial_password = uuid.uuid4().hex user = self._create_user(initial_password, False) self.assertPasswordIsNotExpired(user['id'], initial_password) # admin reset admin_password = uuid.uuid4().hex user['password'] = admin_password PROVIDERS.identity_api.update_user(user['id'], user) self.assertPasswordIsNotExpired(user['id'], admin_password) def test_password_not_expired_for_ignore_user(self): # create user with feature disabled, password not expired initial_password = uuid.uuid4().hex user = self._create_user(initial_password, False) self.assertPasswordIsNotExpired(user['id'], initial_password) # enable change_password_upon_first_use self.config_fixture.config( group='security_compliance', change_password_upon_first_use=True ) # ignore user and reset password, password not expired user['options'][iro.IGNORE_CHANGE_PASSWORD_OPT.option_name] = True admin_password = uuid.uuid4().hex user['password'] = admin_password PROVIDERS.identity_api.update_user(user['id'], user) self.assertPasswordIsNotExpired(user['id'], admin_password) 
# set ignore user to false and reset password, password is expired user['options'][iro.IGNORE_CHANGE_PASSWORD_OPT.option_name] = False admin_password = uuid.uuid4().hex user['password'] = admin_password PROVIDERS.identity_api.update_user(user['id'], user) self.assertPasswordIsExpired(user['id'], admin_password) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/test_backends.py0000664000175000017500000017165300000000000024476 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from testtools import matchers from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import filtering CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class IdentityTests: def _get_domain_fixture(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) return domain def _set_domain_scope(self, domain_id): # We only provide a domain scope if we have multiple drivers if CONF.identity.domain_specific_drivers_enabled: return domain_id def test_authenticate_bad_user(self): with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=uuid.uuid4().hex, password=self.user_foo['password'], ) def test_authenticate_bad_password(self): with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=self.user_foo['id'], password=uuid.uuid4().hex, ) def test_authenticate(self): with self.make_request(): user_ref = PROVIDERS.identity_api.authenticate( user_id=self.user_sna['id'], password=self.user_sna['password'] ) # NOTE(termie): the password field is left in user_sna to make # it easier to authenticate in tests, but should # not be returned by the api self.user_sna.pop('password') self.user_sna['enabled'] = True self.assertUserDictEqual(self.user_sna, user_ref) def test_authenticate_and_get_roles_no_metadata(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) # Remove user id. It is ignored by create_user() and will break the # subset test below. 
del user['id'] new_user = PROVIDERS.identity_api.create_user(user) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) PROVIDERS.assignment_api.add_role_to_user_and_project( new_user['id'], self.project_baz['id'], role_member['id'] ) with self.make_request(): user_ref = PROVIDERS.identity_api.authenticate( user_id=new_user['id'], password=user['password'] ) self.assertNotIn('password', user_ref) # NOTE(termie): the password field is left in user_sna to make # it easier to authenticate in tests, but should # not be returned by the api user.pop('password') self.assertLessEqual(user.items(), user_ref.items()) role_list = PROVIDERS.assignment_api.get_roles_for_user_and_project( new_user['id'], self.project_baz['id'] ) self.assertEqual(1, len(role_list)) self.assertIn(role_member['id'], role_list) def test_authenticate_if_no_password_set(self): id_ = uuid.uuid4().hex user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user) with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=id_, password='password', ) def test_create_unicode_user_name(self): unicode_name = 'name \u540d\u5b57' user = unit.new_user_ref( name=unicode_name, domain_id=CONF.identity.default_domain_id ) ref = PROVIDERS.identity_api.create_user(user) self.assertEqual(unicode_name, ref['name']) def test_get_user(self): user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id']) # NOTE(termie): the password field is left in user_foo to make # it easier to authenticate in tests, but should # not be returned by the api self.user_foo.pop('password') # NOTE(edmondsw): check that options is set, even if it's just an # empty dict, because otherwise auth will blow up for whatever # case misses this. 
self.assertIn('options', user_ref) self.assertDictEqual(self.user_foo, user_ref) def test_get_user_returns_required_attributes(self): user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id']) self.assertIn('id', user_ref) self.assertIn('name', user_ref) self.assertIn('enabled', user_ref) self.assertIn('password_expires_at', user_ref) @unit.skip_if_cache_disabled('identity') def test_cache_layer_get_user(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user) ref = PROVIDERS.identity_api.get_user_by_name( user['name'], user['domain_id'] ) # cache the result. PROVIDERS.identity_api.get_user(ref['id']) # delete bypassing identity api domain_id, driver, entity_id = ( PROVIDERS.identity_api._get_domain_driver_and_entity_id(ref['id']) ) driver.delete_user(entity_id) self.assertDictEqual(ref, PROVIDERS.identity_api.get_user(ref['id'])) PROVIDERS.identity_api.get_user.invalidate( PROVIDERS.identity_api, ref['id'] ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, ref['id'] ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) ref = PROVIDERS.identity_api.get_user_by_name( user['name'], user['domain_id'] ) user['description'] = uuid.uuid4().hex # cache the result. PROVIDERS.identity_api.get_user(ref['id']) # update using identity api and get back updated user. 
user_updated = PROVIDERS.identity_api.update_user(ref['id'], user) self.assertLessEqual( PROVIDERS.identity_api.get_user(ref['id']).items(), user_updated.items(), ) self.assertLessEqual( PROVIDERS.identity_api.get_user_by_name( ref['name'], ref['domain_id'] ).items(), user_updated.items(), ) def test_get_user_returns_not_found(self): self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, uuid.uuid4().hex, ) def test_get_user_by_name(self): user_ref = PROVIDERS.identity_api.get_user_by_name( self.user_foo['name'], CONF.identity.default_domain_id ) # NOTE(termie): the password field is left in user_foo to make # it easier to authenticate in tests, but should # not be returned by the api self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) @unit.skip_if_cache_disabled('identity') def test_cache_layer_get_user_by_name(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user) ref = PROVIDERS.identity_api.get_user_by_name( user['name'], user['domain_id'] ) # delete bypassing the identity api. 
domain_id, driver, entity_id = ( PROVIDERS.identity_api._get_domain_driver_and_entity_id(ref['id']) ) driver.delete_user(entity_id) self.assertDictEqual( ref, PROVIDERS.identity_api.get_user_by_name( user['name'], CONF.identity.default_domain_id ), ) PROVIDERS.identity_api.get_user_by_name.invalidate( PROVIDERS.identity_api, user['name'], CONF.identity.default_domain_id, ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user_by_name, user['name'], CONF.identity.default_domain_id, ) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) ref = PROVIDERS.identity_api.get_user_by_name( user['name'], user['domain_id'] ) user['description'] = uuid.uuid4().hex user_updated = PROVIDERS.identity_api.update_user(ref['id'], user) self.assertLessEqual( PROVIDERS.identity_api.get_user(ref['id']).items(), user_updated.items(), ) self.assertLessEqual( PROVIDERS.identity_api.get_user_by_name( ref['name'], ref['domain_id'] ).items(), user_updated.items(), ) def test_get_user_by_name_returns_not_found(self): self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id, ) def test_create_duplicate_user_name_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) self.assertRaises( exception.Conflict, PROVIDERS.identity_api.create_user, user ) def test_create_duplicate_user_name_in_different_domains(self): new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = unit.new_user_ref( name=user1['name'], domain_id=new_domain['id'] ) PROVIDERS.identity_api.create_user(user1) PROVIDERS.identity_api.create_user(user2) def test_move_user_between_domains(self): domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], 
domain1) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) user = unit.new_user_ref(domain_id=domain1['id']) user = PROVIDERS.identity_api.create_user(user) user['domain_id'] = domain2['id'] self.assertRaises( exception.ValidationError, PROVIDERS.identity_api.update_user, user['id'], user, ) def test_rename_duplicate_user_name_fails(self): user1 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user1) user2 = PROVIDERS.identity_api.create_user(user2) user2['name'] = user1['name'] self.assertRaises( exception.Conflict, PROVIDERS.identity_api.update_user, user2['id'], user2, ) def test_update_user_id_fails(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) original_id = user['id'] user['id'] = 'fake2' self.assertRaises( exception.ValidationError, PROVIDERS.identity_api.update_user, original_id, user, ) user_ref = PROVIDERS.identity_api.get_user(original_id) self.assertEqual(original_id, user_ref['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, 'fake2' ) def test_delete_user_with_group_project_domain_links(self): role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=project1['id'], role_id=role1['id'] ) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], domain_id=domain1['id'], 
role_id=role1['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=user1['id'], group_id=group1['id'] ) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], project_id=project1['id'] ) self.assertEqual(1, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=user1['id'], domain_id=domain1['id'] ) self.assertEqual(1, len(roles_ref)) PROVIDERS.identity_api.check_user_in_group( user_id=user1['id'], group_id=group1['id'] ) PROVIDERS.identity_api.delete_user(user1['id']) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, user1['id'], group1['id'], ) def test_delete_group_with_user_project_domain_links(self): role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) user1 = unit.new_user_ref(domain_id=domain1['id']) user1 = PROVIDERS.identity_api.create_user(user1) group1 = unit.new_group_ref(domain_id=domain1['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=project1['id'], role_id=role1['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=domain1['id'], role_id=role1['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=user1['id'], group_id=group1['id'] ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], project_id=project1['id'] ) self.assertEqual(1, len(roles_ref)) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=group1['id'], domain_id=domain1['id'] ) self.assertEqual(1, len(roles_ref)) PROVIDERS.identity_api.check_user_in_group( user_id=user1['id'], group_id=group1['id'] ) PROVIDERS.identity_api.delete_group(group1['id']) PROVIDERS.identity_api.get_user(user1['id']) def test_update_user_returns_not_found(self): 
user_id = uuid.uuid4().hex self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.update_user, user_id, {'id': user_id, 'domain_id': CONF.identity.default_domain_id}, ) def test_delete_user_returns_not_found(self): self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.delete_user, uuid.uuid4().hex, ) def test_create_user_with_long_password(self): user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, password='a' * 2000 ) # success create a user with long password PROVIDERS.identity_api.create_user(user) def test_create_user_missed_password(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) PROVIDERS.identity_api.get_user(user['id']) # Make sure the user is not allowed to login # with a password that is empty string or None with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=user['id'], password='', ) self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=user['id'], password=None, ) def test_create_user_none_password(self): user = unit.new_user_ref( password=None, domain_id=CONF.identity.default_domain_id ) user = PROVIDERS.identity_api.create_user(user) PROVIDERS.identity_api.get_user(user['id']) # Make sure the user is not allowed to login # with a password that is empty string or None with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=user['id'], password='', ) self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=user['id'], password=None, ) def test_list_users(self): users = PROVIDERS.identity_api.list_users( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id ) ) self.assertEqual(len(default_fixtures.USERS), len(users)) user_ids = {user['id'] for user in users} expected_user_ids = { getattr(self, 'user_%s' % user['name'])['id'] for user in default_fixtures.USERS } for user_ref 
in users: self.assertNotIn('password', user_ref) self.assertEqual(expected_user_ids, user_ids) def _build_hints(self, hints, filters, fed_dict): for key in filters: hints.add_filter(key, fed_dict[key], comparator='equals') return hints def _build_fed_resource(self): # create one test mapping, two idps and two protocols for federation # test. new_mapping = unit.new_mapping_ref() PROVIDERS.federation_api.create_mapping(new_mapping['id'], new_mapping) for idp_id, protocol_id in [('ORG_IDP', 'saml2'), ('myidp', 'mapped')]: new_idp = unit.new_identity_provider_ref( idp_id=idp_id, domain_id='default' ) new_protocol = unit.new_protocol_ref( protocol_id=protocol_id, idp_id=idp_id, mapping_id=new_mapping['id'], ) PROVIDERS.federation_api.create_idp(new_idp['id'], new_idp) PROVIDERS.federation_api.create_protocol( new_idp['id'], new_protocol['id'], new_protocol ) def _test_list_users_with_attribute(self, filters, fed_dict): self._build_fed_resource() domain = self._get_domain_fixture() # Call list_users while no match exists for the federated user hints = driver_hints.Hints() hints = self._build_hints(hints, filters, fed_dict) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(0, len(users)) # list_users with a new relational user and federated user hints = self._build_hints(hints, filters, fed_dict) PROVIDERS.shadow_users_api.create_federated_user( domain['id'], fed_dict ) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(1, len(users)) # create another federated user that shouldnt be matched and ensure # that still only one match is found hints = self._build_hints(hints, filters, fed_dict) fed_dict2 = unit.new_federated_user_ref() fed_dict2['idp_id'] = 'myidp' fed_dict2['protocol_id'] = 'mapped' PROVIDERS.shadow_users_api.create_federated_user( domain['id'], fed_dict2 ) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(1, len(users)) # create another federated user that should also be matched and ensure # 
that there are now two matches in the users list. Unless there is a # unique id in the filter since unique_ids must be unique and would # therefore cause a duplicate error. hints = self._build_hints(hints, filters, fed_dict) if not any('unique_id' in x['name'] for x in hints.filters): hints = self._build_hints(hints, filters, fed_dict) fed_dict3 = unit.new_federated_user_ref() # check which filters are here and create another match for filters_ in hints.filters: if filters_['name'] == 'idp_id': fed_dict3['idp_id'] = fed_dict['idp_id'] elif filters_['name'] == 'protocol_id': fed_dict3['protocol_id'] = fed_dict['protocol_id'] PROVIDERS.shadow_users_api.create_federated_user( domain['id'], fed_dict3 ) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(2, len(users)) def test_list_users_with_unique_id(self): federated_dict = unit.new_federated_user_ref() filters = ['unique_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_idp_id(self): federated_dict = unit.new_federated_user_ref() filters = ['idp_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_protocol_id(self): federated_dict = unit.new_federated_user_ref() filters = ['protocol_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_unique_id_and_idp_id(self): federated_dict = unit.new_federated_user_ref() filters = ['unique_id', 'idp_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_unique_id_and_protocol_id(self): federated_dict = unit.new_federated_user_ref() filters = ['unique_id', 'protocol_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_idp_id_protocol_id(self): federated_dict = unit.new_federated_user_ref() filters = ['idp_id', 'protocol_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_all_federated_attributes(self): federated_dict = 
unit.new_federated_user_ref() filters = ['unique_id', 'idp_id', 'protocol_id'] self._test_list_users_with_attribute(filters, federated_dict) def test_list_users_with_name(self): self._build_fed_resource() federated_dict_1 = unit.new_federated_user_ref( display_name='test1@federation.org' ) federated_dict_2 = unit.new_federated_user_ref( display_name='test2@federation.org' ) domain = self._get_domain_fixture() hints = driver_hints.Hints() hints.add_filter('name', 'test1@federation.org') users = self.identity_api.list_users(hints=hints) self.assertEqual(0, len(users)) self.shadow_users_api.create_federated_user( domain['id'], federated_dict_1 ) self.shadow_users_api.create_federated_user( domain['id'], federated_dict_2 ) hints = driver_hints.Hints() hints.add_filter('name', 'test1@federation.org') users = self.identity_api.list_users(hints=hints) self.assertEqual(1, len(users)) hints = driver_hints.Hints() hints.add_filter('name', 'test1@federation.org') hints.add_filter('idp_id', 'ORG_IDP') users = self.identity_api.list_users(hints=hints) self.assertEqual(1, len(users)) def test_list_groups(self): group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group1 = PROVIDERS.identity_api.create_group(group1) group2 = PROVIDERS.identity_api.create_group(group2) groups = PROVIDERS.identity_api.list_groups( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id ) ) self.assertEqual(2, len(groups)) group_ids = [] for group in groups: group_ids.append(group.get('id')) self.assertIn(group1['id'], group_ids) self.assertIn(group2['id'], group_ids) def test_create_user_doesnt_modify_passed_in_dict(self): new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) original_user = new_user.copy() PROVIDERS.identity_api.create_user(new_user) self.assertDictEqual(original_user, new_user) def test_update_user_enable(self): user = 
unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user_ref['enabled']) user['enabled'] = False PROVIDERS.identity_api.update_user(user['id'], user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertEqual(user['enabled'], user_ref['enabled']) # If not present, enabled field should not be updated del user['enabled'] PROVIDERS.identity_api.update_user(user['id'], user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertFalse(user_ref['enabled']) user['enabled'] = True PROVIDERS.identity_api.update_user(user['id'], user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertEqual(user['enabled'], user_ref['enabled']) del user['enabled'] PROVIDERS.identity_api.update_user(user['id'], user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertTrue(user_ref['enabled']) def test_update_user_name(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertEqual(user['name'], user_ref['name']) changed_name = user_ref['name'] + '_changed' user_ref['name'] = changed_name updated_user = PROVIDERS.identity_api.update_user( user_ref['id'], user_ref ) # NOTE(dstanek): the SQL backend adds an 'extra' field containing a # dictionary of the extra fields in addition to the # fields in the object. 
For the details see: # SqlIdentity.test_update_project_returns_extra updated_user.pop('extra', None) self.assertDictEqual(user_ref, updated_user) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertEqual(changed_name, user_ref['name']) def test_add_user_to_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id']) found = False for x in groups: if x['id'] == new_group['id']: found = True self.assertTrue(found) def test_add_user_to_group_returns_not_found(self): domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.add_user_to_group, new_user['id'], uuid.uuid4().hex, ) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.add_user_to_group, uuid.uuid4().hex, new_group['id'], ) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.add_user_to_group, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_check_user_in_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) PROVIDERS.identity_api.check_user_in_group( new_user['id'], new_group['id'] ) def test_check_user_not_in_group(self): new_group = unit.new_group_ref( 
domain_id=CONF.identity.default_domain_id ) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_user = PROVIDERS.identity_api.create_user(new_user) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, new_user['id'], new_group['id'], ) def test_check_user_in_group_returns_not_found(self): new_user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) new_user = PROVIDERS.identity_api.create_user(new_user) new_group = unit.new_group_ref( domain_id=CONF.identity.default_domain_id ) new_group = PROVIDERS.identity_api.create_group(new_group) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.check_user_in_group, uuid.uuid4().hex, new_group['id'], ) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.check_user_in_group, new_user['id'], uuid.uuid4().hex, ) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_list_users_in_group(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) # Make sure we get an empty list back on a new group, not an error. user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id']) self.assertEqual([], user_refs) # Make sure we get the correct users back once they have been added # to the group. 
new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id']) found = False for x in user_refs: if x['id'] == new_user['id']: found = True self.assertNotIn('password', x) self.assertTrue(found) def test_list_users_in_group_returns_not_found(self): self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.list_users_in_group, uuid.uuid4().hex, ) def test_list_groups_for_user(self): domain = self._get_domain_fixture() test_groups = [] test_users = [] GROUP_COUNT = 3 USER_COUNT = 2 for x in range(0, USER_COUNT): new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) test_users.append(new_user) positive_user = test_users[0] negative_user = test_users[1] for x in range(0, USER_COUNT): group_refs = PROVIDERS.identity_api.list_groups_for_user( test_users[x]['id'] ) self.assertEqual(0, len(group_refs)) for x in range(0, GROUP_COUNT): before_count = x after_count = x + 1 new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) test_groups.append(new_group) # add the user to the group and ensure that the # group count increases by one for each group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(before_count, len(group_refs)) PROVIDERS.identity_api.add_user_to_group( positive_user['id'], new_group['id'] ) group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(after_count, len(group_refs)) # Make sure the group count for the unrelated user did not change group_refs = PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) def test_remove_user_from_group(self): domain = self._get_domain_fixture() new_group = 
unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id']) self.assertIn(new_group['id'], [x['id'] for x in groups]) PROVIDERS.identity_api.remove_user_from_group( new_user['id'], new_group['id'] ) groups = PROVIDERS.identity_api.list_groups_for_user(new_user['id']) self.assertNotIn(new_group['id'], [x['id'] for x in groups]) def test_remove_user_from_group_returns_not_found(self): domain = self._get_domain_fixture() new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.remove_user_from_group, new_user['id'], uuid.uuid4().hex, ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.remove_user_from_group, uuid.uuid4().hex, new_group['id'], ) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.remove_user_from_group, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_group_crud(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) group = unit.new_group_ref(domain_id=domain['id']) group = PROVIDERS.identity_api.create_group(group) group_ref = PROVIDERS.identity_api.get_group(group['id']) self.assertLessEqual(group.items(), group_ref.items()) group['name'] = uuid.uuid4().hex PROVIDERS.identity_api.update_group(group['id'], group) group_ref = PROVIDERS.identity_api.get_group(group['id']) self.assertLessEqual(group.items(), group_ref.items()) PROVIDERS.identity_api.delete_group(group['id']) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.get_group, group['id'], ) def 
test_create_group_name_with_trailing_whitespace(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_name = group['name'] = group['name'] + ' ' group_returned = PROVIDERS.identity_api.create_group(group) self.assertEqual(group_returned['name'], group_name.strip()) def test_update_group_name_with_trailing_whitespace(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_create = PROVIDERS.identity_api.create_group(group) group_name = group['name'] = group['name'] + ' ' group_update = PROVIDERS.identity_api.update_group( group_create['id'], group ) self.assertEqual(group_update['id'], group_create['id']) self.assertEqual(group_update['name'], group_name.strip()) def test_get_group_by_name(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group_name = group['name'] group = PROVIDERS.identity_api.create_group(group) spoiler = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_group(spoiler) group_ref = PROVIDERS.identity_api.get_group_by_name( group_name, CONF.identity.default_domain_id ) self.assertDictEqual(group, group_ref) def test_get_group_by_name_returns_not_found(self): self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.get_group_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id, ) @unit.skip_if_cache_disabled('identity') def test_cache_layer_group_crud(self): group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) # cache the result group_ref = PROVIDERS.identity_api.get_group(group['id']) # delete the group bypassing identity api. 
domain_id, driver, entity_id = ( PROVIDERS.identity_api._get_domain_driver_and_entity_id( group['id'] ) ) driver.delete_group(entity_id) self.assertEqual( group_ref, PROVIDERS.identity_api.get_group(group['id']) ) PROVIDERS.identity_api.get_group.invalidate( PROVIDERS.identity_api, group['id'] ) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.get_group, group['id'], ) group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) # cache the result PROVIDERS.identity_api.get_group(group['id']) group['name'] = uuid.uuid4().hex group_ref = PROVIDERS.identity_api.update_group(group['id'], group) # after updating through identity api, get updated group self.assertLessEqual( PROVIDERS.identity_api.get_group(group['id']).items(), group_ref.items(), ) def test_create_duplicate_group_name_fails(self): group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref( domain_id=CONF.identity.default_domain_id, name=group1['name'] ) group1 = PROVIDERS.identity_api.create_group(group1) self.assertRaises( exception.Conflict, PROVIDERS.identity_api.create_group, group2 ) def test_create_duplicate_group_name_in_different_domains(self): new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) group1 = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group2 = unit.new_group_ref( domain_id=new_domain['id'], name=group1['name'] ) group1 = PROVIDERS.identity_api.create_group(group1) group2 = PROVIDERS.identity_api.create_group(group2) def test_move_group_between_domains(self): domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) group = unit.new_group_ref(domain_id=domain1['id']) group = PROVIDERS.identity_api.create_group(group) group['domain_id'] = domain2['id'] self.assertRaises( 
exception.ValidationError, PROVIDERS.identity_api.update_group, group['id'], group, ) def test_user_crud(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) del user_dict['id'] user = PROVIDERS.identity_api.create_user(user_dict) user_ref = PROVIDERS.identity_api.get_user(user['id']) del user_dict['password'] user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertLessEqual(user_dict.items(), user_ref_dict.items()) user_dict['password'] = uuid.uuid4().hex PROVIDERS.identity_api.update_user(user['id'], user_dict) user_ref = PROVIDERS.identity_api.get_user(user['id']) del user_dict['password'] user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertLessEqual(user_dict.items(), user_ref_dict.items()) PROVIDERS.identity_api.delete_user(user['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, user['id'] ) def test_arbitrary_attributes_are_returned_from_create_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, arbitrary_attr=attr_value, ) user = PROVIDERS.identity_api.create_user(user_data) self.assertEqual(attr_value, user['arbitrary_attr']) def test_arbitrary_attributes_are_returned_from_get_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, arbitrary_attr=attr_value, ) user_data = PROVIDERS.identity_api.create_user(user_data) user = PROVIDERS.identity_api.get_user(user_data['id']) self.assertEqual(attr_value, user['arbitrary_attr']) def test_new_arbitrary_attributes_are_returned_from_update_user(self): user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) user = PROVIDERS.identity_api.create_user(user_data) attr_value = uuid.uuid4().hex user['arbitrary_attr'] = attr_value updated_user = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual(attr_value, updated_user['arbitrary_attr']) def 
test_updated_arbitrary_attributes_are_returned_from_update_user(self): attr_value = uuid.uuid4().hex user_data = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, arbitrary_attr=attr_value, ) new_attr_value = uuid.uuid4().hex user = PROVIDERS.identity_api.create_user(user_data) user['arbitrary_attr'] = new_attr_value updated_user = PROVIDERS.identity_api.update_user(user['id'], user) self.assertEqual(new_attr_value, updated_user['arbitrary_attr']) def test_user_update_and_user_get_return_same_response(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) updated_user = {'enabled': False} updated_user_ref = PROVIDERS.identity_api.update_user( user['id'], updated_user ) # SQL backend adds 'extra' field updated_user_ref.pop('extra', None) self.assertIs(False, updated_user_ref['enabled']) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertDictEqual(updated_user_ref, user_ref) @unit.skip_if_no_multiple_domains_support def test_list_domains_filtered_and_limited(self): # The test is designed for multiple domains only def create_domains(domain_count, domain_name_prefix): for _ in range(domain_count): domain_name = '{}-{}'.format( domain_name_prefix, uuid.uuid4().hex ) domain = unit.new_domain_ref(name=domain_name) self.domain_list[domain_name] = ( PROVIDERS.resource_api.create_domain(domain['id'], domain) ) def clean_up_domains(): for _, domain in self.domain_list.items(): domain['enabled'] = False PROVIDERS.resource_api.update_domain(domain['id'], domain) PROVIDERS.resource_api.delete_domain(domain['id']) self.domain_list = {} create_domains(2, 'domaingroup1') create_domains(3, 'domaingroup2') self.addCleanup(clean_up_domains) unfiltered_domains = PROVIDERS.resource_api.list_domains() # Should get back just 4 entities self.config_fixture.config(list_limit=4) hints = driver_hints.Hints() entities = PROVIDERS.resource_api.list_domains(hints=hints) self.assertThat(entities, 
matchers.HasLength(hints.limit['limit'])) self.assertTrue(hints.limit['truncated']) # Get one exact item from the list hints = driver_hints.Hints() hints.add_filter('name', unfiltered_domains[3]['name']) entities = PROVIDERS.resource_api.list_domains(hints=hints) self.assertThat(entities, matchers.HasLength(1)) self.assertEqual(entities[0], unfiltered_domains[3]) # Get 2 entries hints = driver_hints.Hints() hints.add_filter('name', 'domaingroup1', comparator='startswith') entities = PROVIDERS.resource_api.list_domains(hints=hints) self.assertThat(entities, matchers.HasLength(2)) self.assertThat( entities[0]['name'], matchers.StartsWith('domaingroup1') ) self.assertThat( entities[1]['name'], matchers.StartsWith('domaingroup1') ) @unit.skip_if_no_multiple_domains_support def test_list_limit_for_domains(self): def create_domains(count): for _ in range(count): domain = unit.new_domain_ref() self.domain_list.append( PROVIDERS.resource_api.create_domain(domain['id'], domain) ) def clean_up_domains(): for domain in self.domain_list: PROVIDERS.resource_api.update_domain( domain['id'], {'enabled': False} ) PROVIDERS.resource_api.delete_domain(domain['id']) self.domain_list = [] create_domains(6) self.addCleanup(clean_up_domains) for x in range(1, 7): self.config_fixture.config(group='resource', list_limit=x) hints = driver_hints.Hints() entities = PROVIDERS.resource_api.list_domains(hints=hints) self.assertThat(entities, matchers.HasLength(hints.limit['limit'])) class FilterTests(filtering.FilterTests): def test_list_entities_filtered(self): for entity in ['user', 'group', 'project']: # Create 20 entities entity_list = self._create_test_data(entity, 20) # Try filtering to get one an exact item out of the list hints = driver_hints.Hints() hints.add_filter('name', entity_list[10]['name']) entities = self._list_entities(entity)(hints=hints) self.assertEqual(1, len(entities)) self.assertEqual(entity_list[10]['id'], entities[0]['id']) # Check the driver has removed the filter 
from the list hints self.assertFalse(hints.get_exact_filter_by_name('name')) self._delete_test_data(entity, entity_list) def test_list_users_inexact_filtered(self): # Create 20 users, some with specific names. We set the names at create # time (rather than updating them), since the LDAP driver does not # support name updates. user_name_data = { # user index: name for user 5: 'The', 6: 'The Ministry', 7: 'The Ministry of', 8: 'The Ministry of Silly', 9: 'The Ministry of Silly Walks', # ...and one for useful case insensitivity testing 10: 'The ministry of silly walks OF', } user_list = self._create_test_data( 'user', 20, domain_id=CONF.identity.default_domain_id, name_dict=user_name_data, ) hints = driver_hints.Hints() hints.add_filter('name', 'ministry', comparator='contains') users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(5, len(users)) self._match_with_list(users, user_list, list_start=6, list_end=11) # TODO(henry-nash) Check inexact filter has been removed. hints = driver_hints.Hints() hints.add_filter('name', 'The', comparator='startswith') users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(6, len(users)) self._match_with_list(users, user_list, list_start=5, list_end=11) # TODO(henry-nash) Check inexact filter has been removed. hints = driver_hints.Hints() hints.add_filter('name', 'of', comparator='endswith') users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(2, len(users)) # We can't assume we will get back the users in any particular order self.assertIn(user_list[7]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[10]['id'], [users[0]['id'], users[1]['id']]) # TODO(henry-nash) Check inexact filter has been removed. # TODO(henry-nash): Add some case sensitive tests. However, # these would be hard to validate currently, since: # # For SQL, the issue is that MySQL 0.7, by default, is installed in # case insensitive mode (which is what is run by default for our # SQL backend tests). 
For production deployments. OpenStack # assumes a case sensitive database. For these tests, therefore, we # need to be able to check the sensitivity of the database so as to # know whether to run case sensitive tests here. # # For LDAP/AD, although dependent on the schema being used, attributes # are typically configured to be case aware, but not case sensitive. self._delete_test_data('user', user_list) def _groups_for_user_data(self): number_of_groups = 10 group_name_data = { # entity index: name for entity 5: 'The', 6: 'The Ministry', 9: 'The Ministry of Silly Walks', } group_list = self._create_test_data( 'group', number_of_groups, domain_id=CONF.identity.default_domain_id, name_dict=group_name_data, ) user_list = self._create_test_data('user', 2) for group in range(7): # Create membership, including with two out of the three groups # with well know names PROVIDERS.identity_api.add_user_to_group( user_list[0]['id'], group_list[group]['id'] ) # ...and some spoiler memberships for group in range(7, number_of_groups): PROVIDERS.identity_api.add_user_to_group( user_list[1]['id'], group_list[group]['id'] ) return group_list, user_list def test_groups_for_user_inexact_filtered(self): """Test use of filtering doesn't break groups_for_user listing. Some backends may use filtering to achieve the list of groups for a user, so test that it can combine a second filter. Test Plan: - Create 10 groups, some with names we can filter on - Create 2 users - Assign 1 of those users to most of the groups, including some of the well known named ones - Assign the other user to other groups as spoilers - Ensure that when we list groups for users with a filter on the group name, both restrictions have been enforced on what is returned. 
""" group_list, user_list = self._groups_for_user_data() hints = driver_hints.Hints() hints.add_filter('name', 'Ministry', comparator='contains') groups = PROVIDERS.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints ) # We should only get back one group, since of the two that contain # 'Ministry' the user only belongs to one. self.assertThat(len(groups), matchers.Equals(1)) self.assertEqual(group_list[6]['id'], groups[0]['id']) hints = driver_hints.Hints() hints.add_filter('name', 'The', comparator='startswith') groups = PROVIDERS.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints ) # We should only get back 2 out of the 3 groups that start with 'The' # hence showing that both "filters" have been applied self.assertThat(len(groups), matchers.Equals(2)) self.assertIn(group_list[5]['id'], [groups[0]['id'], groups[1]['id']]) self.assertIn(group_list[6]['id'], [groups[0]['id'], groups[1]['id']]) hints.add_filter('name', 'The', comparator='endswith') groups = PROVIDERS.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints ) # We should only get back one group since it is the only one that # ends with 'The' self.assertThat(len(groups), matchers.Equals(1)) self.assertEqual(group_list[5]['id'], groups[0]['id']) self._delete_test_data('user', user_list) self._delete_test_data('group', group_list) def test_groups_for_user_exact_filtered(self): """Test exact filters doesn't break groups_for_user listing.""" group_list, user_list = self._groups_for_user_data() hints = driver_hints.Hints() hints.add_filter('name', 'The Ministry', comparator='equals') groups = PROVIDERS.identity_api.list_groups_for_user( user_list[0]['id'], hints=hints ) # We should only get back 1 out of the 3 groups with name 'The # Ministry' hence showing that both "filters" have been applied. 
self.assertEqual(1, len(groups)) self.assertEqual(group_list[6]['id'], groups[0]['id']) self._delete_test_data('user', user_list) self._delete_test_data('group', group_list) def _get_user_name_field_size(self): """Return the size of the user name field for the backend. Subclasses can override this method to indicate that the user name field is limited in length. The user name is the field used in the test that validates that a filter value works even if it's longer than a field. If the backend doesn't limit the value length then return None. """ return None def test_filter_value_wider_than_field(self): # If a filter value is given that's larger than the field in the # backend then no values are returned. user_name_field_size = self._get_user_name_field_size() if user_name_field_size is None: # The backend doesn't limit the size of the user name, so pass this # test. return # Create some users just to make sure would return something if the # filter was ignored. self._create_test_data('user', 2) hints = driver_hints.Hints() value = 'A' * (user_name_field_size + 1) hints.add_filter('name', value) users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual([], users) def _list_users_in_group_data(self): number_of_users = 10 user_name_data = { 1: 'Arthur Conan Doyle', 3: 'Arthur Rimbaud', 9: 'Arthur Schopenhauer', } user_list = self._create_test_data( 'user', number_of_users, domain_id=CONF.identity.default_domain_id, name_dict=user_name_data, ) group = self._create_one_entity( 'group', CONF.identity.default_domain_id, 'Great Writers' ) for i in range(7): PROVIDERS.identity_api.add_user_to_group( user_list[i]['id'], group['id'] ) return user_list, group def test_list_users_in_group_inexact_filtered(self): user_list, group = self._list_users_in_group_data() hints = driver_hints.Hints() hints.add_filter('name', 'Arthur', comparator='contains') users = PROVIDERS.identity_api.list_users_in_group( group['id'], hints=hints ) self.assertThat(len(users), 
matchers.Equals(2)) self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']]) hints = driver_hints.Hints() hints.add_filter('name', 'Arthur', comparator='startswith') users = PROVIDERS.identity_api.list_users_in_group( group['id'], hints=hints ) self.assertThat(len(users), matchers.Equals(2)) self.assertIn(user_list[1]['id'], [users[0]['id'], users[1]['id']]) self.assertIn(user_list[3]['id'], [users[0]['id'], users[1]['id']]) hints = driver_hints.Hints() hints.add_filter('name', 'Doyle', comparator='endswith') users = PROVIDERS.identity_api.list_users_in_group( group['id'], hints=hints ) self.assertThat(len(users), matchers.Equals(1)) self.assertEqual(user_list[1]['id'], users[0]['id']) self._delete_test_data('user', user_list) self._delete_entity('group')(group['id']) def test_list_users_in_group_exact_filtered(self): hints = driver_hints.Hints() user_list, group = self._list_users_in_group_data() hints.add_filter('name', 'Arthur Rimbaud', comparator='equals') users = PROVIDERS.identity_api.list_users_in_group( group['id'], hints=hints ) self.assertEqual(1, len(users)) self.assertEqual(user_list[3]['id'], users[0]['id']) self._delete_test_data('user', user_list) self._delete_entity('group')(group['id']) class LimitTests(filtering.FilterTests): ENTITIES = ['user', 'group', 'project'] def setUp(self): """Setup for Limit Test Cases.""" self.entity_lists = {} for entity in self.ENTITIES: # Create 20 entities self.entity_lists[entity] = self._create_test_data(entity, 20) self.addCleanup(self.clean_up_entities) def clean_up_entities(self): """Clean up entity test data from Limit Test Cases.""" for entity in self.ENTITIES: self._delete_test_data(entity, self.entity_lists[entity]) del self.entity_lists def _test_list_entity_filtered_and_limited(self, entity): self.config_fixture.config(list_limit=10) # Should get back just 10 entities hints = driver_hints.Hints() entities = 
self._list_entities(entity)(hints=hints) self.assertEqual(hints.limit['limit'], len(entities)) self.assertTrue(hints.limit['truncated']) # Override with driver specific limit if entity == 'project': self.config_fixture.config(group='resource', list_limit=5) else: self.config_fixture.config(group='identity', list_limit=5) # Should get back just 5 users hints = driver_hints.Hints() entities = self._list_entities(entity)(hints=hints) self.assertEqual(hints.limit['limit'], len(entities)) # Finally, let's pretend we want to get the full list of entities, # even with the limits set, as part of some internal calculation. # Calling the API without a hints list should achieve this, and # return at least the 20 entries we created (there may be other # entities lying around created by other tests/setup). entities = self._list_entities(entity)() self.assertGreaterEqual(len(entities), 20) self._match_with_list(self.entity_lists[entity], entities) def test_list_users_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('user') def test_list_groups_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('group') def test_list_projects_filtered_and_limited(self): self._test_list_entity_filtered_and_limited('project') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity/test_core.py0000664000175000017500000002312500000000000023642 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for core identity behavior.""" import itertools import os from unittest import mock import uuid import fixtures from oslo_config import fixture as config_fixture import stevedore from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import identity from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity.backends import fake_driver from keystone.tests.unit.ksfixtures import database CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestDomainConfigs(unit.BaseTestCase): def setUp(self): super().setUp() self.addCleanup(CONF.reset) self.tmp_dir = unit.dirs.tmp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config( domain_config_dir=self.tmp_dir, group='identity' ) def test_config_for_nonexistent_domain(self): """Having a config for a non-existent domain will be ignored. There are no assertions in this test because there are no side effects. If there is a config file for a domain that does not exist it should be ignored. """ domain_id = uuid.uuid4().hex domain_config_filename = os.path.join( self.tmp_dir, 'keystone.%s.conf' % domain_id ) self.addCleanup(lambda: os.remove(domain_config_filename)) with open(domain_config_filename, 'w'): """Write an empty config file.""" e = exception.DomainNotFound(domain_id=domain_id) mock_assignment_api = mock.Mock() mock_assignment_api.get_domain_by_name.side_effect = e domain_config = identity.DomainConfigs() fake_standard_driver = None domain_config.setup_domain_drivers( fake_standard_driver, mock_assignment_api ) def test_config_for_dot_name_domain(self): # Ensure we can get the right domain name which has dots within it # from filename. 
domain_config_filename = os.path.join( self.tmp_dir, 'keystone.abc.def.com.conf' ) with open(domain_config_filename, 'w'): """Write an empty config file.""" self.addCleanup(os.remove, domain_config_filename) with mock.patch.object( identity.DomainConfigs, '_load_config_from_file' ) as mock_load_config: domain_config = identity.DomainConfigs() fake_assignment_api = None fake_standard_driver = None domain_config.setup_domain_drivers( fake_standard_driver, fake_assignment_api ) mock_load_config.assert_called_once_with( fake_assignment_api, [domain_config_filename], 'abc.def.com' ) def test_config_for_multiple_sql_backend(self): domains_config = identity.DomainConfigs() # Create the right sequence of is_sql in the drivers being # requested to expose the bug, which is that a False setting # means it forgets previous True settings. drivers = [] files = [] for idx, is_sql in enumerate((True, False, True)): drv = mock.Mock(is_sql=is_sql) drivers.append(drv) name = f'dummy.{idx}' files.append( ''.join( ( identity.DOMAIN_CONF_FHEAD, name, identity.DOMAIN_CONF_FTAIL, ) ) ) def walk_fake(*a, **kwa): return (('/fake/keystone/domains/config', [], files),) generic_driver = mock.Mock(is_sql=False) assignment_api = mock.Mock() id_factory = itertools.count() assignment_api.get_domain_by_name.side_effect = lambda name: { 'id': next(id_factory), '_': 'fake_domain', } load_driver_mock = mock.Mock(side_effect=drivers) with mock.patch.object(os, 'walk', walk_fake): with mock.patch.object(identity.cfg, 'ConfigOpts'): with mock.patch.object( domains_config, '_load_driver', load_driver_mock ): self.assertRaises( exception.MultipleSQLDriversInConfig, domains_config.setup_domain_drivers, generic_driver, assignment_api, ) self.assertEqual(3, load_driver_mock.call_count) class TestDatabaseDomainConfigs(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], 
default_fixtures.ROOT_DOMAIN ) def test_domain_config_in_database_disabled_by_default(self): self.assertFalse(CONF.identity.domain_configurations_from_database) def test_loading_config_from_database(self): self.config_fixture.config( domain_configurations_from_database=True, group='identity' ) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Override two config options for our domain conf = { 'ldap': { 'url': uuid.uuid4().hex, 'suffix': uuid.uuid4().hex, 'use_tls': True, }, 'identity': {'driver': 'ldap'}, } PROVIDERS.domain_config_api.create_config(domain['id'], conf) fake_standard_driver = None domain_config = identity.DomainConfigs() domain_config.setup_domain_drivers( fake_standard_driver, PROVIDERS.resource_api ) # Make sure our two overrides are in place, and others are not affected res = domain_config.get_domain_conf(domain['id']) self.assertEqual(conf['ldap']['url'], res.ldap.url) self.assertEqual(conf['ldap']['suffix'], res.ldap.suffix) self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope) # Make sure the override is not changing the type of the config value use_tls_type = type(CONF.ldap.use_tls) self.assertEqual( use_tls_type(conf['ldap']['use_tls']), res.ldap.use_tls ) # Now turn off using database domain configuration and check that the # default config file values are now seen instead of the overrides. 
self.config_fixture.config( group='identity', domain_configurations_from_database=False ) domain_config = identity.DomainConfigs() domain_config.setup_domain_drivers( fake_standard_driver, PROVIDERS.resource_api ) res = domain_config.get_domain_conf(domain['id']) self.assertEqual(CONF.ldap.url, res.ldap.url) self.assertEqual(CONF.ldap.suffix, res.ldap.suffix) self.assertEqual(CONF.ldap.use_tls, res.ldap.use_tls) self.assertEqual(CONF.ldap.query_scope, res.ldap.query_scope) def test_loading_config_from_database_out_of_tree(self): # Test domain config loading for out-of-tree driver supporting own # config options # Prepare fake driver extension = stevedore.extension.Extension( name="foo", entry_point=None, obj=fake_driver.FooDriver(), plugin=None, ) fake_driver_manager = stevedore.DriverManager.make_test_instance( extension, namespace="keystone.identity" ) # replace DriverManager with a patched test instance self.useFixture( fixtures.MockPatchObject( stevedore, "DriverManager", return_value=fake_driver_manager ) ).mock self.config_fixture.config( domain_configurations_from_database=True, group="identity" ) self.config_fixture.config( additional_whitelisted_options={"foo": ["opt1"]}, group="domain_config", ) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain["id"], domain) # Override two config options for our domain conf = { "foo": {"opt1": uuid.uuid4().hex}, "identity": {"driver": "foo"}, } PROVIDERS.domain_config_api.create_config(domain["id"], conf) domain_config = identity.DomainConfigs() domain_config.setup_domain_drivers("foo", PROVIDERS.resource_api) # Make sure our two overrides are in place, and others are not affected res = domain_config.get_domain_conf(domain["id"]) self.assertEqual(conf["foo"]["opt1"], res.foo.opt1) # Reset whitelisted options in the provider directly. 
Due to the fact # that there are too many singletons used around the code basis there # is a chance of clash when other API domain_config tests are being # executed by the same process. It is NOT ENOUGH just to invoke reset # on fixtures. PROVIDERS.domain_config_api.whitelisted_options.pop("foo", None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/identity_mapping.py0000664000175000017500000000154400000000000023367 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from keystone.common import sql from keystone.identity.mapping_backends import sql as mapping_sql def list_id_mappings(): """List all id_mappings for testing purposes.""" with sql.session_for_read() as session: refs = session.query(mapping_sql.IDMapping).all() return [x.to_dict() for x in refs] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.566113 keystone-26.0.0/keystone/tests/unit/ksfixtures/0000775000175000017500000000000000000000000021654 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/__init__.py0000664000175000017500000000222400000000000023765 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from keystone.tests.unit.ksfixtures.auth_plugins import ( ConfigAuthPlugins, ) # noqa from keystone.tests.unit.ksfixtures.jws_key_repository import ( JWSKeyRepository, ) # noqa from keystone.tests.unit.ksfixtures.backendloader import BackendLoader # noqa from keystone.tests.unit.ksfixtures.cache import Cache # noqa from keystone.tests.unit.ksfixtures.key_repository import KeyRepository # noqa from keystone.tests.unit.ksfixtures.logging import StandardLogging # noqa from keystone.tests.unit.ksfixtures.policy import Policy # noqa from keystone.tests.unit.ksfixtures.warnings import WarningsFixture # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/auth_plugins.py0000664000175000017500000000443500000000000024736 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from keystone import auth import keystone.conf class ConfigAuthPlugins(fixtures.Fixture): """A fixture for setting up and tearing down a auth plugins.""" def __init__(self, config_fixture, methods, **method_classes): super().__init__() self.methods = methods self.config_fixture = config_fixture self.method_classes = method_classes def setUp(self): super().setUp() if self.methods: self.config_fixture.config(group='auth', methods=self.methods) keystone.conf.auth.setup_authentication() if self.method_classes: self.config_fixture.config(group='auth', **self.method_classes) class LoadAuthPlugins(fixtures.Fixture): def __init__(self, *method_names): super().__init__() self.method_names = method_names # NOTE(dstanek): This fixture will load the requested auth # methods as part of its setup. We need to save any existing # plugins so that we can restore them in the cleanup. self.saved = {} def setUp(self): super().setUp() AUTH_METHODS = auth.core.AUTH_METHODS for method_name in self.method_names: if method_name in AUTH_METHODS: self.saved[method_name] = AUTH_METHODS[method_name] AUTH_METHODS[method_name] = auth.core.load_auth_method(method_name) auth.core.AUTH_PLUGINS_LOADED = True def cleanUp(self): AUTH_METHODS = auth.core.AUTH_METHODS for method_name in list(AUTH_METHODS): if method_name in self.saved: AUTH_METHODS[method_name] = self.saved[method_name] else: del AUTH_METHODS[method_name] auth.core.AUTH_PLUGINS_LOADED = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/backendloader.py0000664000175000017500000000250100000000000025002 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import fixtures from keystone import auth import keystone.server class BackendLoader(fixtures.Fixture): """Initialize each manager and assigns them to an attribute.""" def __init__(self, testcase): super().__init__() self._testcase = testcase def setUp(self): super().setUp() self.clear_auth_plugin_registry() drivers, _unused = keystone.server.setup_backends() for manager_name, manager in drivers.items(): setattr(self._testcase, manager_name, manager) self.addCleanup(self._testcase.cleanup_instance(*list(drivers.keys()))) del self._testcase # break circular reference def clear_auth_plugin_registry(self): auth.core.AUTH_METHODS.clear() auth.core.AUTH_PLUGINS_LOADED = False ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/cache.py0000664000175000017500000000306300000000000023273 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from keystone import catalog from keystone.common import cache from keystone import revoke CACHE_REGIONS = ( cache.CACHE_REGION, catalog.COMPUTED_CATALOG_REGION, revoke.REVOKE_REGION, ) class Cache(fixtures.Fixture): """A fixture for setting up the cache between test cases. This will also tear down an existing cache if one is already configured. """ def setUp(self): super().setUp() # NOTE(dstanek): We must remove the existing cache backend in the # setUp instead of the tearDown because it defaults to a no-op cache # and we want the configure call below to create the correct backend. # NOTE(morganfainberg): The only way to reconfigure the CacheRegion # object on each setUp() call is to remove the .backend property. for region in CACHE_REGIONS: if region.is_configured: del region.backend # ensure the cache region instance is setup cache.configure_cache(region=region) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/database.py0000664000175000017500000001051500000000000023774 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import functools import os import fixtures from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from keystone.common import sql import keystone.conf from keystone.tests import unit CONF = keystone.conf.CONF def run_once(f): """A decorator to ensure the decorated function is only executed once. The decorated function is assumed to have a one parameter. """ @functools.wraps(f) def wrapper(): if not wrapper.already_ran: f() wrapper.already_ran = True wrapper.already_ran = False return wrapper # NOTE(I159): Every execution all the options will be cleared. The method must # be called at the every fixture initialization. def initialize_sql_session( connection_str=unit.IN_MEM_DB_CONN_STRING, enforce_sqlite_fks=True ): # Make sure the DB is located in the correct location, in this case set # the default value, as this should be able to be overridden in some # test cases. db_options.set_defaults(CONF, connection=connection_str) # Enable the Sqlite FKs for global engine by default. facade = enginefacade.writer engine = facade.get_engine() f_key = 'ON' if enforce_sqlite_fks else 'OFF' if engine.name == 'sqlite': engine.connect().exec_driver_sql('PRAGMA foreign_keys = ' + f_key) @run_once def _load_sqlalchemy_models(): """Find all modules containing SQLAlchemy models and import them. This creates more consistent, deterministic test runs because tables for all core and extension models are always created in the test database. We ensure this by importing all modules that contain model definitions. The database schema during test runs is created using reflection. Reflection is simply SQLAlchemy taking the model definitions for all models currently imported and making tables for each of them. The database schema created during test runs may vary between tests as more models are imported. Importing all models at the start of the test run avoids this problem. 
""" keystone_root = os.path.normpath( os.path.join(os.path.dirname(__file__), '..', '..', '..') ) for root, dirs, files in os.walk(keystone_root): # NOTE(morganfainberg): Slice the keystone_root off the root to ensure # we do not end up with a module name like: # Users.home.openstack.keystone.assignment.backends.sql root = root[len(keystone_root) :] if root.endswith('backends') and 'sql.py' in files: # The root will be prefixed with an instance of os.sep, which will # make the root after replacement '.', the 'keystone' part # of the module path is always added to the front module_root = 'keystone.%s' % root.replace(os.sep, '.').lstrip('.') module_components = module_root.split('.') module_without_backends = '' for x in range(0, len(module_components) - 1): module_without_backends += module_components[x] + '.' module_without_backends = module_without_backends.rstrip('.') module_name = module_root + '.sql' __import__(module_name) class Database(fixtures.Fixture): """A fixture for setting up and tearing down a database.""" def __init__(self): super().__init__() initialize_sql_session() _load_sqlalchemy_models() sql.enable_sqlite_foreign_key() def setUp(self): super().setUp() with sql.session_for_write() as session: self.engine = session.get_bind() self.addCleanup(sql.cleanup) sql.ModelBase.metadata.create_all(bind=self.engine) self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine) def recreate(self): sql.ModelBase.metadata.create_all(bind=self.engine) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/hacking.py0000664000175000017500000002427200000000000023641 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morganfainberg) This file shouldn't have flake8 run on it as it has # code examples that will fail normal CI pep8/flake8 tests. This is expected. # The code has been moved here to ensure that proper tests occur on the # test_hacking_checks test cases. # flake8: noqa import sys import fixtures class HackingCode(fixtures.Fixture): """A fixture to house the various code examples for the keystone hacking style checks. """ mutable_default_args = { 'code': """ def f(): pass def f(a, b='', c=None): pass def f(bad=[]): pass def f(foo, bad=[], more_bad=[x for x in range(3)]): pass def f(foo, bad={}): pass def f(foo, bad={}, another_bad=[], fine=None): pass def f(bad=[]): # noqa pass def funcs(bad=dict(), more_bad=list(), even_more_bad=set()): "creating mutables through builtins" def funcs(bad=something(), more_bad=some_object.something()): "defaults from any functions" def f(bad=set(), more_bad={x for x in range(3)}, even_more_bad={1, 2, 3}): "set and set comprehession" def f(bad={x: x for x in range(3)}): "dict comprehension" """, 'expected_errors': [ (7, 10, 'K001'), (10, 15, 'K001'), (10, 28, 'K001'), (13, 15, 'K001'), (16, 15, 'K001'), (16, 31, 'K001'), (22, 14, 'K001'), (22, 31, 'K001'), (22, 53, 'K001'), (25, 14, 'K001'), (25, 36, 'K001'), (28, 10, 'K001'), (28, 26, 'K001'), (29, 21, 'K001'), (32, 10, 'K001'), ], } # NOTE(browne): This is gross, but in Python 3.4 and earlier, the ast # module returns the incorrect col_offset for two of the defined functions # in the code sample above. 
if sys.version_info < (3, 5): mutable_default_args['expected_errors'][12] = (28, 27, 'K001') mutable_default_args['expected_errors'][14] = (32, 11, 'K001') # NOTE(gmann): Python version < 3.8.2 (before Ubuntu Focal) returns # the incorrect col_offset for below function defined in above code sample # def f(foo, bad=[], more_bad=[x for x in range(3)]): if sys.version_info < (3, 8, 2): mutable_default_args['expected_errors'][2] = (10, 29, 'K001') # type: ignore[index] comments_begin_with_space = { 'code': """ # This is a good comment #This is a bad one # This is alright and can # be continued with extra indentation # if that's what the developer wants. """, 'expected_errors': [ (3, 0, 'K002'), ], } asserting_none_equality = { 'code': """ class Test(object): def test(self): self.assertEqual('', '') self.assertEqual('', None) self.assertEqual(None, '') self.assertNotEqual('', None) self.assertNotEqual(None, '') self.assertNotEqual('', None) # noqa self.assertNotEqual(None, '') # noqa """, 'expected_errors': [ (5, 8, 'K003'), (6, 8, 'K003'), (7, 8, 'K004'), (8, 8, 'K004'), ], } dict_constructor = { 'code': """ lower_res = {k.lower(): v for k, v in res[1].items()} fool = dict(a='a', b='b') lower_res = dict((k.lower(), v) for k, v in res[1].items()) attrs = dict([(k, _from_json(v))]) dict([[i,i] for i in range(3)]) dict(({1:2})) """, 'expected_errors': [ (3, 0, 'K008'), (4, 0, 'K008'), (5, 0, 'K008'), ], } class HackingTranslations(fixtures.Fixture): """Fixtures for checking translation rules. 1. Exception messages should be translated 2. Logging messages should not be translated 3. 
If a message is used for both an exception and logging it should be translated """ shared_imports = """ import logging import logging as stlib_logging from keystone.i18n import _ from keystone.i18n import _ as oslo_i18n from oslo_log import log from oslo_log import log as oslo_logging """ examples = [ { 'code': """ # stdlib logging LOG = logging.getLogger() LOG.info(_('text')) class C: def __init__(self): LOG.warning(oslo_i18n('text', {})) """, 'expected_errors': [ (3, 9, 'K005'), (6, 20, 'K005'), ], }, { 'code': """ # stdlib logging w/ alias and specifying a logger class C: def __init__(self): self.L = logging.getLogger(__name__) def m(self): self.L.warning( _('text'), {} ) """, 'expected_errors': [ (7, 12, 'K005'), ], }, { 'code': """ # oslo logging and specifying a logger L = log.getLogger(__name__) L.error(oslo_i18n('text')) """, 'expected_errors': [ (3, 8, 'K005'), ], }, { 'code': """ # oslo logging w/ alias class C: def __init__(self): self.LOG = oslo_logging.getLogger() self.LOG.critical(_('text')) """, 'expected_errors': [ (5, 26, 'K005'), ], }, { 'code': """ LOG = log.getLogger(__name__) # translation on a separate line msg = _('text') LOG.exception(msg) """, 'expected_errors': [ (4, 14, 'K005'), ], }, { 'code': """ # this should be an error even if it'll be raised later. 
L = log.getLogger(__name__) msg = _('text') L.warning(msg) raise Exception(msg) """, 'expected_errors': [ (4, 10, 'K005'), ], }, { 'code': """ L = log.getLogger(__name__) def f(): msg = _('text') L.warning(msg) something = True # add an extra statement here raise Exception(msg) """, 'expected_errors': [ (4, 14, 'K005'), ], }, { 'code': """ LOG = log.getLogger(__name__) def func(): msg = _('text') LOG.warning(msg) raise Exception('some other message') """, 'expected_errors': [ (4, 16, 'K005'), ], }, { 'code': """ LOG = log.getLogger(__name__) if True: msg = _('text') else: msg = _('text') LOG.warning(msg) raise Exception(msg) """, 'expected_errors': [ (6, 12, 'K005'), ], }, { 'code': """ LOG = log.getLogger(__name__) if True: msg = _('text') else: msg = _('text') LOG.warning(msg) """, 'expected_errors': [ (6, 12, 'K005'), ], }, { 'code': """ LOG = log.getLogger(__name__) msg = _LW('text') LOG.warning(msg) msg = _('something else') raise Exception(msg) """, 'expected_errors': [], }, { 'code': """ LOG = log.getLogger(__name__) msg = _('hello %s') % 'world' LOG.warning(msg) """, 'expected_errors': [ (3, 12, 'K005'), ], }, { 'code': """ # this should not be an error LOG = log.getLogger(__name__) try: something = True except AssertionError as e: LOG.warning(e) raise exception.Unauthorized(e) """, 'expected_errors': [], }, { 'code': """ # this should not be an error LOG = log.getLogger(__name__) try: pass except AssertionError as e: msg = _('some message') LOG.warning(msg) raise exception.Unauthorized(message=msg) """, 'expected_errors': [ (7, 16, 'K005'), ], }, ] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/jws_key_repository.py0000664000175000017500000000351000000000000026177 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from keystone.common import jwt_utils from keystone.common import utils class JWSKeyRepository(fixtures.Fixture): def __init__(self, config_fixture): super().__init__() self.config_fixture = config_fixture self.key_group = 'jwt_tokens' def setUp(self): super().setUp() # grab a couple of temporary directory file paths private_key_directory = self.useFixture(fixtures.TempDir()).path public_key_directory = self.useFixture(fixtures.TempDir()).path # set config to use temporary paths self.config_fixture.config( group=self.key_group, jws_private_key_repository=private_key_directory, ) self.config_fixture.config( group=self.key_group, jws_public_key_repository=public_key_directory, ) # create temporary repositories utils.create_directory(private_key_directory) utils.create_directory(public_key_directory) # create an asymmetric key pair for token signing and validation private_key_path = os.path.join(private_key_directory, 'private.pem') public_key_path = os.path.join(public_key_directory, 'public.pem') jwt_utils.create_jws_keypair(private_key_path, public_key_path) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/key_repository.py0000664000175000017500000000241600000000000025320 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import fixtures from keystone.common import fernet_utils as utils class KeyRepository(fixtures.Fixture): def __init__(self, config_fixture, key_group, max_active_keys): super().__init__() self.config_fixture = config_fixture self.max_active_keys = max_active_keys self.key_group = key_group def setUp(self): super().setUp() directory = self.useFixture(fixtures.TempDir()).path self.config_fixture.config( group=self.key_group, key_repository=directory ) fernet_utils = utils.FernetUtils( directory, self.max_active_keys, self.key_group ) fernet_utils.create_key_directory() fernet_utils.initialize_key_repository() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/ldapdb.py0000664000175000017500000000262300000000000023457 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from keystone.identity.backends.ldap import common as common_ldap from keystone.tests.unit import fakeldap class LDAPDatabase(fixtures.Fixture): """A fixture for setting up and tearing down an LDAP database.""" def __init__(self, dbclass=fakeldap.FakeLdap): self._dbclass = dbclass def setUp(self): super().setUp() self.clear() common_ldap.WRITABLE = True common_ldap._HANDLERS.clear() common_ldap.register_handler('fake://', self._dbclass) # TODO(dstanek): switch the flow here self.addCleanup(self.clear) self.addCleanup(common_ldap._HANDLERS.clear) self.addCleanup(self.disable_write) def disable_write(self): common_ldap.WRITABLE = False def clear(self): for shelf in fakeldap.FakeShelves: fakeldap.FakeShelves[shelf].clear() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/logging.py0000664000175000017500000001024400000000000023655 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging as std_logging import os import fixtures class NullHandler(std_logging.Handler): """Custom default NullHandler to attempt to format the record. Used to detect formatting errors in debug level logs without saving the logs. """ def handle(self, record): self.format(record) def emit(self, record): pass def createLock(self): self.lock = None class StandardLogging(fixtures.Fixture): """Setup Logging redirection for tests. 
There are a number of things we want to handle with logging in tests: * Redirect the logging to somewhere that we can test or dump it later. * Ensure that as many DEBUG messages as possible are actually executed, to ensure they are actually syntactically valid (they often have not been). * Ensure that we create useful output for tests that doesn't overwhelm the testing system (which means we can't capture the 100 MB of debug logging on every run). To do this we create a logger fixture at the root level, which defaults to INFO and create a NullLogger at DEBUG which lets us execute log messages at DEBUG but not keep the output. To support local debugging OS_DEBUG=True can be set in the environment, which will print out the full debug logging. There are also a set of overrides for particularly verbose modules to be even less than INFO. """ def setUp(self): super().setUp() # set root logger to debug root = std_logging.getLogger() root.setLevel(std_logging.DEBUG) # supports collecting debug level for local runs if os.environ.get('OS_DEBUG') in ('True', 'true', '1', 'yes'): level = std_logging.DEBUG else: level = std_logging.INFO # Collect logs fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s' self.logger = self.useFixture( fixtures.FakeLogger(format=fs, level=None) ) # TODO(sdague): why can't we send level through the fake # logger? Tests prove that it breaks, but it's worth getting # to the bottom of. root.handlers[0].setLevel(level) if level > std_logging.DEBUG: # Just attempt to format debug level logs, but don't save them handler = NullHandler() self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False)) handler.setLevel(std_logging.DEBUG) # Don't log every single DB migration step std_logging.getLogger('migrate.versioning.api').setLevel( std_logging.WARNING ) # Or alembic for model comparisons. 
std_logging.getLogger('alembic').setLevel(std_logging.WARNING) # Or oslo_db provisioning steps std_logging.getLogger('oslo_db.sqlalchemy').setLevel( std_logging.WARNING ) # At times we end up calling back into main() functions in # testing. This has the possibility of calling logging.setup # again, which completely unwinds the logging capture we've # created here. Once we've setup the logging the way we want, # disable the ability for the test to change this. def fake_logging_setup(*args): pass self.useFixture( fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup) ) def delete_stored_logs(self): # NOTE(gibi): this depends on the internals of the fixtures.FakeLogger. # This could be enhanced once the PR # https://github.com/testing-cabal/fixtures/pull/42 is released self.logger._output.truncate(0) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/policy.py0000664000175000017500000000224500000000000023530 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import fixtures from oslo_policy import opts from keystone.common.rbac_enforcer import policy class Policy(fixtures.Fixture): """A fixture for working with policy configuration.""" def __init__(self, config_fixture, policy_file=None): self._policy_file = policy_file self._config_fixture = config_fixture def setUp(self): super().setUp() opts.set_defaults(self._config_fixture.conf) self._config_fixture.config( group='oslo_policy', policy_file=self._policy_file ) policy._ENFORCER.suppress_deprecation_warnings = True self.addCleanup(policy.reset) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/temporaryfile.py0000664000175000017500000000163600000000000025116 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import fixtures class SecureTempFile(fixtures.Fixture): """A fixture for creating a secure temp file.""" def setUp(self): super().setUp() _fd, self.file_name = tempfile.mkstemp() # Make sure no file descriptors are leaked, close the unused FD. 
os.close(_fd) self.addCleanup(os.remove, self.file_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/ksfixtures/warnings.py0000664000175000017500000000517200000000000024063 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import warnings import fixtures from sqlalchemy import exc as sqla_exc class WarningsFixture(fixtures.Fixture): """Filters out warnings during test runs.""" def setUp(self): super().setUp() self._original_warning_filters = warnings.filters[:] # NOTE(stephenfin): Make deprecation warnings only happen once. # Otherwise this gets kind of crazy given the way that upstream python # libs use this. warnings.simplefilter('once', DeprecationWarning) warnings.filterwarnings( 'error', module='keystone', category=DeprecationWarning, ) warnings.filterwarnings( 'ignore', message=( 'Policy enforcement is depending on the value of ' '(token|group_ids). ' 'This key is deprecated. Please update your policy ' 'file to use the standard policy values.' ), ) # NOTE(stephenfin): Ignore scope check UserWarnings from oslo.policy. 
warnings.filterwarnings( 'ignore', message="Policy .* failed scope check", category=UserWarning, ) # TODO(stephenfin): This will be fixed once we drop sqlalchemy-migrate warnings.filterwarnings( 'ignore', category=DeprecationWarning, message=r"Using function/method 'db_version\(\)' is deprecated", ) warnings.filterwarnings( 'error', module='keystone', category=sqla_exc.SAWarning, ) warnings.filterwarnings( 'ignore', category=sqla_exc.SADeprecationWarning, ) # Enable deprecation warnings for keystone itself to capture upcoming # SQLALchemy changes warnings.filterwarnings( 'error', module='keystone', category=sqla_exc.SADeprecationWarning, ) self.addCleanup(self._reset_warning_filters) def _reset_warning_filters(self): warnings.filters[:] = self._original_warning_filters ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/limit/0000775000175000017500000000000000000000000020563 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/limit/__init__.py0000664000175000017500000000000000000000000022662 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/limit/test_backends.py0000664000175000017500000011126700000000000023756 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone.common import driver_hints from keystone.common import provider_api from keystone import exception from keystone.tests import unit PROVIDERS = provider_api.ProviderAPIs class RegisteredLimitTests: def test_create_registered_limit_crud(self): # create one, return it. registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, description='test description', ) reg_limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) self.assertDictEqual(registered_limit_1, reg_limits[0]) # create another two, return them. registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) registered_limit_3 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='backup', default_limit=5, id=uuid.uuid4().hex, ) reg_limits = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_2, registered_limit_3] ) self.assertEqual(2, len(reg_limits)) for reg_limit in reg_limits: if reg_limit['id'] == registered_limit_2['id']: self.assertDictEqual(registered_limit_2, reg_limit) if reg_limit['id'] == registered_limit_3['id']: self.assertDictEqual(registered_limit_3, reg_limit) def test_create_registered_limit_duplicate(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', 
default_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.Conflict, PROVIDERS.unified_limit_api.create_registered_limits, [registered_limit_2], ) def test_create_multi_registered_limits_duplicate(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) # Create with a duplicated one and a normal one. Both of them will not # be created. registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_3 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.Conflict, PROVIDERS.unified_limit_api.create_registered_limits, [registered_limit_2, registered_limit_3], ) reg_limits = PROVIDERS.unified_limit_api.list_registered_limits() self.assertEqual(1, len(reg_limits)) self.assertEqual(registered_limit_1['id'], reg_limits[0]['id']) def test_create_registered_limit_invalid_service(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=uuid.uuid4().hex, region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.create_registered_limits, [registered_limit_1], ) def test_create_registered_limit_invalid_region(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=uuid.uuid4().hex, resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.create_registered_limits, [registered_limit_1], ) def 
test_create_registered_limit_description_none(self): registered_limit = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, description=None, ) res = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) self.assertIsNone(res[0]['description']) def test_create_registered_limit_without_description(self): registered_limit = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit.pop('description') res = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit] ) self.assertIsNone(res[0]['description']) def test_update_registered_limit(self): # create two registered limits registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) expect_region = 'region_two' registered_limit_update = { 'id': registered_limit_1['id'], 'region_id': expect_region, } res = PROVIDERS.unified_limit_api.update_registered_limit( registered_limit_1['id'], registered_limit_update ) self.assertEqual(expect_region, res['region_id']) # 'id' can be omitted in the update body registered_limit_update = {'region_id': expect_region} res = PROVIDERS.unified_limit_api.update_registered_limit( registered_limit_2['id'], registered_limit_update ) self.assertEqual(expect_region, res['region_id']) def test_update_registered_limit_invalid_input_return_bad_request(self): registered_limit_1 = unit.new_registered_limit_ref( 
service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) update_ref = { 'id': registered_limit_1['id'], 'service_id': uuid.uuid4().hex, } self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.update_registered_limit, registered_limit_1['id'], update_ref, ) update_ref = {'id': registered_limit_1['id'], 'region_id': 'fake_id'} self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.update_registered_limit, registered_limit_1['id'], update_ref, ) def test_update_registered_limit_duplicate(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) # Update registered_limit1 to registered_limit2 update_ref = { 'id': registered_limit_1['id'], 'region_id': self.region_two['id'], 'resource_name': 'snapshot', } self.assertRaises( exception.Conflict, PROVIDERS.unified_limit_api.update_registered_limit, registered_limit_1['id'], update_ref, ) def test_update_registered_limit_when_reference_limit_exist(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) 
PROVIDERS.unified_limit_api.create_limits([limit_1]) registered_limit_update = { 'id': registered_limit_1['id'], 'region_id': 'region_two', } self.assertRaises( exception.RegisteredLimitError, PROVIDERS.unified_limit_api.update_registered_limit, registered_limit_1['id'], registered_limit_update, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_2] ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_2]) registered_limit_update = { 'id': registered_limit_2['id'], 'region_id': 'region_two', } self.assertRaises( exception.RegisteredLimitError, PROVIDERS.unified_limit_api.update_registered_limit, registered_limit_2['id'], registered_limit_update, ) def test_list_registered_limits(self): # create two registered limits registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) reg_limits_1 = PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) # list reg_limits_2 = PROVIDERS.unified_limit_api.list_registered_limits() self.assertEqual(2, len(reg_limits_2)) self.assertDictEqual(reg_limits_1[0], reg_limits_2[0]) self.assertDictEqual(reg_limits_1[1], reg_limits_2[1]) def test_list_registered_limit_by_limit(self): self.config_fixture.config(list_limit=1) # create two registered limits registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], 
region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) # list, limit is 1 hints = driver_hints.Hints() reg_limits = PROVIDERS.unified_limit_api.list_registered_limits( hints=hints ) self.assertEqual(1, len(reg_limits)) if reg_limits[0]['id'] == registered_limit_1['id']: self.assertDictEqual(registered_limit_1, reg_limits[0]) else: self.assertDictEqual(registered_limit_2, reg_limits[0]) def test_list_registered_limit_by_filter(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) hints = driver_hints.Hints() hints.add_filter('service_id', self.service_one['id']) res = PROVIDERS.unified_limit_api.list_registered_limits(hints) self.assertEqual(2, len(res)) hints = driver_hints.Hints() hints.add_filter('region_id', self.region_one['id']) res = PROVIDERS.unified_limit_api.list_registered_limits(hints) self.assertEqual(1, len(res)) hints = driver_hints.Hints() hints.add_filter('resource_name', 'backup') res = PROVIDERS.unified_limit_api.list_registered_limits(hints) self.assertEqual(0, len(res)) def test_get_registered_limit(self): # create two registered limits registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, 
id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) # show one res = PROVIDERS.unified_limit_api.get_registered_limit( registered_limit_2['id'] ) self.assertDictEqual(registered_limit_2, res) def test_get_registered_limit_returns_not_found(self): self.assertRaises( exception.RegisteredLimitNotFound, PROVIDERS.unified_limit_api.get_registered_limit, uuid.uuid4().hex, ) def test_delete_registered_limit(self): # create two registered limits registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='snapshot', default_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2] ) # delete one PROVIDERS.unified_limit_api.delete_registered_limit( registered_limit_1['id'] ) self.assertRaises( exception.RegisteredLimitNotFound, PROVIDERS.unified_limit_api.get_registered_limit, registered_limit_1['id'], ) reg_limits = PROVIDERS.unified_limit_api.list_registered_limits() self.assertEqual(1, len(reg_limits)) self.assertEqual(registered_limit_2['id'], reg_limits[0]['id']) def test_delete_registered_limit_returns_not_found(self): self.assertRaises( exception.RegisteredLimitNotFound, PROVIDERS.unified_limit_api.delete_registered_limit, uuid.uuid4().hex, ) def test_delete_registered_limit_when_reference_limit_exist(self): registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) 
PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1] ) limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1]) self.assertRaises( exception.RegisteredLimitError, PROVIDERS.unified_limit_api.delete_registered_limit, registered_limit_1['id'], ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_2] ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_2]) self.assertRaises( exception.RegisteredLimitError, PROVIDERS.unified_limit_api.delete_registered_limit, registered_limit_2['id'], ) class LimitTests: def test_default_enforcement_model_is_flat(self): expected = { 'description': ( 'Limit enforcement and validation does not take ' 'project hierarchy into consideration.' ), 'name': 'flat', } self.assertEqual(expected, PROVIDERS.unified_limit_api.get_model()) def test_registering_unsupported_enforcement_model_fails(self): self.assertRaises( ValueError, self.config_fixture.config, group='unified_limit', enforcement_model=uuid.uuid4().hex, ) def test_create_project_limit(self): # create one, return it. limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, description='test description', domain_id=None, ) limits = PROVIDERS.unified_limit_api.create_limits([limit_1]) self.assertDictEqual(limit_1, limits[0]) # create another two, return them. 
limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, domain_id=None, ) limit_3 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='backup', resource_limit=5, id=uuid.uuid4().hex, domain_id=None, ) limits = PROVIDERS.unified_limit_api.create_limits([limit_2, limit_3]) for limit in limits: if limit['id'] == limit_2['id']: self.assertDictEqual(limit_2, limit) if limit['id'] == limit_3['id']: self.assertDictEqual(limit_3, limit) def test_create_domain_limit(self): limit_1 = unit.new_limit_ref( project_id=None, service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, description='test description', domain_id=self.domain_default['id'], ) limits = PROVIDERS.unified_limit_api.create_limits([limit_1]) self.assertDictEqual(limit_1, limits[0]) def test_create_project_limit_duplicate(self): limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1]) # use different id but the same project_id, service_id and region_id limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.Conflict, PROVIDERS.unified_limit_api.create_limits, [limit_1], ) def test_create_domain_limit_duplicate(self): limit_1 = unit.new_limit_ref( project_id=None, service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=self.domain_default['id'], ) 
PROVIDERS.unified_limit_api.create_limits([limit_1]) # use different id but the same domain_id, service_id and region_id limit_1 = unit.new_limit_ref( project_id=None, service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=self.domain_default['id'], ) self.assertRaises( exception.Conflict, PROVIDERS.unified_limit_api.create_limits, [limit_1], ) def test_create_limit_with_invalid_service_raises_validation_error(self): limit = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=uuid.uuid4().hex, region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.create_limits, [limit], ) def test_create_limit_with_invalid_region_raises_validation_error(self): limit = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=uuid.uuid4().hex, resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.ValidationError, PROVIDERS.unified_limit_api.create_limits, [limit], ) def test_create_limit_without_reference_registered_limit(self): limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) self.assertRaises( exception.NoLimitReference, PROVIDERS.unified_limit_api.create_limits, [limit_1], ) def test_create_limit_description_none(self): limit = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, description=None, ) res = PROVIDERS.unified_limit_api.create_limits([limit]) self.assertIsNone(res[0]['description']) def test_create_limit_without_description(self): limit = unit.new_limit_ref( 
project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) limit.pop('description') res = PROVIDERS.unified_limit_api.create_limits([limit]) self.assertIsNone(res[0]['description']) def test_update_limit(self): # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) expect_limit = 8 limit_update = {'id': limit_1['id'], 'resource_limit': expect_limit} res = PROVIDERS.unified_limit_api.update_limit( limit_1['id'], limit_update ) self.assertEqual(expect_limit, res['resource_limit']) # 'id' can be omitted in the update body limit_update = {'resource_limit': expect_limit} res = PROVIDERS.unified_limit_api.update_limit( limit_2['id'], limit_update ) self.assertEqual(expect_limit, res['resource_limit']) def test_list_limits(self): # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=None, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, domain_id=None, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) # list hints = driver_hints.Hints() hints.add_filter('project_id', self.project_bar['id']) limits = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(2, len(limits)) for re in limits: if re['id'] == 
limit_1['id']: self.assertDictEqual(limit_1, re) if re['id'] == limit_2['id']: self.assertDictEqual(limit_2, re) def test_list_limit_by_limit(self): self.config_fixture.config(list_limit=1) # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=None, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, domain_id=None, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) # list, limit is 1 hints = driver_hints.Hints() limits = PROVIDERS.unified_limit_api.list_limits(hints=hints) self.assertEqual(1, len(limits)) if limits[0]['id'] == limit_1['id']: self.assertDictEqual(limit_1, limits[0]) else: self.assertDictEqual(limit_2, limits[0]) def test_list_limit_by_filter(self): limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=None, ) limit_2 = unit.new_limit_ref( project_id=self.project_baz['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=10, id=uuid.uuid4().hex, domain_id=None, ) limit_3 = unit.new_limit_ref( project_id=None, service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=10, id=uuid.uuid4().hex, domain_id=self.domain_default['id'], ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2, limit_3]) hints = driver_hints.Hints() hints.add_filter('service_id', self.service_one['id']) res = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(3, len(res)) hints = driver_hints.Hints() hints.add_filter('region_id', self.region_one['id']) res = 
PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(1, len(res)) self.assertDictEqual(limit_1, res[0]) hints = driver_hints.Hints() hints.add_filter('resource_name', 'backup') res = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(0, len(res)) hints = driver_hints.Hints() hints.add_filter('project_id', self.project_bar['id']) res = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(1, len(res)) hints = driver_hints.Hints() hints.add_filter('domain_id', self.domain_default['id']) res = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(1, len(res)) def test_list_limit_by_multi_filter_with_project_id(self): limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) limit_2 = unit.new_limit_ref( project_id=self.project_baz['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) hints = driver_hints.Hints() hints.add_filter('service_id', self.service_one['id']) hints.add_filter('project_id', self.project_bar['id']) res = PROVIDERS.unified_limit_api.list_limits(hints) self.assertEqual(1, len(res)) def test_get_limit(self): # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, domain_id=None, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, domain_id=None, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) # show one res = PROVIDERS.unified_limit_api.get_limit(limit_2['id']) self.assertDictEqual(limit_2, res) def 
test_get_limit_returns_not_found(self): self.assertRaises( exception.LimitNotFound, PROVIDERS.unified_limit_api.get_limit, uuid.uuid4().hex, ) def test_delete_limit(self): # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) # delete one PROVIDERS.unified_limit_api.delete_limit(limit_1['id']) # delete again self.assertRaises( exception.LimitNotFound, PROVIDERS.unified_limit_api.get_limit, limit_1['id'], ) def test_delete_limit_returns_not_found(self): self.assertRaises( exception.LimitNotFound, PROVIDERS.unified_limit_api.delete_limit, uuid.uuid4().hex, ) def test_delete_limit_project(self): # create two limits limit_1 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', resource_limit=10, id=uuid.uuid4().hex, ) limit_2 = unit.new_limit_ref( project_id=self.project_bar['id'], service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', resource_limit=5, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_limits([limit_1, limit_2]) # delete a unrelated project, the limits should still be there. PROVIDERS.resource_api.delete_project(self.project_baz['id']) ref = PROVIDERS.unified_limit_api.list_limits() self.assertEqual(2, len(ref)) # delete the referenced project, the limits should be deleted as well. 
PROVIDERS.resource_api.delete_project(self.project_bar['id']) ref = PROVIDERS.unified_limit_api.list_limits() self.assertEqual([], ref) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/mapping_fixtures.py0000664000175000017500000007512700000000000023417 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures for Federation Mapping.""" EMPLOYEE_GROUP_ID = "0cd5e9" CONTRACTOR_GROUP_ID = "85a868" TESTER_GROUP_ID = "123" TESTER_GROUP_NAME = "tester" DEVELOPER_GROUP_ID = "xyz" DEVELOPER_GROUP_NAME = "Developer" CONTRACTOR_GROUP_NAME = "Contractor" DEVELOPER_GROUP_DOMAIN_NAME = "outsourcing" DEVELOPER_GROUP_DOMAIN_ID = "5abc43" FEDERATED_DOMAIN = "Federated" LOCAL_DOMAIN = "Local" # Mapping summary: # LastName Smith & Not Contractor or SubContractor -> group 0cd5e9 # FirstName Jill & Contractor or SubContractor -> to group 85a868 MAPPING_SMALL = { "rules": [ { "local": [ {"group": {"id": EMPLOYEE_GROUP_ID}}, {"user": {"name": "{0}"}}, ], "remote": [ {"type": "UserName"}, { "type": "orgPersonType", "not_any_of": ["Contractor", "SubContractor"], }, {"type": "LastName", "any_one_of": ["Bo"]}, ], }, { "local": [ {"group": {"id": CONTRACTOR_GROUP_ID}}, {"user": {"name": "{0}"}}, ], "remote": [ {"type": "UserName"}, { "type": "orgPersonType", "any_one_of": ["Contractor", "SubContractor"], }, {"type": "FirstName", "any_one_of": ["Jill"]}, ], }, ] } # Mapping summary: # 
orgPersonType Admin or Big Cheese -> name {0} {1} email {2} and group 0cd5e9 # orgPersonType Customer -> user name {0} email {1} # orgPersonType Test and email ^@example.com$ -> group 123 and xyz MAPPING_LARGE = { "rules": [ { "local": [ { "user": {"name": "{0} {1}", "email": "{2}"}, "group": {"id": EMPLOYEE_GROUP_ID}, } ], "remote": [ {"type": "FirstName"}, {"type": "LastName"}, {"type": "Email"}, { "type": "orgPersonType", "any_one_of": ["Admin", "Big Cheese"], }, ], }, { "local": [{"user": {"name": "{0}", "email": "{1}"}}], "remote": [ {"type": "UserName"}, {"type": "Email"}, { "type": "orgPersonType", "not_any_of": [ "Admin", "Employee", "Contractor", "Tester", ], }, ], }, { "local": [ {"group": {"id": TESTER_GROUP_ID}}, {"group": {"id": DEVELOPER_GROUP_ID}}, {"user": {"name": "{0}"}}, ], "remote": [ {"type": "UserName"}, {"type": "orgPersonType", "any_one_of": ["Tester"]}, { "type": "Email", "any_one_of": [".*@example.com$"], "regex": True, }, ], }, ] } MAPPING_BAD_REQ = { "rules": [ { "local": [{"user": "name"}], "remote": [{"type": "UserName", "bad_requirement": ["Young"]}], } ] } MAPPING_BAD_VALUE = { "rules": [ { "local": [{"user": "name"}], "remote": [{"type": "UserName", "any_one_of": "should_be_list"}], } ] } MAPPING_NO_RULES: dict[str, list[str]] = {'rules': []} MAPPING_NO_REMOTE = {"rules": [{"local": [{"user": "name"}], "remote": []}]} MAPPING_MISSING_LOCAL = { "rules": [ {"remote": [{"type": "UserName", "any_one_of": "should_be_list"}]} ] } MAPPING_WRONG_TYPE = { "rules": [ {"local": [{"user": "{1}"}], "remote": [{"not_type": "UserName"}]} ] } MAPPING_MISSING_TYPE = { "rules": [{"local": [{"user": "{1}"}], "remote": [{}]}] } MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF = { "rules": [ { "local": [{"group": {"id": "0cd5e9"}}, {"user": {"name": "{0}"}}], "remote": [ {"type": "UserName"}, { "type": "orgPersonType", "not_any_of": ["SubContractor"], "invalid_type": "xyz", }, ], } ] } MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF = { "rules": [ { "local": [{"group": 
{"id": "0cd5e9"}}, {"user": {"name": "{0}"}}], "remote": [ {"type": "UserName"}, { "type": "orgPersonType", "any_one_of": ["SubContractor"], "invalid_type": "xyz", }, ], } ] } MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE = { "rules": [ { "local": [{"group": {"id": "0cd5e9"}}, {"user": {"name": "{0}"}}], "remote": [ {"type": "UserName"}, {"type": "orgPersonType", "invalid_type": "xyz"}, ], } ] } MAPPING_EXTRA_RULES_PROPS = { "rules": [ { "local": [{"group": {"id": "0cd5e9"}}, {"user": {"name": "{0}"}}], "invalid_type": { "id": "xyz", }, "remote": [ {"type": "UserName"}, {"type": "orgPersonType", "not_any_of": ["SubContractor"]}, ], } ] } MAPPING_TESTER_REGEX = { "rules": [ { "local": [ { "user": { "name": "{0}", } } ], "remote": [{"type": "UserName"}], }, { "local": [{"group": {"id": TESTER_GROUP_ID}}], "remote": [ { "type": "orgPersonType", "any_one_of": [".*Tester*"], "regex": True, } ], }, ] } MAPPING_DIRECT_MAPPING_THROUGH_KEYWORD = { "rules": [ { "local": [ { "user": { "name": "{0}", } }, {"group": {"id": TESTER_GROUP_ID}}, ], "remote": [{"type": "UserName", "any_one_of": ["bwilliams"]}], } ] } MAPPING_DEVELOPER_REGEX = { "rules": [ { "local": [ { "user": { "name": "{0}", }, "group": {"id": DEVELOPER_GROUP_ID}, } ], "remote": [ {"type": "UserName"}, { "type": "orgPersonType", "any_one_of": ["Developer"], }, { "type": "Email", "not_any_of": [".*@example.org$"], "regex": True, }, ], } ] } MAPPING_GROUP_NAMES = { "rules": [ { "local": [ { "user": { "name": "{0}", } } ], "remote": [{"type": "UserName"}], }, { "local": [ { "group": { "name": DEVELOPER_GROUP_NAME, "domain": {"name": DEVELOPER_GROUP_DOMAIN_NAME}, } } ], "remote": [ { "type": "orgPersonType", "any_one_of": ["Employee"], } ], }, { "local": [ { "group": { "name": TESTER_GROUP_NAME, "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}, } } ], "remote": [{"type": "orgPersonType", "any_one_of": ["BuildingX"]}], }, ] } MAPPING_GROUP_NAME_WITHOUT_DOMAIN = { "rules": [ { "local": [ { "group": { "name": DEVELOPER_GROUP_NAME, } 
} ], "remote": [ { "type": "orgPersonType", "any_one_of": ["Employee"], } ], }, ] } MAPPING_GROUP_ID_WITH_DOMAIN = { "rules": [ { "local": [ { "group": { "id": EMPLOYEE_GROUP_ID, "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}, } } ], "remote": [ { "type": "orgPersonType", "any_one_of": ["Employee"], } ], }, ] } MAPPING_BAD_GROUP = { "rules": [ { "local": [{"group": {}}], "remote": [ { "type": "orgPersonType", "any_one_of": ["Employee"], } ], }, ] } MAPPING_BAD_DOMAIN = { "rules": [ { "local": [ { "group": { "id": EMPLOYEE_GROUP_ID, "domain": { "id": DEVELOPER_GROUP_DOMAIN_ID, "badkey": "badvalue", }, } } ], "remote": [ { "type": "orgPersonType", "any_one_of": ["Employee"], } ], }, ] } MAPPING_EPHEMERAL_USER = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": {"id": FEDERATED_DOMAIN}, "type": "ephemeral", } } ], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["tbo"]}, ], } ] } MAPPING_EPHEMERAL_USER_REMOTE_DOMAIN = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": {"name": "{1}"}, "type": "ephemeral", } } ], "remote": [ {"type": "UserName"}, {"type": "OIDC-openstack-user-domain"}, ], } ] } MAPPING_GROUPS_WHITELIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "whitelist": ["Developer", "Contractor"], }, {"type": "UserName"}, ], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}}, {"user": {"name": "{1}"}}, ], } ] } MAPPING_EPHEMERAL_USER_LOCAL_DOMAIN = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": {"id": LOCAL_DOMAIN}, "type": "ephemeral", } } ], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["jsmith"]}, ], } ] } MAPPING_GROUPS_WHITELIST_MISSING_DOMAIN = { "rules": [ { "remote": [ { "type": "orgPersonType", "whitelist": ["Developer", "Contractor"], }, ], "local": [ { "groups": "{0}", } ], } ] } MAPPING_LOCAL_USER_LOCAL_DOMAIN = { "rules": [ { "local": [ { "user": { "name": "{0}", "domain": {"id": LOCAL_DOMAIN}, "type": "local", } } ], "remote": [ 
{"type": "UserName"}, {"type": "UserName", "any_one_of": ["jsmith"]}, ], } ] } MAPPING_GROUPS_BLACKLIST_MULTIPLES = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": ["Developer", "Manager"], }, {"type": "Thing"}, # this could be variable length! {"type": "UserName"}, ], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}}, { "user": { "name": "{2}", } }, ], } ] } MAPPING_GROUPS_BLACKLIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": ["Developer", "Manager"], }, {"type": "UserName"}, ], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}}, {"user": {"name": "{1}"}}, ], } ] } MAPPING_GROUPS_BLACKLIST_REGEX = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": [".*Employee$"], "regex": True, }, ], "local": [ {"groups": "{0}", "domain": {"id": FEDERATED_DOMAIN}}, ], } ] } MAPPING_GROUPS_WHITELIST_REGEX = { "rules": [ { "remote": [ { "type": "orgPersonType", "whitelist": [".*Employee$"], "regex": True, }, ], "local": [ {"groups": "{0}", "domain": {"id": FEDERATED_DOMAIN}}, ], } ] } # Exercise all possibilities of user identification. Values are hardcoded on # purpose. 
MAPPING_USER_IDS = { "rules": [ { "local": [{"user": {"name": "{0}"}}], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["jsmith"]}, ], }, { "local": [ { "user": { "name": "{0}", "id": "abc123@example.com", "domain": {"id": "federated"}, } } ], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["tbo"]}, ], }, { "local": [{"user": {"id": "{0}"}}], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["bob"]}, ], }, { "local": [ { "user": { "id": "abc123@example.com", "name": "{0}", "domain": {"id": "federated"}, } } ], "remote": [ {"type": "UserName"}, {"type": "UserName", "any_one_of": ["bwilliams"]}, ], }, ] } MAPPING_GROUPS_BLACKLIST_MISSING_DOMAIN = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": ["Developer", "Manager"], }, ], "local": [ { "groups": "{0}", }, ], } ] } MAPPING_GROUPS_WHITELIST_AND_BLACKLIST = { "rules": [ { "remote": [ { "type": "orgPersonType", "blacklist": ["Employee"], "whitelist": ["Contractor"], }, ], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}}, ], } ] } # Mapping used by tokenless test cases, it maps the user_name # and domain_name. MAPPING_WITH_USERNAME_AND_DOMAINNAME = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'domain': {'name': '{1}'}, 'type': 'local', } } ], 'remote': [ {'type': 'SSL_CLIENT_USER_NAME'}, {'type': 'SSL_CLIENT_DOMAIN_NAME'}, ], } ] } # Mapping used by tokenless test cases, it maps the user_id # and domain_name. MAPPING_WITH_USERID_AND_DOMAINNAME = { 'rules': [ { 'local': [ { 'user': { 'id': '{0}', 'domain': {'name': '{1}'}, 'type': 'local', } } ], 'remote': [ {'type': 'SSL_CLIENT_USER_ID'}, {'type': 'SSL_CLIENT_DOMAIN_NAME'}, ], } ] } # Mapping used by tokenless test cases, it maps the user_name # and domain_id. 
MAPPING_WITH_USERNAME_AND_DOMAINID = { 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'domain': {'id': '{1}'}, 'type': 'local', } } ], 'remote': [ {'type': 'SSL_CLIENT_USER_NAME'}, {'type': 'SSL_CLIENT_DOMAIN_ID'}, ], } ] } # Mapping used by tokenless test cases, it maps the user_id # and domain_id. MAPPING_WITH_USERID_AND_DOMAINID = { 'rules': [ { 'local': [ { 'user': { 'id': '{0}', 'domain': {'id': '{1}'}, 'type': 'local', } } ], 'remote': [ {'type': 'SSL_CLIENT_USER_ID'}, {'type': 'SSL_CLIENT_DOMAIN_ID'}, ], } ] } # Mapping used by tokenless test cases, it maps the domain_id only. MAPPING_WITH_DOMAINID_ONLY = { 'rules': [ { 'local': [{'user': {'domain': {'id': '{0}'}, 'type': 'local'}}], 'remote': [{'type': 'SSL_CLIENT_DOMAIN_ID'}], } ] } MAPPING_GROUPS_IDS_WHITELIST = { "rules": [ { "local": [ {"user": {"name": "{0}"}}, {"group_ids": "{1}"}, {"group": {"id": "{2}"}}, ], "remote": [ {"type": "name"}, { "type": "group_ids", "whitelist": ["abc123", "ghi789", "321cba"], }, {"type": "group"}, ], } ] } MAPPING_GROUPS_IDS_BLACKLIST = { "rules": [ { "local": [ {"user": {"name": "{0}"}}, {"group_ids": "{1}"}, {"group": {"id": "{2}"}}, ], "remote": [ {"type": "name"}, {"type": "group_ids", "blacklist": ["def456"]}, {"type": "group"}, ], } ] } # Mapping used by tokenless test cases, it maps the domain_name only. MAPPING_WITH_DOMAINNAME_ONLY = { 'rules': [ { 'local': [{'user': {'domain': {'name': '{0}'}, 'type': 'local'}}], 'remote': [{'type': 'SSL_CLIENT_DOMAIN_NAME'}], } ] } # Mapping used by tokenless test cases, it maps the user_name only. MAPPING_WITH_USERNAME_ONLY = { 'rules': [ { 'local': [{'user': {'name': '{0}', 'type': 'local'}}], 'remote': [{'type': 'SSL_CLIENT_USER_NAME'}], } ] } # Mapping used by tokenless test cases, it maps the user_id only. 
MAPPING_WITH_USERID_ONLY = { 'rules': [ { 'local': [{'user': {'id': '{0}', 'type': 'local'}}], 'remote': [{'type': 'SSL_CLIENT_USER_ID'}], } ] } MAPPING_FOR_EPHEMERAL_USER = { 'rules': [ { 'local': [ { 'user': {'name': '{0}', 'type': 'ephemeral'}, 'group': {'id': 'dummy'}, } ], 'remote': [{'type': 'SSL_CLIENT_USER_NAME'}], } ] } MAPPING_FOR_EPHEMERAL_USER_AND_GROUP_DOMAIN_NAME = { 'rules': [ { 'local': [ { 'user': {'name': '{0}', 'type': 'ephemeral'}, 'group': {'name': 'dummy', 'domain': {'name': 'dummy'}}, } ], 'remote': [{'type': 'SSL_CLIENT_USER_NAME'}], } ] } MAPPING_FOR_DEFAULT_EPHEMERAL_USER = { 'rules': [ { 'local': [{'user': {'name': '{0}'}, 'group': {'id': 'dummy'}}], 'remote': [{'type': 'SSL_CLIENT_USER_NAME'}], } ] } MAPPING_GROUPS_WHITELIST_PASS_THROUGH = { "rules": [ { "remote": [{"type": "UserName"}], "local": [ { "user": { "name": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}, } } ], }, { "remote": [{"type": "orgPersonType", "whitelist": ['Developer']}], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}} ], }, ] } MAPPING_BAD_LOCAL_SETUP = { "rules": [ { "local": [ { "user": {"name": "{0}", "domain": {"id": "default"}}, "whatisthis": "local", } ], "remote": [{"type": "UserName"}], } ] } MAPPING_BAD_LOCAL_TYPE_USER_IN_ASSERTION = { "rules": [ { "local": [{"user": {"name": "{0}", "groups": "{1}"}}], "remote": [ {"type": "openstack_user"}, {"type": "openstack_groups"}, {"type": "openstack_roles", "any_one_of": ["Admin"]}, ], }, ] } MAPPING_GROUPS_WITH_EMAIL = { "rules": [ { "remote": [ { "type": "groups", }, { "type": "userEmail", }, {"type": "UserName"}, ], "local": [ {"groups": "{0}", "domain": {"id": DEVELOPER_GROUP_DOMAIN_ID}}, {"user": {"name": "{2}", "email": "{1}"}}, ], } ] } MAPPING_GROUPS_DOMAIN_OF_USER = { "rules": [ { "local": [{"user": {"name": "{0}"}}, {"groups": "{1}"}], "remote": [ {"type": "openstack_user"}, {"type": "openstack_groups"}, ], } ] } EMPLOYEE_ASSERTION = { 'Email': 'tim@example.com', 'UserName': 
'tbo', 'FirstName': 'Tim', 'LastName': 'Bo', 'orgPersonType': 'Employee;BuildingX', } EMPLOYEE_PARTTIME_ASSERTION = { 'Email': 'tim@example.com', 'UserName': 'tbo', 'FirstName': 'Tim', 'LastName': 'Bo', 'orgPersonType': 'Employee;PartTimeEmployee;Manager', } EMPLOYEE_ASSERTION_MULTIPLE_GROUPS = { 'Email': 'tim@example.com', 'UserName': 'tbo', 'FirstName': 'Tim', 'LastName': 'Bo', 'orgPersonType': 'Developer;Manager;Contractor', 'Thing': 'yes!;maybe!;no!!', } EMPLOYEE_ASSERTION_PREFIXED = { 'PREFIX_Email': 'tim@example.com', 'PREFIX_UserName': 'tbo', 'PREFIX_FirstName': 'Tim', 'PREFIX_LastName': 'Bo', 'PREFIX_orgPersonType': 'SuperEmployee;BuildingX', } CONTRACTOR_ASSERTION = { 'Email': 'jill@example.com', 'UserName': 'jsmith', 'FirstName': 'Jill', 'LastName': 'Smith', 'orgPersonType': 'Contractor;Non-Dev', } ADMIN_ASSERTION = { 'Email': 'bob@example.com', 'UserName': 'bob', 'FirstName': 'Bob', 'LastName': 'Thompson', 'orgPersonType': 'Admin;Chief', } CUSTOMER_ASSERTION = { 'Email': 'beth@example.com', 'UserName': 'bwilliams', 'FirstName': 'Beth', 'LastName': 'Williams', 'orgPersonType': 'Customer', } ANOTHER_CUSTOMER_ASSERTION = { 'Email': 'mark@example.com', 'UserName': 'markcol', 'FirstName': 'Mark', 'LastName': 'Collins', 'orgPersonType': 'Managers;CEO;CTO', } TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'MadeupGroup;Tester;GroupX', } ANOTHER_TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'IamTester', } BAD_TESTER_ASSERTION = { 'Email': 'eviltester@example.org', 'UserName': 'Evil', 'FirstName': 'Test', 'LastName': 'Account', 'orgPersonType': 'Tester', } BAD_DEVELOPER_ASSERTION = { 'Email': 'evildeveloper@example.org', 'UserName': 'Evil', 'FirstName': 'Develop', 'LastName': 'Account', 'orgPersonType': 'Developer', } MALFORMED_TESTER_ASSERTION = { 'Email': 'testacct@example.com', 'UserName': 'testacct', 'FirstName': 'Test', 'LastName': 'Account', 
'orgPersonType': 'Tester', 'object': object(), 'dictionary': dict(zip('teststring', range(10))), 'tuple': tuple(range(5)), } DEVELOPER_ASSERTION = { 'Email': 'developacct@example.com', 'UserName': 'developacct', 'FirstName': 'Develop', 'LastName': 'Account', 'orgPersonType': 'Developer', } CONTRACTOR_MALFORMED_ASSERTION = { 'UserName': 'user', 'FirstName': object(), 'orgPersonType': 'Contractor', } LOCAL_USER_ASSERTION = {'UserName': 'marek', 'UserType': 'random'} ANOTHER_LOCAL_USER_ASSERTION = { 'UserName': 'marek', 'Position': 'DirectorGeneral', } USER_NO_GROUPS_ASSERTION = { 'Email': 'nogroupsuser1@example.org', 'UserName': 'nogroupsuser1', 'orgPersonType': 'NoGroupsOrg', } UNMATCHED_GROUP_ASSERTION = { 'REMOTE_USER': 'Any Momoose', 'REMOTE_USER_GROUPS': 'EXISTS;NO_EXISTS', } GROUP_IDS_ASSERTION = { 'name': 'opilotte', 'group_ids': 'abc123;def456;ghi789', 'group': 'klm012', } GROUP_IDS_ASSERTION_ONLY_ONE_GROUP = { 'name': 'opilotte', 'group_ids': '321cba', 'group': '210mlk', } UNICODE_NAME_ASSERTION = { 'PFX_Email': 'jon@example.com', 'PFX_UserName': 'jonkare', 'PFX_FirstName': 'Jon Kåre', 'PFX_LastName': 'Hellån', 'PFX_orgPersonType': 'Admin;Chief', } GROUPS_ASSERTION_ONLY_ONE_GROUP = { 'userEmail': 'jill@example.com', 'UserName': 'jsmith', 'groups': 'ALL USERS', } GROUPS_ASSERTION_ONLY_ONE_NUMERICAL_GROUP = { 'userEmail': 'jill@example.com', 'UserName': 'jsmith', 'groups': '1234', } GROUPS_DOMAIN_ASSERTION = { 'openstack_user': 'bwilliams', 'openstack_user_domain': 'default', 'openstack_roles': 'Admin', 'openstack_groups': 'JSON:{"name":"group1","domain":{"name":"xxx"}};' 'JSON:{"name":"group2","domain":{"name":"yyy"}}', } USER_WITH_DOMAIN_ASSERTION = { 'UserName': 'marek', 'OIDC-openstack-user-domain': 'user_domain', } MAPPING_UNICODE = { "rules": [ { "local": [ { "user": {"name": "{0} {1}", "email": "{2}"}, "group": {"id": EMPLOYEE_GROUP_ID}, } ], "remote": [ {"type": "PFX_FirstName"}, {"type": "PFX_LastName"}, {"type": "PFX_Email"}, { "type": 
"PFX_orgPersonType", "any_one_of": ["Admin", "Big Cheese"], }, ], }, ], } MAPPING_PROJECTS = { "rules": [ { "local": [ {"user": {"name": "{0}"}}, { "projects": [ { "name": "Production", "roles": [{"name": "observer"}], }, {"name": "Staging", "roles": [{"name": "member"}]}, { "name": "Project for {0}", "roles": [{"name": "admin"}], }, ], }, ], "remote": [ {"type": "UserName"}, { "type": "Email", }, {"type": "orgPersonType", "any_one_of": ["Employee"]}, ], } ] } MAPPING_PROJECTS_WITHOUT_ROLES = { "rules": [ { "local": [ { "user": {"name": "{0}"}, "projects": [ {"name": "a"}, {"name": "b"}, {"name": "Project for {0}"}, ], } ], "remote": [{"type": "UserName"}], }, ] } MAPPING_PROJECTS_WITHOUT_NAME = { "rules": [ { "local": [ { "user": {"name": "{0}"}, "projects": [ {"roles": [{"name": "observer"}]}, {"name": "Staging", "roles": [{"name": "member"}]}, { "name": "Project for {0}", "roles": [{"name": "admin"}], }, ], } ], "remote": [{"type": "UserName"}], }, ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/policy/0000775000175000017500000000000000000000000020744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/policy/__init__.py0000664000175000017500000000000000000000000023043 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/policy/backends/0000775000175000017500000000000000000000000022516 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/policy/backends/__init__.py0000664000175000017500000000000000000000000024615 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/policy/backends/test_base.py0000664000175000017500000000416600000000000025050 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone import exception class DriverTestCase: """Test cases to validate the current policy driver behavior.""" def setUp(self): super().setUp() self.policy = { 'id': uuid.uuid4().hex, 'blob': '{"identity:create_user": "role:domain_admin"}', 'type': 'application/json', } self.driver.create_policy(self.policy['id'], self.policy) @property def driver(self): raise exception.NotImplemented() def test_list_policies(self): another_policy = { 'id': uuid.uuid4().hex, 'blob': '{"compute:create": "role:project_member"}', 'type': 'application/json', } self.driver.create_policy(another_policy['id'], another_policy) policies = self.driver.list_policies() self.assertCountEqual([self.policy, another_policy], policies) def test_get_policy(self): self.assertEqual( self.policy, self.driver.get_policy(self.policy['id']) ) def test_update_policy(self): self.policy['blob'] = ( '{"identity:create_user": "role:domain_admin",' '"identity:update_user": "role:domain_admin"}' ) self.driver.update_policy(self.policy['id'], self.policy) self.assertEqual( self.policy, self.driver.get_policy(self.policy['id']) ) def test_delete_policy(self): 
self.driver.delete_policy(self.policy['id']) self.assertRaises( exception.PolicyNotFound, self.driver.get_policy, self.policy['id'] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/policy/backends/test_sql.py0000664000175000017500000000305600000000000024732 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone.policy.backends import sql as sql_driver from keystone.tests import unit from keystone.tests.unit.backend import core_sql from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.policy.backends import test_base class SQLModelTestCase(core_sql.BaseBackendSqlModels): """Test cases to validate the table structure.""" def test_policy_model(self): cols = ( ('id', sql.String, 64), ('blob', sql.JsonBlob, None), ('type', sql.String, 255), ('extra', sql.JsonBlob, None), ) self.assertExpectedSchema('policy', cols) class SQLDriverTestCase(test_base.DriverTestCase, unit.TestCase): def setUp(self): # Load database first since parent's setUp will use it self.useFixture(database.Database()) super().setUp() @property def driver(self): if not hasattr(self, '_driver'): self._driver = sql_driver.Policy() return self._driver ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/policy/test_backends.py0000664000175000017500000000611500000000000024132 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from keystone.common import provider_api from keystone import exception from keystone.tests import unit PROVIDERS = provider_api.ProviderAPIs class PolicyTests: def test_create(self): ref = unit.new_policy_ref() res = PROVIDERS.policy_api.create_policy(ref['id'], ref) self.assertDictEqual(ref, res) def test_get(self): ref = unit.new_policy_ref() res = PROVIDERS.policy_api.create_policy(ref['id'], ref) res = PROVIDERS.policy_api.get_policy(ref['id']) self.assertDictEqual(ref, res) def test_list(self): ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(ref['id'], ref) res = PROVIDERS.policy_api.list_policies() res = [x for x in res if x['id'] == ref['id']][0] self.assertDictEqual(ref, res) def test_update(self): ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(ref['id'], ref) orig = ref ref = unit.new_policy_ref() # (cannot change policy ID) self.assertRaises( exception.ValidationError, PROVIDERS.policy_api.update_policy, orig['id'], ref, ) ref['id'] = orig['id'] res = PROVIDERS.policy_api.update_policy(orig['id'], ref) self.assertDictEqual(ref, res) def test_delete(self): ref = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(ref['id'], ref) PROVIDERS.policy_api.delete_policy(ref['id']) self.assertRaises( exception.PolicyNotFound, 
PROVIDERS.policy_api.delete_policy, ref['id'], ) self.assertRaises( exception.PolicyNotFound, PROVIDERS.policy_api.get_policy, ref['id'], ) res = PROVIDERS.policy_api.list_policies() self.assertFalse(len([x for x in res if x['id'] == ref['id']])) def test_get_policy_returns_not_found(self): self.assertRaises( exception.PolicyNotFound, PROVIDERS.policy_api.get_policy, uuid.uuid4().hex, ) def test_update_policy_returns_not_found(self): ref = unit.new_policy_ref() self.assertRaises( exception.PolicyNotFound, PROVIDERS.policy_api.update_policy, ref['id'], ref, ) def test_delete_policy_returns_not_found(self): self.assertRaises( exception.PolicyNotFound, PROVIDERS.policy_api.delete_policy, uuid.uuid4().hex, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/receipt/0000775000175000017500000000000000000000000021100 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/receipt/__init__.py0000664000175000017500000000000000000000000023177 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/receipt/test_fernet_provider.py0000664000175000017500000004364300000000000025720 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import datetime import hashlib import os from unittest import mock import uuid from oslo_utils import timeutils from keystone.common import fernet_utils from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.identity.backends import resource_options as ro from keystone.receipt.providers import fernet from keystone.receipt import receipt_formatters from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone.token import provider as token_provider CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestFernetReceiptProvider(unit.TestCase): def setUp(self): super().setUp() self.provider = fernet.Provider() def test_invalid_receipt_raises_receipt_not_found(self): receipt_id = uuid.uuid4().hex e = self.assertRaises( exception.ReceiptNotFound, self.provider.validate_receipt, receipt_id, ) self.assertIn(receipt_id, '%s' % e) class TestValidate(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.useFixture( ksfixtures.ConfigAuthPlugins( self.config_fixture, ['totp', 'token', 'password'] ) ) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='receipt', provider='fernet') def test_validate_v3_receipt_simple(self): # Check the fields in the receipt result when use validate_v3_receipt # with a simple receipt. 
domain_ref = unit.new_domain_ref() domain_ref = PROVIDERS.resource_api.create_domain( domain_ref['id'], domain_ref ) rule_list = [ ['password', 'totp'], ['password', 'totp', 'token'], ] user_ref = unit.new_user_ref(domain_ref['id']) user_ref = PROVIDERS.identity_api.create_user(user_ref) user_ref['options'][ro.MFA_RULES_OPT.option_name] = rule_list user_ref['options'][ro.MFA_ENABLED_OPT.option_name] = True PROVIDERS.identity_api.update_user(user_ref['id'], user_ref) method_names = ['password'] receipt = PROVIDERS.receipt_provider_api.issue_receipt( user_ref['id'], method_names ) receipt = PROVIDERS.receipt_provider_api.validate_receipt(receipt.id) self.assertIsInstance(receipt.expires_at, str) self.assertIsInstance(receipt.issued_at, str) self.assertEqual(set(method_names), set(receipt.methods)) self.assertEqual( {frozenset(r) for r in rule_list}, {frozenset(r) for r in receipt.required_methods}, ) self.assertEqual(user_ref['id'], receipt.user_id) def test_validate_v3_receipt_validation_error_exc(self): # When the receipt format isn't recognized, ReceiptNotFound is raised. # A uuid string isn't a valid Fernet receipt. receipt_id = uuid.uuid4().hex self.assertRaises( exception.ReceiptNotFound, PROVIDERS.receipt_provider_api.validate_receipt, receipt_id, ) class TestReceiptFormatter(unit.TestCase): def test_restore_padding(self): # 'a' will result in '==' padding, 'aa' will result in '=' padding, and # 'aaa' will result in no padding. binary_to_test = [b'a', b'aa', b'aaa'] for binary in binary_to_test: # base64.urlsafe_b64encode takes bytes and returns # bytes. encoded_string = base64.urlsafe_b64encode(binary) encoded_string = encoded_string.decode('utf-8') # encoded_string is now str. 
encoded_str_without_padding = encoded_string.rstrip('=') self.assertFalse(encoded_str_without_padding.endswith('=')) encoded_str_with_padding_restored = ( receipt_formatters.ReceiptFormatter.restore_padding( encoded_str_without_padding ) ) self.assertEqual(encoded_string, encoded_str_with_padding_restored) class TestPayloads(unit.TestCase): def setUp(self): super().setUp() self.useFixture( ksfixtures.ConfigAuthPlugins( self.config_fixture, ['totp', 'token', 'password'] ) ) def assertTimestampsEqual(self, expected, actual): # The timestamp that we get back when parsing the payload may not # exactly match the timestamp that was put in the payload due to # conversion to and from a float. exp_time = timeutils.parse_isotime(expected) actual_time = timeutils.parse_isotime(actual) # the granularity of timestamp string is microseconds and it's only the # last digit in the representation that's different, so use a delta # just above nanoseconds. return self.assertCloseEnoughForGovernmentWork( exp_time, actual_time, delta=1e-05 ) def test_strings_can_be_converted_to_bytes(self): s = token_provider.random_urlsafe_str() self.assertIsInstance(s, str) b = receipt_formatters.ReceiptPayload.random_urlsafe_str_to_bytes(s) self.assertIsInstance(b, bytes) def test_uuid_hex_to_byte_conversions(self): payload_cls = receipt_formatters.ReceiptPayload expected_hex_uuid = uuid.uuid4().hex uuid_obj = uuid.UUID(expected_hex_uuid) expected_uuid_in_bytes = uuid_obj.bytes actual_uuid_in_bytes = payload_cls.convert_uuid_hex_to_bytes( expected_hex_uuid ) self.assertEqual(expected_uuid_in_bytes, actual_uuid_in_bytes) actual_hex_uuid = payload_cls.convert_uuid_bytes_to_hex( expected_uuid_in_bytes ) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def test_time_string_to_float_conversions(self): payload_cls = receipt_formatters.ReceiptPayload original_time_str = utils.isotime(subsecond=True) time_obj = timeutils.parse_isotime(original_time_str) expected_time_float = ( 
timeutils.normalize_time(time_obj) - datetime.datetime.fromtimestamp( 0, datetime.timezone.utc ).replace(tzinfo=None) ).total_seconds() # NOTE(lbragstad): The receipt expiration time for Fernet receipts is # passed in the payload of the receipt. This is different from the # receipt creation time, which is handled by Fernet and doesn't support # subsecond precision because it is a timestamp integer. self.assertIsInstance(expected_time_float, float) actual_time_float = payload_cls._convert_time_string_to_float( original_time_str ) self.assertIsInstance(actual_time_float, float) self.assertEqual(expected_time_float, actual_time_float) # Generate expected_time_str using the same time float. Using # original_time_str from utils.isotime will occasionally fail due to # floating point rounding differences. time_object = datetime.datetime.fromtimestamp( actual_time_float, datetime.timezone.utc ).replace(tzinfo=None) expected_time_str = utils.isotime(time_object, subsecond=True) actual_time_str = payload_cls._convert_float_to_time_string( actual_time_float ) self.assertEqual(expected_time_str, actual_time_str) def _test_payload(self, payload_class, exp_user_id=None, exp_methods=None): exp_user_id = exp_user_id or uuid.uuid4().hex exp_methods = exp_methods or ['password'] exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True) payload = payload_class.assemble( exp_user_id, exp_methods, exp_expires_at ) (user_id, methods, expires_at) = payload_class.disassemble(payload) self.assertEqual(exp_user_id, user_id) self.assertEqual(exp_methods, methods) self.assertTimestampsEqual(exp_expires_at, expires_at) def test_payload(self): self._test_payload(receipt_formatters.ReceiptPayload) def test_payload_multiple_methods(self): self._test_payload( receipt_formatters.ReceiptPayload, exp_methods=['password', 'totp'] ) class TestFernetKeyRotation(unit.TestCase): def setUp(self): super().setUp() # A collection of all previously-seen signatures of the key # repository's contents. 
self.key_repo_signatures = set() @property def keys(self): """Key files converted to numbers.""" return sorted( int(x) for x in os.listdir(CONF.fernet_receipts.key_repository) ) @property def key_repository_size(self): """The number of keys in the key repository.""" return len(self.keys) @property def key_repository_signature(self): """Create a "thumbprint" of the current key repository. Because key files are renamed, this produces a hash of the contents of the key files, ignoring their filenames. The resulting signature can be used, for example, to ensure that you have a unique set of keys after you perform a key rotation (taking a static set of keys, and simply shuffling them, would fail such a test). """ # Load the keys into a list, keys is list of str. key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) keys = key_utils.load_keys() # Sort the list of keys by the keys themselves (they were previously # sorted by filename). keys.sort() # Create the thumbprint using all keys in the repository. signature = hashlib.sha1() for key in keys: # Need to convert key to bytes for update. signature.update(key.encode('utf-8')) return signature.hexdigest() def assertRepositoryState(self, expected_size): """Validate the state of the key repository.""" self.assertEqual(expected_size, self.key_repository_size) self.assertUniqueRepositoryState() def assertUniqueRepositoryState(self): """Ensure that the current key repo state has not been seen before.""" # This is assigned to a variable because it takes some work to # calculate. signature = self.key_repository_signature # Ensure the signature is not in the set of previously seen signatures. self.assertNotIn(signature, self.key_repo_signatures) # Add the signature to the set of repository signatures to validate # that we don't see it again later. 
self.key_repo_signatures.add(signature) def test_rotation(self): # Initializing a key repository results in this many keys. We don't # support max_active_keys being set any lower. min_active_keys = 2 # Simulate every rotation strategy up to "rotating once a week while # maintaining a year's worth of keys." for max_active_keys in range(min_active_keys, 52 + 1): self.config_fixture.config( group='fernet_receipts', max_active_keys=max_active_keys ) # Ensure that resetting the key repository always results in 2 # active keys. self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_receipts', CONF.fernet_receipts.max_active_keys, ) ) # Validate the initial repository state. self.assertRepositoryState(expected_size=min_active_keys) # The repository should be initialized with a staged key (0) and a # primary key (1). The next key is just auto-incremented. exp_keys = [0, 1] next_key_number = exp_keys[-1] + 1 # keep track of next key self.assertEqual(exp_keys, self.keys) # Rotate the keys just enough times to fully populate the key # repository. key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) for rotation in range(max_active_keys - min_active_keys): key_utils.rotate_keys() self.assertRepositoryState(expected_size=rotation + 3) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) # We should have a fully populated key repository now. self.assertEqual(max_active_keys, self.key_repository_size) # Rotate an additional number of times to ensure that we maintain # the desired number of active keys. 
key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) for rotation in range(10): key_utils.rotate_keys() self.assertRepositoryState(expected_size=max_active_keys) exp_keys.pop(1) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) def test_rotation_disk_write_fail(self): # Make sure that the init key repository contains 2 keys self.assertRepositoryState(expected_size=2) key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) # Simulate the disk full situation mock_open = mock.mock_open() file_handle = mock_open() file_handle.flush.side_effect = IOError('disk full') with mock.patch('keystone.common.fernet_utils.open', mock_open): self.assertRaises(IOError, key_utils.rotate_keys) # Assert that the key repository is unchanged self.assertEqual(self.key_repository_size, 2) with mock.patch('keystone.common.fernet_utils.open', mock_open): self.assertRaises(IOError, key_utils.rotate_keys) # Assert that the key repository is still unchanged, even after # repeated rotation attempts. self.assertEqual(self.key_repository_size, 2) # Rotate the keys normally, without any mocking, to show that the # system can recover. key_utils.rotate_keys() # Assert that the key repository is now expanded. 
self.assertEqual(self.key_repository_size, 3) def test_rotation_empty_file(self): active_keys = 2 self.assertRepositoryState(expected_size=active_keys) empty_file = os.path.join(CONF.fernet_receipts.key_repository, '2') with open(empty_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) # Rotate the keys to overwrite the empty file key_utils.rotate_keys() self.assertTrue(os.path.isfile(empty_file)) keys = key_utils.load_keys() self.assertEqual(3, len(keys)) self.assertTrue(os.path.getsize(empty_file) > 0) def test_non_numeric_files(self): evil_file = os.path.join(CONF.fernet_receipts.key_repository, '99.bak') with open(evil_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) key_utils.rotate_keys() self.assertTrue(os.path.isfile(evil_file)) keys = 0 for x in os.listdir(CONF.fernet_receipts.key_repository): if x == '99.bak': continue keys += 1 self.assertEqual(3, keys) class TestLoadKeys(unit.TestCase): def assertValidFernetKeys(self, keys): # Make sure each key is a non-empty string for key in keys: self.assertGreater(len(key), 0) self.assertIsInstance(key, str) def test_non_numeric_files(self): evil_file = os.path.join(CONF.fernet_receipts.key_repository, '~1') with open(evil_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) keys = key_utils.load_keys() self.assertEqual(2, len(keys)) self.assertValidFernetKeys(keys) def test_empty_files(self): empty_file = os.path.join(CONF.fernet_receipts.key_repository, '2') with open(empty_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_receipts.key_repository, CONF.fernet_receipts.max_active_keys, 'fernet_receipts', ) keys = key_utils.load_keys() self.assertEqual(2, len(keys)) self.assertValidFernetKeys(keys) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/receipt/test_receipt_serialization.py0000664000175000017500000000467400000000000027114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from oslo_utils import timeutils from keystone.common.cache import _context_cache from keystone.common import utils as ks_utils from keystone import exception from keystone.models import receipt_model from keystone.tests.unit import base_classes class TestReceiptSerialization(base_classes.TestCaseWithBootstrap): def setUp(self): super().setUp() self.admin_user_id = self.bootstrapper.admin_user_id self.receipt_id = uuid.uuid4().hex issued_at = timeutils.utcnow() self.issued_at = ks_utils.isotime(at=issued_at, subsecond=True) # Reach into the cache registry and pull out an instance of the # _ReceiptModelHandler so that we can interact and test it directly (as # opposed to using PROVIDERS or managers to invoke it). 
receipt_handler_id = receipt_model._ReceiptModelHandler.identity self.receipt_handler = _context_cache._registry.get(receipt_handler_id) self.exp_receipt = receipt_model.ReceiptModel() self.exp_receipt.user_id = self.admin_user_id self.exp_receipt.mint(self.receipt_id, self.issued_at) def test_serialize_and_deserialize_receipt_model(self): serialized = self.receipt_handler.serialize(self.exp_receipt) receipt = self.receipt_handler.deserialize(serialized) self.assertEqual(self.exp_receipt.user_id, receipt.user_id) self.assertEqual(self.exp_receipt.id, receipt.id) self.assertEqual(self.exp_receipt.issued_at, receipt.issued_at) @mock.patch.object( receipt_model.ReceiptModel, '__init__', side_effect=Exception ) def test_error_handling_in_deserialize(self, handler_mock): serialized = self.receipt_handler.serialize(self.exp_receipt) self.assertRaises( exception.CacheDeserializationError, self.receipt_handler.deserialize, serialized, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/resource/0000775000175000017500000000000000000000000021274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/resource/__init__.py0000664000175000017500000000000000000000000023373 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/resource/backends/0000775000175000017500000000000000000000000023046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/resource/backends/__init__.py0000664000175000017500000000000000000000000025145 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/resource/backends/test_sql.py0000664000175000017500000000226600000000000025264 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.resource.backends import sql from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.resource import test_backends class TestSqlResourceDriver( unit.BaseTestCase, test_backends.ResourceDriverTests ): def setUp(self): super().setUp() self.useFixture(database.Database()) self.driver = sql.Resource() root_domain = default_fixtures.ROOT_DOMAIN root_domain['domain_id'] = root_domain['id'] root_domain['is_domain'] = True self.driver.create_project(root_domain['id'], root_domain) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/resource/config_backends/0000775000175000017500000000000000000000000024373 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/resource/config_backends/__init__.py0000664000175000017500000000000000000000000026472 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/resource/config_backends/test_sql.py0000664000175000017500000000371400000000000026610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import sql from keystone.resource.config_backends import sql as config_sql from keystone.tests import unit from keystone.tests.unit.backend import core_sql from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.resource import test_core class SqlDomainConfigModels(core_sql.BaseBackendSqlModels): def test_whitelisted_model(self): cols = ( ('domain_id', sql.String, 64), ('group', sql.String, 255), ('option', sql.String, 255), ('value', sql.JsonBlob, None), ) self.assertExpectedSchema('whitelisted_config', cols) def test_sensitive_model(self): cols = ( ('domain_id', sql.String, 64), ('group', sql.String, 255), ('option', sql.String, 255), ('value', sql.JsonBlob, None), ) self.assertExpectedSchema('sensitive_config', cols) class SqlDomainConfigDriver( unit.BaseTestCase, test_core.DomainConfigDriverTests ): def setUp(self): super().setUp() self.useFixture(database.Database()) self.driver = config_sql.DomainConfig() class SqlDomainConfig( core_sql.BaseBackendSqlTests, test_core.DomainConfigTests ): def setUp(self): super().setUp() # test_core.DomainConfigTests is effectively a mixin class, so make # sure we call its setup test_core.DomainConfigTests.setUp(self) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/resource/test_backends.py0000664000175000017500000025631000000000000024466 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import uuid from testtools import matchers from keystone.common import driver_hints from keystone.common import provider_api from keystone.common.resource_options import options as ro_opt import keystone.conf from keystone import exception from keystone.resource.backends import sql as resource_sql from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import utils as test_utils CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class ResourceTests: domain_count = len(default_fixtures.DOMAINS) def test_get_project(self): project_ref = PROVIDERS.resource_api.get_project( self.project_bar['id'] ) self.assertDictEqual(self.project_bar, project_ref) def test_get_project_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, uuid.uuid4().hex, ) def test_get_project_by_name(self): project_ref = PROVIDERS.resource_api.get_project_by_name( self.project_bar['name'], CONF.identity.default_domain_id ) self.assertDictEqual(self.project_bar, project_ref) @unit.skip_if_no_multiple_domains_support def test_get_project_by_name_for_project_acting_as_a_domain(self): """Test get_project_by_name works when the domain_id is None.""" project = 
unit.new_project_ref( domain_id=CONF.identity.default_domain_id, is_domain=False ) project = PROVIDERS.resource_api.create_project(project['id'], project) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project_by_name, project['name'], None, ) # Test that querying with domain_id as None will find the project # acting as a domain, even if it's name is the same as the regular # project above. project2 = unit.new_project_ref(is_domain=True, name=project['name']) project2 = PROVIDERS.resource_api.create_project( project2['id'], project2 ) project_ref = PROVIDERS.resource_api.get_project_by_name( project2['name'], None ) self.assertEqual(project2, project_ref) def test_get_project_by_name_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project_by_name, uuid.uuid4().hex, CONF.identity.default_domain_id, ) def test_create_duplicate_project_id_fails(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] PROVIDERS.resource_api.create_project(project_id, project) project['name'] = 'fake2' self.assertRaises( exception.Conflict, PROVIDERS.resource_api.create_project, project_id, project, ) def test_create_duplicate_project_name_fails(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] PROVIDERS.resource_api.create_project(project_id, project) project['id'] = 'fake2' self.assertRaises( exception.Conflict, PROVIDERS.resource_api.create_project, project['id'], project, ) def test_create_project_name_with_trailing_whitespace(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] project_name = project['name'] = project['name'] + ' ' project_returned = PROVIDERS.resource_api.create_project( project_id, project ) self.assertEqual(project_id, project_returned['id']) self.assertEqual(project_name.strip(), project_returned['name']) def 
test_create_duplicate_project_name_in_different_domains(self): new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project2 = unit.new_project_ref( name=project1['name'], domain_id=new_domain['id'] ) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.resource_api.create_project(project2['id'], project2) def test_rename_duplicate_project_name_fails(self): project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project2 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.resource_api.create_project(project2['id'], project2) project2['name'] = project1['name'] self.assertRaises( exception.Error, PROVIDERS.resource_api.update_project, project2['id'], project2, ) def test_update_project_id_does_nothing(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] PROVIDERS.resource_api.create_project(project['id'], project) project['id'] = 'fake2' PROVIDERS.resource_api.update_project(project_id, project) project_ref = PROVIDERS.resource_api.get_project(project_id) self.assertEqual(project_id, project_ref['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, 'fake2', ) def test_update_project_name_with_trailing_whitespace(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] project_create = PROVIDERS.resource_api.create_project( project_id, project ) self.assertEqual(project_id, project_create['id']) project_name = project['name'] = project['name'] + ' ' project_update = PROVIDERS.resource_api.update_project( project_id, project ) self.assertEqual(project_id, project_update['id']) self.assertEqual(project_name.strip(), project_update['name']) def 
test_delete_domain_with_user_group_project_links(self): # TODO(chungg):add test case once expected behaviour defined pass def test_update_project_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.update_project, uuid.uuid4().hex, dict(), ) def test_delete_project_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.delete_project, uuid.uuid4().hex, ) def test_create_update_delete_unicode_project(self): unicode_project_name = 'name \u540d\u5b57' project = unit.new_project_ref( name=unicode_project_name, domain_id=CONF.identity.default_domain_id, ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.resource_api.update_project(project['id'], project) PROVIDERS.resource_api.delete_project(project['id']) def test_create_project_with_no_enabled_field(self): ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id) del ref['enabled'] PROVIDERS.resource_api.create_project(ref['id'], ref) project = PROVIDERS.resource_api.get_project(ref['id']) self.assertIs(project['enabled'], True) def test_create_project_long_name_fails(self): project = unit.new_project_ref( name='a' * 65, domain_id=CONF.identity.default_domain_id ) self.assertRaises( exception.ValidationError, PROVIDERS.resource_api.create_project, project['id'], project, ) def test_create_project_invalid_domain_id(self): project = unit.new_project_ref(domain_id=uuid.uuid4().hex) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.create_project, project['id'], project, ) def test_list_domains(self): domain1 = unit.new_domain_ref() domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) PROVIDERS.resource_api.create_domain(domain2['id'], domain2) domains = PROVIDERS.resource_api.list_domains() self.assertEqual(3, len(domains)) domain_ids = [] for domain in domains: domain_ids.append(domain.get('id')) 
self.assertIn(CONF.identity.default_domain_id, domain_ids) self.assertIn(domain1['id'], domain_ids) self.assertIn(domain2['id'], domain_ids) def test_list_projects(self): project_refs = PROVIDERS.resource_api.list_projects() project_count = len(default_fixtures.PROJECTS) + self.domain_count self.assertEqual(project_count, len(project_refs)) for project in default_fixtures.PROJECTS: self.assertIn(project, project_refs) def test_list_projects_with_multiple_filters(self): # Create a project project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) # Build driver hints with the project's name and inexistent description hints = driver_hints.Hints() hints.add_filter('name', project['name']) hints.add_filter('description', uuid.uuid4().hex) # Retrieve projects based on hints and check an empty list is returned projects = PROVIDERS.resource_api.list_projects(hints) self.assertEqual([], projects) # Build correct driver hints hints = driver_hints.Hints() hints.add_filter('name', project['name']) hints.add_filter('description', project['description']) # Retrieve projects based on hints projects = PROVIDERS.resource_api.list_projects(hints) # Check that the returned list contains only the first project self.assertEqual(1, len(projects)) self.assertEqual(project, projects[0]) def test_list_projects_for_domain(self): project_ids = [ x['id'] for x in PROVIDERS.resource_api.list_projects_in_domain( CONF.identity.default_domain_id ) ] # Only the projects from the default fixtures are expected, since # filtering by domain does not include any project that acts as a # domain. 
self.assertThat( project_ids, matchers.HasLength(len(default_fixtures.PROJECTS)) ) self.assertIn(self.project_bar['id'], project_ids) self.assertIn(self.project_baz['id'], project_ids) self.assertIn(self.project_mtu['id'], project_ids) self.assertIn(self.project_service['id'], project_ids) @unit.skip_if_no_multiple_domains_support def test_list_projects_acting_as_domain(self): initial_domains = PROVIDERS.resource_api.list_domains() # Creating 5 projects that act as domains new_projects_acting_as_domains = [] for i in range(5): project = unit.new_project_ref(is_domain=True) project = PROVIDERS.resource_api.create_project( project['id'], project ) new_projects_acting_as_domains.append(project) # Creating a few regular project to ensure it doesn't mess with the # ones that act as domains self._create_projects_hierarchy(hierarchy_size=2) projects = PROVIDERS.resource_api.list_projects_acting_as_domain() expected_number_projects = len(initial_domains) + len( new_projects_acting_as_domains ) self.assertEqual(expected_number_projects, len(projects)) for project in new_projects_acting_as_domains: self.assertIn(project, projects) for domain in initial_domains: self.assertIn(domain['id'], [p['id'] for p in projects]) @unit.skip_if_no_multiple_domains_support def test_list_projects_for_alternate_domain(self): domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) project_ids = [ x['id'] for x in PROVIDERS.resource_api.list_projects_in_domain( domain1['id'] ) ] self.assertEqual(2, len(project_ids)) self.assertIn(project1['id'], project_ids) self.assertIn(project2['id'], project_ids) def _create_projects_hierarchy( self, hierarchy_size=2, domain_id=None, is_domain=False, parent_project_id=None, ): 
"""Create a project hierarchy with specified size. :param hierarchy_size: the desired hierarchy size, default is 2 - a project with one child. :param domain_id: domain where the projects hierarchy will be created. :param is_domain: if the hierarchy will have the is_domain flag active or not. :param parent_project_id: if the intention is to create a sub-hierarchy, sets the sub-hierarchy root. Defaults to creating a new hierarchy, i.e. a new root project. :returns projects: a list of the projects in the created hierarchy. """ if domain_id is None: domain_id = CONF.identity.default_domain_id if parent_project_id: project = unit.new_project_ref( parent_id=parent_project_id, domain_id=domain_id, is_domain=is_domain, ) else: project = unit.new_project_ref( domain_id=domain_id, is_domain=is_domain ) project_id = project['id'] project = PROVIDERS.resource_api.create_project(project_id, project) projects = [project] for i in range(1, hierarchy_size): new_project = unit.new_project_ref( parent_id=project_id, domain_id=domain_id ) PROVIDERS.resource_api.create_project( new_project['id'], new_project ) projects.append(new_project) project_id = new_project['id'] return projects @unit.skip_if_no_multiple_domains_support def test_create_domain_with_project_api(self): project = unit.new_project_ref(is_domain=True) ref = PROVIDERS.resource_api.create_project(project['id'], project) self.assertTrue(ref['is_domain']) PROVIDERS.resource_api.get_domain(ref['id']) @unit.skip_if_no_multiple_domains_support def test_project_as_a_domain_uniqueness_constraints(self): """Test project uniqueness for those acting as domains. If it is a project acting as a domain, we can't have two or more with the same name. 
""" # Create two projects acting as a domain project = unit.new_project_ref(is_domain=True) project = PROVIDERS.resource_api.create_project(project['id'], project) project2 = unit.new_project_ref(is_domain=True) project2 = PROVIDERS.resource_api.create_project( project2['id'], project2 ) # All projects acting as domains have a null domain_id, so should not # be able to create another with the same name but a different # project ID. new_project = project.copy() new_project['id'] = uuid.uuid4().hex self.assertRaises( exception.Conflict, PROVIDERS.resource_api.create_project, new_project['id'], new_project, ) # We also should not be able to update one to have a name clash project2['name'] = project['name'] self.assertRaises( exception.Conflict, PROVIDERS.resource_api.update_project, project2['id'], project2, ) # But updating it to a unique name is OK project2['name'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project2['id'], project2) # Finally, it should be OK to create a project with same name as one of # these acting as a domain, as long as it is a regular project project3 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, name=project2['name'] ) PROVIDERS.resource_api.create_project(project3['id'], project3) # In fact, it should be OK to create such a project in the domain which # has the matching name. 
        # TODO(henry-nash): Once we fully support projects acting as a domain,
        # add a test here to create a sub-project with a name that matches its
        # project acting as a domain

    @unit.skip_if_no_multiple_domains_support
    @test_utils.wip('waiting for sub projects acting as domains support')
    def test_is_domain_sub_project_has_parent_domain_id(self):
        # A sub-project acting as a domain should inherit its parent as both
        # parent_id and domain_id (WIP: feature not fully supported yet).
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, is_domain=True
        )
        PROVIDERS.resource_api.create_project(project['id'], project)

        sub_project = unit.new_project_ref(
            domain_id=project['id'], parent_id=project['id'], is_domain=True
        )
        ref = PROVIDERS.resource_api.create_project(
            sub_project['id'], sub_project
        )
        self.assertTrue(ref['is_domain'])
        self.assertEqual(project['id'], ref['parent_id'])
        self.assertEqual(project['id'], ref['domain_id'])

    @unit.skip_if_no_multiple_domains_support
    def test_delete_domain_with_project_api(self):
        # Deleting a project that acts as a domain removes it from both the
        # project and domain APIs, and requires it to be disabled first.
        project = unit.new_project_ref(domain_id=None, is_domain=True)
        PROVIDERS.resource_api.create_project(project['id'], project)

        # Check that a corresponding domain was created
        PROVIDERS.resource_api.get_domain(project['id'])

        # Try to delete the enabled project that acts as a domain
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.delete_project,
            project['id'],
        )

        # Disable the project
        project['enabled'] = False
        PROVIDERS.resource_api.update_project(project['id'], project)

        # Successfully delete the project
        PROVIDERS.resource_api.delete_project(project['id'])

        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.get_project,
            project['id'],
        )

        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.resource_api.get_domain,
            project['id'],
        )

    @unit.skip_if_no_multiple_domains_support
    def test_create_subproject_acting_as_domain_fails(self):
        root_project = unit.new_project_ref(is_domain=True)
        PROVIDERS.resource_api.create_project(root_project['id'], root_project)

        sub_project = unit.new_project_ref(
            is_domain=True, parent_id=root_project['id']
        )

        # Creation of sub projects acting as domains is not allowed yet
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_project,
            sub_project['id'],
            sub_project,
        )

    @unit.skip_if_no_multiple_domains_support
    def test_create_domain_under_regular_project_hierarchy_fails(self):
        # Projects acting as domains can't have a regular project as parent
        projects_hierarchy = self._create_projects_hierarchy()
        parent = projects_hierarchy[1]
        project = unit.new_project_ref(
            domain_id=parent['id'], parent_id=parent['id'], is_domain=True
        )

        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_project,
            project['id'],
            project,
        )

    @unit.skip_if_no_multiple_domains_support
    @test_utils.wip('waiting for sub projects acting as domains support')
    def test_create_project_under_domain_hierarchy(self):
        # A regular project created under a hierarchy of domains should take
        # its direct parent as domain_id (WIP: feature not supported yet).
        projects_hierarchy = self._create_projects_hierarchy(is_domain=True)
        parent = projects_hierarchy[1]
        project = unit.new_project_ref(
            domain_id=parent['id'], parent_id=parent['id'], is_domain=False
        )

        ref = PROVIDERS.resource_api.create_project(project['id'], project)
        self.assertFalse(ref['is_domain'])
        self.assertEqual(parent['id'], ref['parent_id'])
        self.assertEqual(parent['id'], ref['domain_id'])

    def test_create_project_without_is_domain_flag(self):
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        del project['is_domain']
        ref = PROVIDERS.resource_api.create_project(project['id'], project)
        # The is_domain flag should be False by default
        self.assertFalse(ref['is_domain'])

    @unit.skip_if_no_multiple_domains_support
    def test_create_project_passing_is_domain_flag_true(self):
        project = unit.new_project_ref(is_domain=True)

        ref = PROVIDERS.resource_api.create_project(project['id'], project)
        self.assertTrue(ref['is_domain'])

    def test_create_project_passing_is_domain_flag_false(self):
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, is_domain=False
        )

        ref = PROVIDERS.resource_api.create_project(project['id'], project)
        self.assertIs(False, ref['is_domain'])

    @test_utils.wip('waiting for support for parent_id to imply domain_id')
    def test_create_project_with_parent_id_and_without_domain_id(self):
        # WIP: specifying only parent_id should imply the parent's domain_id.
        # First create a domain
        project = unit.new_project_ref(is_domain=True)
        PROVIDERS.resource_api.create_project(project['id'], project)
        # Now create a child by just naming the parent_id
        sub_project = unit.new_project_ref(parent_id=project['id'])
        ref = PROVIDERS.resource_api.create_project(
            sub_project['id'], sub_project
        )

        # The domain_id should be set to the parent domain_id
        self.assertEqual(project['domain_id'], ref['domain_id'])

    def test_create_project_with_domain_id_and_without_parent_id(self):
        # Specifying only domain_id makes the project acting as that domain
        # the implicit parent.
        # First create a domain
        project = unit.new_project_ref(is_domain=True)
        PROVIDERS.resource_api.create_project(project['id'], project)
        # Now create a child by just naming the domain_id
        sub_project = unit.new_project_ref(domain_id=project['id'])
        ref = PROVIDERS.resource_api.create_project(
            sub_project['id'], sub_project
        )

        # The parent_id and domain_id should be set to the id of the project
        # acting as a domain
        self.assertEqual(project['id'], ref['parent_id'])
        self.assertEqual(project['id'], ref['domain_id'])

    def test_create_project_with_domain_id_mismatch_to_parent_domain(self):
        # First create a domain
        project = unit.new_project_ref(is_domain=True)
        PROVIDERS.resource_api.create_project(project['id'], project)
        # Now try to create a child with the above as its parent, but
        # specifying a different domain.
        sub_project = unit.new_project_ref(
            parent_id=project['id'], domain_id=CONF.identity.default_domain_id
        )
        # A child's domain_id must match its parent's; mismatch is rejected.
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_project,
            sub_project['id'],
            sub_project,
        )

    def test_check_leaf_projects(self):
        # is_leaf_project() is true only for projects without children, and
        # becomes true for a parent once its last child is deleted.
        projects_hierarchy = self._create_projects_hierarchy()
        root_project = projects_hierarchy[0]
        leaf_project = projects_hierarchy[1]

        self.assertFalse(
            PROVIDERS.resource_api.is_leaf_project(root_project['id'])
        )
        self.assertTrue(
            PROVIDERS.resource_api.is_leaf_project(leaf_project['id'])
        )

        # Delete leaf_project
        PROVIDERS.resource_api.delete_project(leaf_project['id'])

        # Now, root_project should be leaf
        self.assertTrue(
            PROVIDERS.resource_api.is_leaf_project(root_project['id'])
        )

    def test_list_projects_in_subtree(self):
        # Builds project1 -> project2 -> {project3, project4} and checks the
        # subtree listing at each level of the hierarchy.
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        project1 = projects_hierarchy[0]
        project2 = projects_hierarchy[1]
        project3 = projects_hierarchy[2]
        project4 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, parent_id=project2['id']
        )
        PROVIDERS.resource_api.create_project(project4['id'], project4)

        subtree = PROVIDERS.resource_api.list_projects_in_subtree(
            project1['id']
        )
        self.assertEqual(3, len(subtree))
        self.assertIn(project2, subtree)
        self.assertIn(project3, subtree)
        self.assertIn(project4, subtree)

        subtree = PROVIDERS.resource_api.list_projects_in_subtree(
            project2['id']
        )
        self.assertEqual(2, len(subtree))
        self.assertIn(project3, subtree)
        self.assertIn(project4, subtree)

        subtree = PROVIDERS.resource_api.list_projects_in_subtree(
            project3['id']
        )
        self.assertEqual(0, len(subtree))

    def test_get_projects_in_subtree_as_ids_with_large_tree(self):
        """Check project hierarchy is returned correctly in large tree.

        With a large hierarchy we need to enforce the projects
        are returned in the correct order (illustrated below).
        Tree we will create::

                   +------p1------+
                   |              |
              +---p3---+       +-p2-+
              |        |       |    |
              p7     +-p6-+    p5   p4
              |      |    |
              p10    p9   p8
                     |
                     p11
        """
        # Create large project hierarchy, of above depiction
        p1, p2, p4 = self._create_projects_hierarchy(hierarchy_size=3)
        p5 = self._create_projects_hierarchy(
            hierarchy_size=1, parent_project_id=p2['id']
        )[0]
        p3, p6, p8 = self._create_projects_hierarchy(
            hierarchy_size=3, parent_project_id=p1['id']
        )
        p9, p11 = self._create_projects_hierarchy(
            hierarchy_size=2, parent_project_id=p6['id']
        )
        p7, p10 = self._create_projects_hierarchy(
            hierarchy_size=2, parent_project_id=p3['id']
        )
        # Expected result: nested dict of child ids keyed by parent id,
        # leaves mapping to None.
        expected_projects = {
            p2['id']: {p5['id']: None, p4['id']: None},
            p3['id']: {
                p7['id']: {p10['id']: None},
                p6['id']: {p9['id']: {p11['id']: None}, p8['id']: None},
            },
        }

        prjs_hierarchy = PROVIDERS.resource_api.get_projects_in_subtree_as_ids(
            p1['id']
        )
        self.assertDictEqual(expected_projects, prjs_hierarchy)

    def test_list_projects_in_subtree_with_circular_reference(self):
        project1 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        project1 = PROVIDERS.resource_api.create_project(
            project1['id'], project1
        )

        project2 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, parent_id=project1['id']
        )
        PROVIDERS.resource_api.create_project(project2['id'], project2)

        project1['parent_id'] = project2['id']  # Adds cyclic reference

        # NOTE(dstanek): The manager does not allow parent_id to be updated.
        # Instead will directly use the driver to create the cyclic
        # reference.
        PROVIDERS.resource_api.driver.update_project(project1['id'], project1)

        subtree = PROVIDERS.resource_api.list_projects_in_subtree(
            project1['id']
        )

        # NOTE(dstanek): If a cyclic reference is detected the code bails
        # and returns None instead of falling into the infinite
        # recursion trap.
        self.assertIsNone(subtree)

    def test_list_projects_in_subtree_invalid_project_id(self):
        # None is rejected as invalid input; an unknown id raises not-found.
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.list_projects_in_subtree,
            None,
        )

        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.list_projects_in_subtree,
            uuid.uuid4().hex,
        )

    def test_list_project_parents(self):
        # project1 -> project2 -> {project3, project4}; siblings share the
        # same parent chain.
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        project1 = projects_hierarchy[0]
        project2 = projects_hierarchy[1]
        project3 = projects_hierarchy[2]
        project4 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, parent_id=project2['id']
        )
        PROVIDERS.resource_api.create_project(project4['id'], project4)

        parents1 = PROVIDERS.resource_api.list_project_parents(project3['id'])
        self.assertEqual(3, len(parents1))
        self.assertIn(project1, parents1)
        self.assertIn(project2, parents1)

        parents2 = PROVIDERS.resource_api.list_project_parents(project4['id'])
        self.assertEqual(parents1, parents2)

        parents = PROVIDERS.resource_api.list_project_parents(project1['id'])
        # It has the default domain as parent
        self.assertEqual(1, len(parents))

    def test_update_project_enabled_cascade(self):
        """Test update_project_cascade.

        Ensures the enabled attribute is correctly updated across
        a simple 3-level projects hierarchy.
        """
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        parent = projects_hierarchy[0]

        # Disable in parent project disables the whole subtree
        parent['enabled'] = False

        # Store the ref from backend in another variable so we don't bother
        # to remove other attributes that were not originally provided and
        # were set in the manager, like parent_id and domain_id.
        parent_ref = PROVIDERS.resource_api.update_project(
            parent['id'], parent, cascade=True
        )
        subtree = PROVIDERS.resource_api.list_projects_in_subtree(parent['id'])
        self.assertEqual(2, len(subtree))
        self.assertFalse(parent_ref['enabled'])
        self.assertFalse(subtree[0]['enabled'])
        self.assertFalse(subtree[1]['enabled'])

        # Enable parent project enables the whole subtree
        parent['enabled'] = True
        parent_ref = PROVIDERS.resource_api.update_project(
            parent['id'], parent, cascade=True
        )
        subtree = PROVIDERS.resource_api.list_projects_in_subtree(parent['id'])
        self.assertEqual(2, len(subtree))
        self.assertTrue(parent_ref['enabled'])
        self.assertTrue(subtree[0]['enabled'])
        self.assertTrue(subtree[1]['enabled'])

    def test_cannot_enable_cascade_with_parent_disabled(self):
        # Cascade-enabling a project whose own parent is disabled is refused.
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        grandparent = projects_hierarchy[0]
        parent = projects_hierarchy[1]

        grandparent['enabled'] = False
        PROVIDERS.resource_api.update_project(
            grandparent['id'], grandparent, cascade=True
        )
        subtree = PROVIDERS.resource_api.list_projects_in_subtree(parent['id'])
        self.assertFalse(subtree[0]['enabled'])

        parent['enabled'] = True
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.update_project,
            parent['id'],
            parent,
            cascade=True,
        )

    def test_update_cascade_only_accepts_enabled(self):
        # Update cascade does not accept any other attribute but 'enabled'
        new_project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(new_project['id'], new_project)

        new_project['name'] = 'project1'
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.update_project,
            new_project['id'],
            new_project,
            cascade=True,
        )

    def test_list_project_parents_invalid_project_id(self):
        # None is invalid input; an unknown id raises not-found.
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.list_project_parents,
            None,
        )

        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.list_project_parents,
            uuid.uuid4().hex,
        )

    def test_create_project_doesnt_modify_passed_in_dict(self):
        new_project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        original_project = new_project.copy()
        PROVIDERS.resource_api.create_project(new_project['id'], new_project)
        self.assertDictEqual(original_project, new_project)

    def test_update_project_enable(self):
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertTrue(project_ref['enabled'])

        project['enabled'] = False
        PROVIDERS.resource_api.update_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertEqual(project['enabled'], project_ref['enabled'])

        # If not present, enabled field should not be updated
        del project['enabled']
        PROVIDERS.resource_api.update_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertFalse(project_ref['enabled'])

        project['enabled'] = True
        PROVIDERS.resource_api.update_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertEqual(project['enabled'], project_ref['enabled'])

        del project['enabled']
        PROVIDERS.resource_api.update_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertTrue(project_ref['enabled'])

    def test_create_invalid_domain_fails(self):
        # Users and groups cannot be created in a non-existent domain.
        new_group = unit.new_group_ref(domain_id="doesnotexist")
        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.identity_api.create_group,
            new_group,
        )

        new_user = unit.new_user_ref(domain_id="doesnotexist")
        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.identity_api.create_user,
            new_user,
        )

    @unit.skip_if_no_multiple_domains_support
    def test_project_crud(self):
        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        project = unit.new_project_ref(domain_id=domain['id'])
PROVIDERS.resource_api.create_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertLessEqual(project.items(), project_ref.items()) project['name'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertLessEqual(project.items(), project_ref.items()) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project['id'], ) def test_domain_delete_hierarchy(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Creating a root and a leaf project inside the domain projects_hierarchy = self._create_projects_hierarchy( domain_id=domain['id'] ) root_project = projects_hierarchy[0] leaf_project = projects_hierarchy[0] # Disable the domain domain['enabled'] = False PROVIDERS.resource_api.update_domain(domain['id'], domain) # Delete the domain PROVIDERS.resource_api.delete_domain(domain['id']) # Make sure the domain no longer exists self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain['id'], ) # Make sure the root project no longer exists self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, root_project['id'], ) # Make sure the leaf project no longer exists self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, leaf_project['id'], ) def test_delete_projects_from_ids(self): """Test the resource backend call delete_projects_from_ids. Tests the normal flow of the delete_projects_from_ids backend call, that ensures no project on the list exists after it is successfully called. 
""" project1_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project2_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) projects = (project1_ref, project2_ref) for project in projects: PROVIDERS.resource_api.create_project(project['id'], project) # Setting up the ID's list projects_ids = [p['id'] for p in projects] PROVIDERS.resource_api.driver.delete_projects_from_ids(projects_ids) # Ensuring projects no longer exist at backend level for project_id in projects_ids: self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.driver.get_project, project_id, ) # Passing an empty list is silently ignored PROVIDERS.resource_api.driver.delete_projects_from_ids([]) def test_delete_projects_from_ids_with_no_existing_project_id(self): """Test delete_projects_from_ids issues warning if not found. Tests the resource backend call delete_projects_from_ids passing a non existing ID in project_ids, which is logged and ignored by the backend. """ project_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) # Setting up the ID's list projects_ids = (project_ref['id'], uuid.uuid4().hex) with mock.patch('keystone.resource.backends.sql.LOG') as mock_log: PROVIDERS.resource_api.delete_projects_from_ids(projects_ids) self.assertTrue(mock_log.warning.called) # The existing project was deleted. self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.driver.get_project, project_ref['id'], ) # Even if we only have one project, and it does not exist, it returns # no error. 
        PROVIDERS.resource_api.driver.delete_projects_from_ids(
            [uuid.uuid4().hex]
        )

    def test_delete_project_cascade(self):
        # create a hierarchy with 3 levels
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        root_project = projects_hierarchy[0]
        project1 = projects_hierarchy[1]
        project2 = projects_hierarchy[2]

        # Disabling all projects before attempting to delete
        for project in (project2, project1, root_project):
            project['enabled'] = False
            PROVIDERS.resource_api.update_project(project['id'], project)

        PROVIDERS.resource_api.delete_project(root_project['id'], cascade=True)

        # The whole hierarchy must be gone after the cascade delete.
        for project in projects_hierarchy:
            self.assertRaises(
                exception.ProjectNotFound,
                PROVIDERS.resource_api.get_project,
                project['id'],
            )

    def test_delete_large_project_cascade(self):
        """Try delete a large project with cascade true.

        Tree we will create::

             +-p1-+
             |    |
            p5    p2
             |    |
            p6  +-p3-+
                |    |
               p7    p4
        """
        # create a hierarchy with 4 levels
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=4)
        p1 = projects_hierarchy[0]
        # Add the left branch to the hierarchy (p5, p6)
        self._create_projects_hierarchy(
            hierarchy_size=2, parent_project_id=p1['id']
        )
        # Add p7 to the hierarchy
        p3_id = projects_hierarchy[2]['id']
        self._create_projects_hierarchy(
            hierarchy_size=1, parent_project_id=p3_id
        )
        # Reverse the hierarchy to disable the leaf first
        prjs_hierarchy = (
            [p1] + PROVIDERS.resource_api.list_projects_in_subtree(p1['id'])
        )[::-1]
        # Disabling all projects before attempting to delete
        for project in prjs_hierarchy:
            project['enabled'] = False
            PROVIDERS.resource_api.update_project(project['id'], project)

        PROVIDERS.resource_api.delete_project(p1['id'], cascade=True)
        for project in prjs_hierarchy:
            self.assertRaises(
                exception.ProjectNotFound,
                PROVIDERS.resource_api.get_project,
                project['id'],
            )

    def test_cannot_delete_project_cascade_with_enabled_child(self):
        # create a hierarchy with 3 levels
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        root_project = projects_hierarchy[0]
        project1 = projects_hierarchy[1]
        project2 = projects_hierarchy[2]

        project2['enabled'] = False
        PROVIDERS.resource_api.update_project(project2['id'], project2)

        # Cannot cascade delete root_project, since project1 is enabled
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.delete_project,
            root_project['id'],
            cascade=True,
        )

        # Ensuring no project was deleted, not even project2
        PROVIDERS.resource_api.get_project(root_project['id'])
        PROVIDERS.resource_api.get_project(project1['id'])
        PROVIDERS.resource_api.get_project(project2['id'])

    def test_hierarchical_projects_crud(self):
        # create a hierarchy with just a root project (which is a leaf as well)
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=1)
        root_project1 = projects_hierarchy[0]

        # create a hierarchy with one root project and one leaf project
        projects_hierarchy = self._create_projects_hierarchy()
        root_project2 = projects_hierarchy[0]
        leaf_project = projects_hierarchy[1]

        # update description from leaf_project
        leaf_project['description'] = 'new description'
        PROVIDERS.resource_api.update_project(leaf_project['id'], leaf_project)
        proj_ref = PROVIDERS.resource_api.get_project(leaf_project['id'])
        self.assertDictEqual(leaf_project, proj_ref)

        # update the parent_id is not allowed
        leaf_project['parent_id'] = root_project1['id']
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.update_project,
            leaf_project['id'],
            leaf_project,
        )

        # delete root_project1
        PROVIDERS.resource_api.delete_project(root_project1['id'])
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.get_project,
            root_project1['id'],
        )

        # delete root_project2 is not allowed since it is not a leaf project
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.delete_project,
            root_project2['id'],
        )

    def test_create_project_with_invalid_parent(self):
        # A parent_id that does not reference an existing project is rejected.
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, parent_id='fake'
        )
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.create_project,
            project['id'],
            project,
        )

    @unit.skip_if_no_multiple_domains_support
    def test_create_leaf_project_with_different_domain(self):
        # A child project may not live in a different domain than its parent.
        root_project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(root_project['id'], root_project)

        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        leaf_project = unit.new_project_ref(
            domain_id=domain['id'], parent_id=root_project['id']
        )

        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_project,
            leaf_project['id'],
            leaf_project,
        )

    def test_delete_hierarchical_leaf_project(self):
        # Deleting leaf first then root succeeds.
        projects_hierarchy = self._create_projects_hierarchy()
        root_project = projects_hierarchy[0]
        leaf_project = projects_hierarchy[1]

        PROVIDERS.resource_api.delete_project(leaf_project['id'])
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.get_project,
            leaf_project['id'],
        )

        PROVIDERS.resource_api.delete_project(root_project['id'])
        self.assertRaises(
            exception.ProjectNotFound,
            PROVIDERS.resource_api.get_project,
            root_project['id'],
        )

    def test_delete_hierarchical_not_leaf_project(self):
        # Deleting a project that still has children (non-cascade) is refused.
        projects_hierarchy = self._create_projects_hierarchy()
        root_project = projects_hierarchy[0]

        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.delete_project,
            root_project['id'],
        )

    def test_update_project_parent(self):
        projects_hierarchy = self._create_projects_hierarchy(hierarchy_size=3)
        project1 = projects_hierarchy[0]
        project2 = projects_hierarchy[1]
        project3 = projects_hierarchy[2]

        # project2 is the parent from project3
        self.assertEqual(project3.get('parent_id'), project2['id'])

        # try to update project3 parent to parent1
        project3['parent_id'] = project1['id']
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.update_project,
            project3['id'],
            project3,
        )

    def test_create_project_under_disabled_one(self):
        project1 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, enabled=False
        )
        PROVIDERS.resource_api.create_project(project1['id'], project1)

        project2 = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id, parent_id=project1['id']
        )

        # It's not possible to create a project under a disabled one in the
        # hierarchy
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_project,
            project2['id'],
            project2,
        )

    def test_disable_hierarchical_leaf_project(self):
        # A leaf project can be disabled freely.
        projects_hierarchy = self._create_projects_hierarchy()
        leaf_project = projects_hierarchy[1]

        leaf_project['enabled'] = False
        PROVIDERS.resource_api.update_project(leaf_project['id'], leaf_project)

        project_ref = PROVIDERS.resource_api.get_project(leaf_project['id'])
        self.assertEqual(leaf_project['enabled'], project_ref['enabled'])

    def test_disable_hierarchical_not_leaf_project(self):
        # Disabling a project with enabled children (non-cascade) is refused.
        projects_hierarchy = self._create_projects_hierarchy()
        root_project = projects_hierarchy[0]

        root_project['enabled'] = False
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.update_project,
            root_project['id'],
            root_project,
        )

    def test_enable_project_with_disabled_parent(self):
        projects_hierarchy = self._create_projects_hierarchy()
        root_project = projects_hierarchy[0]
        leaf_project = projects_hierarchy[1]

        # Disable leaf and root
        leaf_project['enabled'] = False
        PROVIDERS.resource_api.update_project(leaf_project['id'], leaf_project)
        root_project['enabled'] = False
        PROVIDERS.resource_api.update_project(root_project['id'], root_project)

        # Try to enable the leaf project, it's not possible since it has
        # a disabled parent
        leaf_project['enabled'] = True
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.update_project,
            leaf_project['id'],
            leaf_project,
        )

    def _get_hierarchy_depth(self, project_id):
        # Depth = number of parents (including the project acting as a
        # domain) plus the project itself.
        return len(PROVIDERS.resource_api.list_project_parents(project_id)) + 1

    def test_check_hierarchy_depth(self):
        # Should be allowed to have a hierarchy of the max depth specified
        # in the config option plus one (to allow for the additional project
        # acting as a domain after an upgrade)
        projects_hierarchy = self._create_projects_hierarchy(
            CONF.max_project_tree_depth
        )
        leaf_project = projects_hierarchy[CONF.max_project_tree_depth - 1]

        depth = self._get_hierarchy_depth(leaf_project['id'])
        self.assertEqual(CONF.max_project_tree_depth + 1, depth)

        # Creating another project in the hierarchy shouldn't be allowed
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id,
            parent_id=leaf_project['id'],
        )
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.create_project,
            project['id'],
            project,
        )

    def test_project_update_missing_attrs_with_a_value(self):
        # Creating a project with no description attribute.
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        del project['description']
        project = PROVIDERS.resource_api.create_project(project['id'], project)

        # Add a description attribute.
        project['description'] = uuid.uuid4().hex
        PROVIDERS.resource_api.update_project(project['id'], project)

        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertDictEqual(project, project_ref)

    def test_project_update_missing_attrs_with_a_falsey_value(self):
        # Creating a project with no description attribute.
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        del project['description']
        project = PROVIDERS.resource_api.create_project(project['id'], project)

        # Add a description attribute.
        project['description'] = ''
        PROVIDERS.resource_api.update_project(project['id'], project)

        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertDictEqual(project, project_ref)

    def test_domain_crud(self):
        # Full create/get/update/delete cycle for a domain, including the
        # rule that an enabled domain cannot be deleted.
        domain = unit.new_domain_ref()
        domain_ref = PROVIDERS.resource_api.create_domain(domain['id'], domain)
        self.assertDictEqual(domain, domain_ref)
        domain_ref = PROVIDERS.resource_api.get_domain(domain['id'])
        self.assertDictEqual(domain, domain_ref)

        domain['name'] = uuid.uuid4().hex
        domain_ref = PROVIDERS.resource_api.update_domain(domain['id'], domain)
        self.assertDictEqual(domain, domain_ref)
        domain_ref = PROVIDERS.resource_api.get_domain(domain['id'])
        self.assertDictEqual(domain, domain_ref)

        # Ensure an 'enabled' domain cannot be deleted
        self.assertRaises(
            exception.ForbiddenNotSecurity,
            PROVIDERS.resource_api.delete_domain,
            domain_id=domain['id'],
        )

        # Disable the domain
        domain['enabled'] = False
        PROVIDERS.resource_api.update_domain(domain['id'], domain)

        # Delete the domain
        PROVIDERS.resource_api.delete_domain(domain['id'])

        # Make sure the domain no longer exists
        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.resource_api.get_domain,
            domain['id'],
        )

    @unit.skip_if_no_multiple_domains_support
    def test_delete_domain_call_db_time(self):
        # delete_domain should hit the backend's get_project exactly once.
        domain = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain['id'], domain)

        # Disable the domain
        domain['enabled'] = False
        PROVIDERS.resource_api.update_domain(domain['id'], domain)

        domain_ref = PROVIDERS.resource_api.get_project(domain['id'])
        with mock.patch.object(
            resource_sql.Resource, "get_project"
        ) as mock_get_project:
            mock_get_project.return_value = domain_ref

            # Delete the domain
            PROVIDERS.resource_api.delete_domain(domain['id'])
            self.assertEqual(mock_get_project.call_count, 1)

    @unit.skip_if_no_multiple_domains_support
    def test_domain_name_case_sensitivity(self):
        # Domain names differing only in case are distinct domains, and each
        # is retrievable by its exact name.
        # create a ref with a lowercase name
        domain_name = 'test_domain'
        ref = unit.new_domain_ref(name=domain_name)

        lower_case_domain = PROVIDERS.resource_api.create_domain(
            ref['id'], ref
        )

        # assign a new ID to the ref with the same name, but in uppercase
        ref['id'] = uuid.uuid4().hex
        ref['name'] = domain_name.upper()
        upper_case_domain = PROVIDERS.resource_api.create_domain(
            ref['id'], ref
        )

        # We can get each domain by name
        lower_case_domain_ref = PROVIDERS.resource_api.get_domain_by_name(
            domain_name
        )
        self.assertDictEqual(lower_case_domain, lower_case_domain_ref)

        upper_case_domain_ref = PROVIDERS.resource_api.get_domain_by_name(
            domain_name.upper()
        )
        self.assertDictEqual(upper_case_domain, upper_case_domain_ref)

    def test_project_attribute_update(self):
        # Exercises transitions of an optional attribute (description)
        # between missing, empty-string, None and a real value.
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.resource_api.create_project(project['id'], project)

        # pick a key known to be non-existent
        key = 'description'

        def assert_key_equals(value):
            project_ref = PROVIDERS.resource_api.update_project(
                project['id'], project
            )
            self.assertEqual(value, project_ref[key])
            project_ref = PROVIDERS.resource_api.get_project(project['id'])
            self.assertEqual(value, project_ref[key])

        def assert_get_key_is(value):
            project_ref = PROVIDERS.resource_api.update_project(
                project['id'], project
            )
            self.assertIs(project_ref.get(key), value)
            project_ref = PROVIDERS.resource_api.get_project(project['id'])
            self.assertIs(project_ref.get(key), value)

        # add an attribute that doesn't exist, set it to a falsey value
        value = ''
        project[key] = value
        assert_key_equals(value)

        # set an attribute with a falsey value to null
        value = None
        project[key] = value
        assert_get_key_is(value)

        # do it again, in case updating from this situation is handled oddly
        value = None
        project[key] = value
        assert_get_key_is(value)

        # set a possibly-null value to a falsey value
        value = ''
        project[key] = value
        assert_key_equals(value)

        # set a falsey value to a truthy value
        value = uuid.uuid4().hex
        project[key] = value
        assert_key_equals(value)

    @unit.skip_if_cache_disabled('resource')
    @unit.skip_if_no_multiple_domains_support
    def test_domain_rename_invalidates_get_domain_by_name_cache(self):
        # Renaming a domain must evict the by-name cache entry for the old
        # name so lookups by the old name fail.
        domain = unit.new_domain_ref()
        domain_id = domain['id']
        domain_name = domain['name']
        PROVIDERS.resource_api.create_domain(domain_id, domain)
        domain_ref = PROVIDERS.resource_api.get_domain_by_name(domain_name)
        domain_ref['name'] = uuid.uuid4().hex
        PROVIDERS.resource_api.update_domain(domain_id, domain_ref)
        self.assertRaises(
            exception.DomainNotFound,
            PROVIDERS.resource_api.get_domain_by_name,
            domain_name,
        )

    @unit.skip_if_cache_disabled('resource')
    def test_cache_layer_domain_crud(self):
        domain = unit.new_domain_ref()
        domain_id = domain['id']
        # Create Domain
        PROVIDERS.resource_api.create_domain(domain_id, domain)
        project_domain_ref = PROVIDERS.resource_api.get_project(domain_id)
        domain_ref = PROVIDERS.resource_api.get_domain(domain_id)
        updated_project_domain_ref = copy.deepcopy(project_domain_ref)
        updated_project_domain_ref['name'] = uuid.uuid4().hex
        updated_domain_ref = copy.deepcopy(domain_ref)
        updated_domain_ref['name'] = updated_project_domain_ref['name']
        # Update domain, bypassing resource api manager
        PROVIDERS.resource_api.driver.update_project(
            domain_id, updated_project_domain_ref
        )
        # Verify get_domain still returns the domain
        self.assertLessEqual(
            domain_ref.items(),
            PROVIDERS.resource_api.get_domain(domain_id).items(),
        )
        # Invalidate cache
        PROVIDERS.resource_api.get_domain.invalidate(
            PROVIDERS.resource_api, domain_id
        )
        # Verify get_domain returns the updated domain
        self.assertLessEqual(
            updated_domain_ref.items(),
            PROVIDERS.resource_api.get_domain(domain_id).items(),
        )
        # Update the domain back to original ref, using the assignment api
        # manager
        PROVIDERS.resource_api.update_domain(domain_id, domain_ref)
        self.assertLessEqual(
            domain_ref.items(),
            PROVIDERS.resource_api.get_domain(domain_id).items(),
        )
        # Make sure domain is 'disabled', bypass resource api manager
        project_domain_ref_disabled = project_domain_ref.copy()
        project_domain_ref_disabled['enabled'] = False
PROVIDERS.resource_api.driver.update_project( domain_id, project_domain_ref_disabled ) PROVIDERS.resource_api.driver.update_project( domain_id, {'enabled': False} ) # Delete domain, bypassing resource api manager PROVIDERS.resource_api.driver.delete_project(domain_id) # Verify get_domain still returns the domain self.assertLessEqual( domain_ref.items(), PROVIDERS.resource_api.get_domain(domain_id).items(), ) # Invalidate cache PROVIDERS.resource_api.get_domain.invalidate( PROVIDERS.resource_api, domain_id ) # Verify get_domain now raises DomainNotFound self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain_id, ) # Recreate Domain PROVIDERS.resource_api.create_domain(domain_id, domain) PROVIDERS.resource_api.get_domain(domain_id) # Make sure domain is 'disabled', bypass resource api manager domain['enabled'] = False PROVIDERS.resource_api.driver.update_project(domain_id, domain) PROVIDERS.resource_api.driver.update_project( domain_id, {'enabled': False} ) # Delete domain PROVIDERS.resource_api.delete_domain(domain_id) # verify DomainNotFound raised self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain_id, ) @unit.skip_if_cache_disabled('resource') @unit.skip_if_no_multiple_domains_support def test_project_rename_invalidates_get_project_by_name_cache(self): domain = unit.new_domain_ref() project = unit.new_project_ref(domain_id=domain['id']) project_id = project['id'] project_name = project['name'] PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create a project PROVIDERS.resource_api.create_project(project_id, project) PROVIDERS.resource_api.get_project_by_name(project_name, domain['id']) project['name'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project_id, project) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project_by_name, project_name, domain['id'], ) @unit.skip_if_cache_disabled('resource') @unit.skip_if_no_multiple_domains_support def 
test_cache_layer_project_crud(self): domain = unit.new_domain_ref() project = unit.new_project_ref(domain_id=domain['id']) project_id = project['id'] PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create a project PROVIDERS.resource_api.create_project(project_id, project) PROVIDERS.resource_api.get_project(project_id) updated_project = copy.deepcopy(project) updated_project['name'] = uuid.uuid4().hex # Update project, bypassing resource manager PROVIDERS.resource_api.driver.update_project( project_id, updated_project ) # Verify get_project still returns the original project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Invalidate cache PROVIDERS.resource_api.get_project.invalidate( PROVIDERS.resource_api, project_id ) # Verify get_project now returns the new project self.assertLessEqual( updated_project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Update project using the resource_api manager back to original PROVIDERS.resource_api.update_project(project['id'], project) # Verify get_project returns the original project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Delete project bypassing resource PROVIDERS.resource_api.driver.delete_project(project_id) # Verify get_project still returns the project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Invalidate cache PROVIDERS.resource_api.get_project.invalidate( PROVIDERS.resource_api, project_id ) # Verify ProjectNotFound now raised self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project_id, ) # recreate project PROVIDERS.resource_api.create_project(project_id, project) PROVIDERS.resource_api.get_project(project_id) # delete project PROVIDERS.resource_api.delete_project(project_id) # Verify ProjectNotFound is raised self.assertRaises( exception.ProjectNotFound, 
PROVIDERS.resource_api.get_project, project_id, ) @unit.skip_if_no_multiple_domains_support def test_get_default_domain_by_name(self): domain_name = 'default' domain = unit.new_domain_ref(name=domain_name) PROVIDERS.resource_api.create_domain(domain['id'], domain) domain_ref = PROVIDERS.resource_api.get_domain_by_name(domain_name) self.assertEqual(domain, domain_ref) def test_get_not_default_domain_by_name(self): domain_name = 'foo' self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain_by_name, domain_name, ) def test_project_update_and_project_get_return_same_response(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project['id'], project) updated_project = {'enabled': False} updated_project_ref = PROVIDERS.resource_api.update_project( project['id'], updated_project ) # SQL backend adds 'extra' field updated_project_ref.pop('extra', None) self.assertIs(False, updated_project_ref['enabled']) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(updated_project_ref, project_ref) def test_delete_project_clears_default_project_id(self): self.config_fixture.config(group='cache', enabled=False) project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, project_id=project['id'] ) PROVIDERS.resource_api.create_project(project['id'], project) user = PROVIDERS.identity_api.create_user(user) user = PROVIDERS.identity_api.get_user(user['id']) # LDAP is read only default_project_id doesn't exist if 'default_project_id' in user: self.assertIsNotNone(user['default_project_id']) PROVIDERS.resource_api.delete_project(project['id']) user = PROVIDERS.identity_api.get_user(user['id']) self.assertNotIn('default_project_id', user) def test_delete_project_with_roles_clears_default_project_id(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) 
user = unit.new_user_ref( domain_id=CONF.identity.default_domain_id, project_id=project['id'] ) PROVIDERS.resource_api.create_project(project['id'], project) user = PROVIDERS.identity_api.create_user(user) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) PROVIDERS.assignment_api.create_grant( user_id=user['id'], project_id=project['id'], role_id=role['id'] ) PROVIDERS.resource_api.delete_project(project['id']) user = PROVIDERS.identity_api.get_user(user['id']) self.assertNotIn('default_project_id', user) def _create_project_and_tags(self, num_of_tags=1): """Create a project and tags associated to that project. :param num_of_tags: the desired number of tags attached to a project, default is 1. :returns: A tuple of a new project and a list of random tags """ tags = [uuid.uuid4().hex for i in range(num_of_tags)] ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, tags=tags ) project = PROVIDERS.resource_api.create_project(ref['id'], ref) return project, tags def test_create_project_with_tags(self): project, tags = self._create_project_and_tags(num_of_tags=5) tag_ref = PROVIDERS.resource_api.get_project_tag( project['id'], tags[0] ) self.assertEqual(tags[0], tag_ref) def test_get_project_contains_tags(self): project, _ = self._create_project_and_tags() tag = uuid.uuid4().hex PROVIDERS.resource_api.create_project_tag(project['id'], tag) ref = PROVIDERS.resource_api.get_project(project['id']) self.assertIn(tag, ref['tags']) def test_list_project_tags(self): project, tags = self._create_project_and_tags(num_of_tags=1) tag_ref = PROVIDERS.resource_api.list_project_tags(project['id']) self.assertEqual(tags[0], tag_ref[0]) def test_list_project_tags_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.list_project_tags, uuid.uuid4().hex, ) def test_get_project_tag(self): project, tags = self._create_project_and_tags() tag_ref = PROVIDERS.resource_api.get_project_tag( project['id'], tags[0] 
) self.assertEqual(tags[0], tag_ref) def test_create_project_tag_with_trailing_whitespace(self): project, _ = self._create_project_and_tags() tag = uuid.uuid4().hex + ' ' tag_ref = PROVIDERS.resource_api.create_project_tag(project['id'], tag) self.assertEqual(tag.strip(), tag_ref) def test_create_project_tag_is_case_sensitive(self): project, _ = self._create_project_and_tags() new_tags = ['aaa', 'AAA'] ref = PROVIDERS.resource_api.update_project_tags( project['id'], new_tags ) for tag in new_tags: self.assertIn(tag, ref) def test_update_project_tags(self): project, tags = self._create_project_and_tags(num_of_tags=2) project_tag_ref = PROVIDERS.resource_api.list_project_tags( project['id'] ) self.assertEqual(len(project_tag_ref), 2) # Update project to only have one tag tags = ['one'] PROVIDERS.resource_api.update_project_tags(project['id'], tags) project_tag_ref = PROVIDERS.resource_api.list_project_tags( project['id'] ) self.assertEqual(len(project_tag_ref), 1) def test_update_project_tags_returns_not_found(self): _, tags = self._create_project_and_tags(num_of_tags=2) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.update_project_tags, uuid.uuid4().hex, tags, ) def test_delete_tag_from_project(self): project, tags = self._create_project_and_tags(num_of_tags=2) tag_to_delete = tags[-1] PROVIDERS.resource_api.delete_project_tag(project['id'], tag_to_delete) project_tag_ref = PROVIDERS.resource_api.list_project_tags( project['id'] ) self.assertEqual(len(project_tag_ref), 1) self.assertEqual(project_tag_ref[0], tags[0]) def test_delete_project_tag_returns_not_found(self): self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.delete_project_tag, uuid.uuid4().hex, uuid.uuid4().hex, ) def test_delete_project_tags(self): project, tags = self._create_project_and_tags(num_of_tags=5) project_tag_ref = PROVIDERS.resource_api.list_project_tags( project['id'] ) self.assertEqual(len(project_tag_ref), 5) 
PROVIDERS.resource_api.update_project_tags(project['id'], []) project_tag_ref = PROVIDERS.resource_api.list_project_tags( project['id'] ) self.assertEqual(project_tag_ref, []) def test_create_project_immutable(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True p_created = PROVIDERS.resource_api.create_project( project['id'], project ) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in p_created) self.assertTrue('options' in project_via_manager) self.assertTrue( project_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) self.assertTrue(p_created['options'][ro_opt.IMMUTABLE_OPT.option_name]) def test_cannot_update_immutable_project(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.resource_api.create_project(project['id'], project) update_project = {'name': uuid.uuid4().hex} self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.resource_api.update_project, project['id'], update_project, ) def test_cannot_update_immutable_project_while_unsetting_immutable(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.resource_api.create_project(project['id'], project) update_project = { 'name': uuid.uuid4().hex, 'options': {ro_opt.IMMUTABLE_OPT.option_name: True}, } self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.resource_api.update_project, project['id'], update_project, ) def test_cannot_delete_immutable_project(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.resource_api.create_project(project['id'], project) self.assertRaises( exception.ResourceDeleteForbidden, PROVIDERS.resource_api.delete_project, 
project['id'], ) def test_update_project_set_immutable(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project['id'], project) update_project = {'options': {ro_opt.IMMUTABLE_OPT.option_name: True}} project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in project_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) p_update = PROVIDERS.resource_api.update_project( project['id'], update_project ) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in p_update['options'] ) self.assertTrue(p_update['options'][ro_opt.IMMUTABLE_OPT.option_name]) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) self.assertTrue( project_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_update_project_set_immutable_with_additional_updates(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project['id'], project) update_project = { 'name': uuid.uuid4().hex, 'options': {ro_opt.IMMUTABLE_OPT.option_name: True}, } project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in project_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) p_update = PROVIDERS.resource_api.update_project( project['id'], update_project ) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertEqual(p_update['name'], update_project['name']) self.assertEqual(project_via_manager['name'], update_project['name']) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in p_update['options'] ) self.assertTrue(p_update['options'][ro_opt.IMMUTABLE_OPT.option_name]) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) self.assertTrue( 
project_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_update_project_unset_immutable(self): project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.resource_api.create_project(project['id'], project) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in project_via_manager) self.assertTrue( project_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) update_project = {'options': {ro_opt.IMMUTABLE_OPT.option_name: False}} PROVIDERS.resource_api.update_project(project['id'], update_project) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in project_via_manager) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) self.assertFalse( project_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) update_project = {'name': uuid.uuid4().hex} p_updated = PROVIDERS.resource_api.update_project( project['id'], update_project ) self.assertEqual(p_updated['name'], update_project['name']) update_project = {'options': {ro_opt.IMMUTABLE_OPT.option_name: None}} p_updated = PROVIDERS.resource_api.update_project( project['id'], update_project ) project_via_manager = PROVIDERS.resource_api.get_project(project['id']) self.assertTrue('options' in p_updated) self.assertTrue('options' in project_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in p_updated['options'] ) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in project_via_manager['options'] ) def test_cannot_delete_project_tags_immutable_project(self): project, tags = self._create_project_and_tags(num_of_tags=2) update_project = {'options': {ro_opt.IMMUTABLE_OPT.option_name: True}} PROVIDERS.resource_api.update_project(project['id'], update_project) self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.resource_api.delete_project_tag, project['id'], 
tags[0], ) def test_cannot_update_project_tags_immutable_project(self): # Update and Add tag use the same API project, tags = self._create_project_and_tags(num_of_tags=2) update_project = {'options': {ro_opt.IMMUTABLE_OPT.option_name: True}} PROVIDERS.resource_api.update_project(project['id'], update_project) tags.append(uuid.uuid4().hex) self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.resource_api.update_project_tags, project['id'], tags, ) @unit.skip_if_no_multiple_domains_support def test_create_domain_immutable(self): domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, 'options': {'immutable': True}, } PROVIDERS.resource_api.create_domain(domain_id, domain) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue('options' in domain_via_manager) self.assertTrue(domain_via_manager['options']['immutable']) @unit.skip_if_no_multiple_domains_support def test_cannot_update_immutable_domain(self): domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, 'options': {'immutable': True}, } PROVIDERS.resource_api.create_domain(domain_id, domain) update_domain = {'name': uuid.uuid4().hex} self.assertRaises( exception.ResourceUpdateForbidden, PROVIDERS.resource_api.update_domain, domain_id, update_domain, ) @unit.skip_if_no_multiple_domains_support def test_cannot_delete_immutable_domain(self): domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, 'options': {'immutable': True}, } PROVIDERS.resource_api.create_domain(domain_id, domain) self.assertRaises( exception.ResourceDeleteForbidden, PROVIDERS.resource_api.delete_domain, domain_id, ) @unit.skip_if_no_multiple_domains_support def test_cannot_delete_disabled_domain_with_immutable_project(self): domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, } 
PROVIDERS.resource_api.create_domain(domain_id, domain) project = unit.new_project_ref(domain_id) project['options'][ro_opt.IMMUTABLE_OPT.option_name] = True PROVIDERS.resource_api.create_project(project['id'], project) # Disable the domain PROVIDERS.resource_api.update_domain(domain_id, {'enabled': False}) # attempt to delete the domain, should error when the immutable # project is reached self.assertRaises( exception.ResourceDeleteForbidden, PROVIDERS.resource_api.delete_domain, domain_id, ) @unit.skip_if_no_multiple_domains_support def test_update_domain_set_immutable(self): # domains are projects, this should be the same as the project version domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, } PROVIDERS.resource_api.create_domain(domain_id, domain) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue('options' in domain_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in domain_via_manager['options'] ) domain_update = {'options': {ro_opt.IMMUTABLE_OPT.option_name: True}} d_update = PROVIDERS.resource_api.update_domain( domain_id, domain_update ) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in d_update['options'] ) self.assertTrue(d_update['options'][ro_opt.IMMUTABLE_OPT.option_name]) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in domain_via_manager['options'] ) self.assertTrue( domain_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) def test_update_domain_unset_immutable(self): # domains are projects, this should be the same as the project version domain_id = uuid.uuid4().hex domain = { 'name': uuid.uuid4().hex, 'id': domain_id, 'is_domain': True, } PROVIDERS.resource_api.create_domain(domain_id, domain) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue('options' in domain_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in 
domain_via_manager['options'] ) update_domain = {'options': {ro_opt.IMMUTABLE_OPT.option_name: False}} d_updated = PROVIDERS.resource_api.update_domain( domain_id, update_domain ) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue('options' in domain_via_manager) self.assertTrue('options' in d_updated) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in domain_via_manager['options'] ) self.assertTrue( ro_opt.IMMUTABLE_OPT.option_name in d_updated['options'] ) self.assertFalse( d_updated['options'][ro_opt.IMMUTABLE_OPT.option_name] ) self.assertFalse( domain_via_manager['options'][ro_opt.IMMUTABLE_OPT.option_name] ) update_domain = {'name': uuid.uuid4().hex} d_updated = PROVIDERS.resource_api.update_domain( domain_id, update_domain ) self.assertEqual(d_updated['name'], update_domain['name']) update_domain = {'options': {ro_opt.IMMUTABLE_OPT.option_name: None}} d_updated = PROVIDERS.resource_api.update_domain( domain_id, update_domain ) domain_via_manager = PROVIDERS.resource_api.get_domain(domain_id) self.assertTrue('options' in d_updated) self.assertTrue('options' in domain_via_manager) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in d_updated['options'] ) self.assertFalse( ro_opt.IMMUTABLE_OPT.option_name in domain_via_manager['options'] ) class ResourceDriverTests: """Test for the resource driver. Subclasses must set self.driver to the driver instance. 
""" def test_create_project(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': default_fixtures.ROOT_DOMAIN['id'], } self.driver.create_project(project_id, project) def test_create_project_all_defined_properties(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': default_fixtures.ROOT_DOMAIN['id'], } parent_project = self.driver.create_project(project_id, project) project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': default_fixtures.ROOT_DOMAIN['id'], 'description': uuid.uuid4().hex, 'enabled': True, 'parent_id': parent_project['id'], 'is_domain': True, } self.driver.create_project(project_id, project) def test_create_project_null_domain(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': None, } self.driver.create_project(project_id, project) def test_create_project_same_name_same_domain_conflict(self): name = uuid.uuid4().hex domain_id = default_fixtures.ROOT_DOMAIN['id'] project_id = uuid.uuid4().hex project = { 'name': name, 'id': project_id, 'domain_id': domain_id, } self.driver.create_project(project_id, project) project_id = uuid.uuid4().hex project = { 'name': name, 'id': project_id, 'domain_id': domain_id, } self.assertRaises( exception.Conflict, self.driver.create_project, project_id, project ) def test_create_project_same_id_conflict(self): project_id = uuid.uuid4().hex project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': default_fixtures.ROOT_DOMAIN['id'], } self.driver.create_project(project_id, project) project = { 'name': uuid.uuid4().hex, 'id': project_id, 'domain_id': default_fixtures.ROOT_DOMAIN['id'], } self.assertRaises( exception.Conflict, self.driver.create_project, project_id, project ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/resource/test_core.py0000664000175000017500000007576600000000000023662 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy from unittest import mock import uuid from oslo_config import cfg from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestResourceManagerNoFixtures(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def test_update_project_name_conflict(self): name = uuid.uuid4().hex description = uuid.uuid4().hex domain_attrs = { 'id': CONF.identity.default_domain_id, 'name': name, 'description': description, } domain = PROVIDERS.resource_api.create_domain( CONF.identity.default_domain_id, domain_attrs ) project1 = unit.new_project_ref( domain_id=domain['id'], name=uuid.uuid4().hex ) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref( domain_id=domain['id'], name=uuid.uuid4().hex ) project = PROVIDERS.resource_api.create_project( project2['id'], project2 ) self.assertRaises( exception.Conflict, 
PROVIDERS.resource_api.update_project, project['id'], {'name': project1['name'], 'id': project['id']}, ) class DomainConfigDriverTests: def _domain_config_crud(self, sensitive): domain = uuid.uuid4().hex group = uuid.uuid4().hex option = uuid.uuid4().hex value = uuid.uuid4().hex config = { 'group': group, 'option': option, 'value': value, 'sensitive': sensitive, } self.driver.create_config_options(domain, [config]) res = self.driver.get_config_option(domain, group, option, sensitive) config.pop('sensitive') self.assertEqual(config, res) value = uuid.uuid4().hex config = { 'group': group, 'option': option, 'value': value, 'sensitive': sensitive, } self.driver.update_config_options(domain, [config]) res = self.driver.get_config_option(domain, group, option, sensitive) config.pop('sensitive') self.assertEqual(config, res) self.driver.delete_config_options(domain, group, option) self.assertRaises( exception.DomainConfigNotFound, self.driver.get_config_option, domain, group, option, sensitive, ) # ...and silent if we try to delete it again self.driver.delete_config_options(domain, group, option) def test_whitelisted_domain_config_crud(self): self._domain_config_crud(sensitive=False) def test_sensitive_domain_config_crud(self): self._domain_config_crud(sensitive=True) def _list_domain_config(self, sensitive): """Test listing by combination of domain, group & option.""" config1 = { 'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } # Put config2 in the same group as config1 config2 = { 'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } config3 = { 'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': 100, 'sensitive': sensitive, } domain = uuid.uuid4().hex self.driver.create_config_options(domain, [config1, config2, config3]) for config in [config1, config2, config3]: config.pop('sensitive') # Try listing all items from a domain res = 
self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config2, config3]) # Try listing by domain and group res = self.driver.list_config_options( domain, group=config1['group'], sensitive=sensitive ) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config2]) # Try listing by domain, group and option res = self.driver.list_config_options( domain, group=config2['group'], option=config2['option'], sensitive=sensitive, ) self.assertThat(res, matchers.HasLength(1)) self.assertEqual(config2, res[0]) def test_list_whitelisted_domain_config_crud(self): self._list_domain_config(False) def test_list_sensitive_domain_config_crud(self): self._list_domain_config(True) def _delete_domain_configs(self, sensitive): """Test deleting by combination of domain, group & option.""" config1 = { 'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } # Put config2 and config3 in the same group as config1 config2 = { 'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } config3 = { 'group': config1['group'], 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } config4 = { 'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } domain = uuid.uuid4().hex self.driver.create_config_options( domain, [config1, config2, config3, config4] ) for config in [config1, config2, config3, config4]: config.pop('sensitive') # Try deleting by domain, group and option res = self.driver.delete_config_options( domain, group=config2['group'], option=config2['option'] ) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(3)) for res_entry in res: self.assertIn(res_entry, [config1, config3, config4]) # Try deleting by 
domain and group res = self.driver.delete_config_options(domain, group=config4['group']) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(2)) for res_entry in res: self.assertIn(res_entry, [config1, config3]) # Try deleting all items from a domain res = self.driver.delete_config_options(domain) res = self.driver.list_config_options(domain, sensitive=sensitive) self.assertThat(res, matchers.HasLength(0)) def test_delete_whitelisted_domain_configs(self): self._delete_domain_configs(False) def test_delete_sensitive_domain_configs(self): self._delete_domain_configs(True) def _create_domain_config_twice(self, sensitive): """Test create the same option twice just overwrites.""" config = { 'group': uuid.uuid4().hex, 'option': uuid.uuid4().hex, 'value': uuid.uuid4().hex, 'sensitive': sensitive, } domain = uuid.uuid4().hex self.driver.create_config_options(domain, [config]) config['value'] = uuid.uuid4().hex self.driver.create_config_options(domain, [config]) res = self.driver.get_config_option( domain, config['group'], config['option'], sensitive ) config.pop('sensitive') self.assertEqual(config, res) def test_create_whitelisted_domain_config_twice(self): self._create_domain_config_twice(False) def test_create_sensitive_domain_config_twice(self): self._create_domain_config_twice(True) class DomainConfigTests: def setUp(self): self.domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domain['id'], self.domain) self.addCleanup(self.clean_up_domain) def clean_up_domain(self): # NOTE(henry-nash): Deleting the domain will also delete any domain # configs for this domain. 
self.domain['enabled'] = False PROVIDERS.resource_api.update_domain(self.domain['id'], self.domain) PROVIDERS.resource_api.delete_domain(self.domain['id']) del self.domain def test_create_domain_config_including_sensitive_option(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, } } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) # password is sensitive, so check that the whitelisted portion and # the sensitive piece have been stored in the appropriate locations. res = PROVIDERS.domain_config_api.get_config(self.domain['id']) config_whitelisted = copy.deepcopy(config) config_whitelisted['ldap'].pop('password') self.assertEqual(config_whitelisted, res) res = PROVIDERS.domain_config_api.driver.get_config_option( self.domain['id'], 'ldap', 'password', sensitive=True ) self.assertEqual(config['ldap']['password'], res['value']) # Finally, use the non-public API to get back the whole config res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual(config, res) def test_get_partial_domain_config(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) res = PROVIDERS.domain_config_api.get_config( self.domain['id'], group='identity' ) config_partial = copy.deepcopy(config) config_partial.pop('ldap') self.assertEqual(config_partial, res) res = PROVIDERS.domain_config_api.get_config( self.domain['id'], group='ldap', option='user_tree_dn' ) self.assertEqual({'user_tree_dn': config['ldap']['user_tree_dn']}, res) # ...but we should fail to get a sensitive option self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.get_config, self.domain['id'], group='ldap', option='password', ) def test_delete_partial_domain_config(self): config = { 'ldap': { 'url': 
uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) PROVIDERS.domain_config_api.delete_config( self.domain['id'], group='identity' ) config_partial = copy.deepcopy(config) config_partial.pop('identity') config_partial['ldap'].pop('password') res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) PROVIDERS.domain_config_api.delete_config( self.domain['id'], group='ldap', option='url' ) config_partial = copy.deepcopy(config_partial) config_partial['ldap'].pop('url') res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(config_partial, res) def test_get_options_not_in_domain_config(self): self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.get_config, self.domain['id'], ) config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.get_config, self.domain['id'], group='identity', ) self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.get_config, self.domain['id'], group='ldap', option='user_tree_dn', ) def test_get_sensitive_config(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual({}, res) PROVIDERS.domain_config_api.create_config(self.domain['id'], config) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual(config, res) def test_update_partial_domain_config(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': 
uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) # Try updating a group new_config = { 'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex} } res = PROVIDERS.domain_config_api.update_config( self.domain['id'], new_config, group='ldap' ) expected_config = copy.deepcopy(config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['ldap']['user_filter'] = new_config['ldap'][ 'user_filter' ] expected_full_config = copy.deepcopy(expected_config) expected_config['ldap'].pop('password') res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_config, res) # The sensitive option should still exist res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual(expected_full_config, res) # Try updating a single whitelisted option PROVIDERS.domain_config_api.delete_config(self.domain['id']) PROVIDERS.domain_config_api.create_config(self.domain['id'], config) new_config = {'url': uuid.uuid4().hex} res = PROVIDERS.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='url' ) # Make sure whitelisted and full config is updated expected_whitelisted_config = copy.deepcopy(config) expected_whitelisted_config['ldap']['url'] = new_config['url'] expected_full_config = copy.deepcopy(expected_whitelisted_config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual(expected_full_config, res) # Try updating a single sensitive option PROVIDERS.domain_config_api.delete_config(self.domain['id']) PROVIDERS.domain_config_api.create_config(self.domain['id'], config) new_config = {'password': uuid.uuid4().hex} res = 
PROVIDERS.domain_config_api.update_config( self.domain['id'], new_config, group='ldap', option='password' ) # The whitelisted config should not have changed... expected_whitelisted_config = copy.deepcopy(config) expected_full_config = copy.deepcopy(config) expected_whitelisted_config['ldap'].pop('password') self.assertEqual(expected_whitelisted_config, res) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(expected_whitelisted_config, res) expected_full_config['ldap']['password'] = new_config['password'] res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) # ...but the sensitive piece should have. self.assertEqual(expected_full_config, res) def test_update_invalid_partial_domain_config(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } # An extra group, when specifying one group should fail self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config, group='ldap', ) # An extra option, when specifying one option should fail self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url', ) # Now try the right number of groups/options, but just not # ones that are in the config provided config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config, group='identity', ) self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config['ldap'], group='ldap', option='url', ) # Now some valid groups/options, but just not ones that are in the # existing config config = {'ldap': {'user_tree_dn': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) 
config_wrong_group = {'identity': {'driver': uuid.uuid4().hex}} self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.update_config, self.domain['id'], config_wrong_group, group='identity', ) config_wrong_option = {'url': uuid.uuid4().hex} self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.update_config, self.domain['id'], config_wrong_option, group='ldap', option='url', ) # And finally just some bad groups/options bad_group = uuid.uuid4().hex config = {bad_group: {'user': uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config, group=bad_group, option='user', ) bad_option = uuid.uuid4().hex config = {'ldap': {bad_option: uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.update_config, self.domain['id'], config, group='ldap', option=bad_option, ) def test_create_invalid_domain_config(self): self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.create_config, self.domain['id'], {}, ) config = {uuid.uuid4().hex: uuid.uuid4().hex} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.create_config, self.domain['id'], config, ) config = {uuid.uuid4().hex: {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.create_config, self.domain['id'], config, ) config = {'ldap': {uuid.uuid4().hex: uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.create_config, self.domain['id'], config, ) # Try an option that IS in the standard conf, but neither whitelisted # or marked as sensitive config = {'identity': {'user_tree_dn': uuid.uuid4().hex}} self.assertRaises( exception.InvalidDomainConfig, PROVIDERS.domain_config_api.create_config, self.domain['id'], config, ) def test_delete_invalid_partial_domain_config(self): config = {'ldap': {'url': 
uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) # Try deleting a group not in the config self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.delete_config, self.domain['id'], group='identity', ) # Try deleting an option not in the config self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.delete_config, self.domain['id'], group='ldap', option='user_tree_dn', ) def test_sensitive_substitution_in_domain_config(self): # Create a config that contains a whitelisted option that requires # substitution of a sensitive option. config = { 'ldap': { 'url': 'my_url/%(password)s', 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) # Read back the config with the internal method and ensure that the # substitution has taken place. res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) expected_url = config['ldap']['url'] % { 'password': config['ldap']['password'] } self.assertEqual(expected_url, res['ldap']['url']) def test_invalid_sensitive_substitution_in_domain_config(self): """Check that invalid substitutions raise warnings.""" mock_log = mock.Mock() invalid_option_config = { 'ldap': { 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } for invalid_option in [ 'my_url/%(passssword)s', 'my_url/%(password', 'my_url/%(password)', 'my_url/%(password)d', ]: invalid_option_config['ldap']['url'] = invalid_option PROVIDERS.domain_config_api.create_config( self.domain['id'], invalid_option_config ) with mock.patch('keystone.resource.core.LOG', mock_log): res = ( PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) ) mock_log.warning.assert_any_call(mock.ANY, mock.ANY) self.assertEqual( invalid_option_config['ldap']['url'], res['ldap']['url'] ) def 
test_escaped_sequence_in_domain_config(self): """Check that escaped '%(' doesn't get interpreted.""" mock_log = mock.Mock() escaped_option_config = { 'ldap': { 'url': 'my_url/%%(password)s', 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config( self.domain['id'], escaped_option_config ) with mock.patch('keystone.resource.core.LOG', mock_log): res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertFalse(mock_log.warn.called) # The escaping '%' should have been removed self.assertEqual('my_url/%(password)s', res['ldap']['url']) @unit.skip_if_cache_disabled('domain_config') def test_cache_layer_get_sensitive_config(self): config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } PROVIDERS.domain_config_api.create_config(self.domain['id'], config) # cache the result res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ) self.assertEqual(config, res) # delete, bypassing domain config manager api PROVIDERS.domain_config_api.delete_config_options(self.domain['id']) self.assertDictEqual( res, PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ), ) PROVIDERS.domain_config_api.get_config_with_sensitive_info.invalidate( PROVIDERS.domain_config_api, self.domain['id'] ) self.assertDictEqual( {}, PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domain['id'] ), ) def test_delete_domain_deletes_configs(self): """Test domain deletion clears the domain configs.""" domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, 'password': uuid.uuid4().hex, } } PROVIDERS.domain_config_api.create_config(domain['id'], config) # Now delete the domain domain['enabled'] = 
False PROVIDERS.resource_api.update_domain(domain['id'], domain) PROVIDERS.resource_api.delete_domain(domain['id']) # Check domain configs have also been deleted self.assertRaises( exception.DomainConfigNotFound, PROVIDERS.domain_config_api.get_config, domain['id'], ) # The get_config_with_sensitive_info does not throw an exception if # the config is empty, it just returns an empty dict self.assertDictEqual( {}, PROVIDERS.domain_config_api.get_config_with_sensitive_info( domain['id'] ), ) def test_config_registration(self): type = uuid.uuid4().hex PROVIDERS.domain_config_api.obtain_registration( self.domain['id'], type ) PROVIDERS.domain_config_api.release_registration( self.domain['id'], type=type ) # Make sure that once someone has it, nobody else can get it. # This includes the domain who already has it. PROVIDERS.domain_config_api.obtain_registration( self.domain['id'], type ) self.assertFalse( PROVIDERS.domain_config_api.obtain_registration( self.domain['id'], type ) ) # Make sure we can read who does have it self.assertEqual( self.domain['id'], PROVIDERS.domain_config_api.read_registration(type), ) # Make sure releasing it is silent if the domain specified doesn't # have the registration domain2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} PROVIDERS.resource_api.create_domain(domain2['id'], domain2) PROVIDERS.domain_config_api.release_registration( domain2['id'], type=type ) # If nobody has the type registered, then trying to read it should # raise ConfigRegistrationNotFound PROVIDERS.domain_config_api.release_registration( self.domain['id'], type=type ) self.assertRaises( exception.ConfigRegistrationNotFound, PROVIDERS.domain_config_api.read_registration, type, ) # Finally check multiple registrations are cleared if you free the # registration without specifying the type type2 = uuid.uuid4().hex PROVIDERS.domain_config_api.obtain_registration( self.domain['id'], type ) PROVIDERS.domain_config_api.obtain_registration( self.domain['id'], type2 ) 
PROVIDERS.domain_config_api.release_registration(self.domain['id']) self.assertRaises( exception.ConfigRegistrationNotFound, PROVIDERS.domain_config_api.read_registration, type, ) self.assertRaises( exception.ConfigRegistrationNotFound, PROVIDERS.domain_config_api.read_registration, type2, ) def test_option_dict_fails_when_group_is_none(self): group = 'foo' option = 'bar' self.assertRaises( cfg.NoSuchOptError, PROVIDERS.domain_config_api._option_dict, group, option, ) def test_option_dict_returns_valid_config_values(self): regex = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=regex ) expected_dict = { 'group': 'security_compliance', 'option': 'password_regex', 'value': regex, } option_dict = PROVIDERS.domain_config_api._option_dict( 'security_compliance', 'password_regex' ) self.assertEqual(option_dict, expected_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/rest.py0000664000175000017500000001762600000000000021010 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client from oslo_serialization import jsonutils import webtest from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database class RestfulTestCase(unit.TestCase): """Performs restful tests against the WSGI app over HTTP. This class launches public & admin WSGI servers for every test, which can be accessed by calling ``public_request()`` or ``admin_request()``, respectfully. ``restful_request()`` and ``request()`` methods are also exposed if you need to bypass restful conventions or access HTTP details in your test implementation. Three new asserts are provided: * ``assertResponseSuccessful``: called automatically for every request unless an ``expected_status`` is provided * ``assertResponseStatus``: called instead of ``assertResponseSuccessful``, if an ``expected_status`` is provided * ``assertValidResponseHeaders``: validates that the response headers appear as expected Requests are automatically serialized according to the defined ``content_type``. Responses are automatically deserialized as well, and available in the ``response.body`` attribute. The original body content is available in the ``response.raw`` attribute. 
""" # default content type to test content_type = 'json' def setUp(self): super().setUp() self.auth_plugin_config_override() self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) self.public_app = webtest.TestApp(self.loadapp(name='public')) self.addCleanup(delattr, self, 'public_app') def auth_plugin_config_override(self, methods=None, **method_classes): self.useFixture( ksfixtures.ConfigAuthPlugins( self.config_fixture, methods, **method_classes ) ) def request( self, app, path, body=None, headers=None, token=None, expected_status=None, **kwargs ): if headers: headers = {str(k): str(v) for k, v in headers.items()} else: headers = {} if token: headers['X-Auth-Token'] = str(token) # sets environ['REMOTE_ADDR'] kwargs.setdefault('remote_addr', 'localhost') response = app.request( path, headers=headers, status=expected_status, body=body, **kwargs ) return response def assertResponseSuccessful(self, response): """Assert that a status code lies inside the 2xx range. :param response: :py:class:`httplib.HTTPResponse` to be verified to have a status code between 200 and 299. example:: self.assertResponseSuccessful(response) """ self.assertTrue( 200 <= response.status_code <= 299, 'Status code %d is outside of the expected range (2xx)\n\n%s' % (response.status, response.body), ) def assertResponseStatus(self, response, expected_status): """Assert a specific status code on the response. 
:param response: :py:class:`httplib.HTTPResponse` :param expected_status: The specific ``status`` result expected example:: self.assertResponseStatus(response, http.client.NO_CONTENT) """ self.assertEqual( expected_status, response.status_code, 'Status code %s is not %s, as expected\n\n%s' % (response.status_code, expected_status, response.body), ) def assertValidResponseHeaders(self, response): """Ensure that response headers appear as expected.""" self.assertIn('X-Auth-Token', response.headers.get('Vary')) def assertValidErrorResponse( self, response, expected_status=http.client.BAD_REQUEST ): """Verify that the error response is valid. Subclasses can override this function based on the expected response. """ self.assertEqual(expected_status, response.status_code) error = response.result['error'] self.assertEqual(response.status_code, error['code']) self.assertIsNotNone(error.get('title')) def _to_content_type(self, body, headers, content_type=None): """Attempt to encode JSON and XML automatically.""" content_type = content_type or self.content_type if content_type == 'json': headers['Accept'] = 'application/json' if body: headers['Content-Type'] = 'application/json' # NOTE(davechen):dump the body to bytes since WSGI requires # the body of the response to be `Bytestrings`. 
# see pep-3333: # https://www.python.org/dev/peps/pep-3333/#a-note-on-string-types return jsonutils.dump_as_bytes(body) def _from_content_type(self, response, content_type=None): """Attempt to decode JSON and XML automatically, if detected.""" content_type = content_type or self.content_type if response.body is not None and response.body.strip(): # if a body is provided, a Content-Type is also expected header = response.headers.get('Content-Type') self.assertIn(content_type, header) if content_type == 'json': response.result = jsonutils.loads(response.body) else: response.result = response.body def restful_request( self, method='GET', headers=None, body=None, content_type=None, response_content_type=None, **kwargs ): """Serialize/deserialize json as request/response body. .. WARNING:: * Existing Accept header will be overwritten. * Existing Content-Type header will be overwritten. """ # Initialize headers dictionary headers = {} if not headers else headers body = self._to_content_type(body, headers, content_type) # Perform the HTTP request/response response = self.request( method=method, headers=headers, body=body, **kwargs ) response_content_type = response_content_type or content_type self._from_content_type(response, content_type=response_content_type) # we can save some code & improve coverage by always doing this if ( method != 'HEAD' and response.status_code >= http.client.BAD_REQUEST ): self.assertValidErrorResponse(response) # Contains the decoded response.body return response def _request(self, convert=True, **kwargs): if convert: response = self.restful_request(**kwargs) else: response = self.request(**kwargs) self.assertValidResponseHeaders(response) return response def public_request(self, **kwargs): return self._request(app=self.public_app, **kwargs) def admin_request(self, **kwargs): return self._request(app=self.public_app, **kwargs) def _get_token_id(self, r): """Helper method to return a token ID from a response. 
This needs to be overridden by child classes for on their content type. """ raise NotImplementedError() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/saml2/0000775000175000017500000000000000000000000020463 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/saml2/idp_saml2_metadata.xml0000664000175000017500000000456600000000000024732 0ustar00zuulzuul00000000000000 MIIDpTCCAo0CAREwDQYJKoZIhvcNAQEFBQAwgZ4xCjAIBgNVBAUTATUxCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMRQwEgYDVQQDEwtTZWxmIFNpZ25lZDAgFw0xMzA3MDkxNjI1MDBaGA8yMDcyMDEwMTE2MjUwMFowgY8xCzAJBgNVBAYTAlVTMQswCQYDVQQIEwJDQTESMBAGA1UEBxMJU3Vubnl2YWxlMRIwEAYDVQQKEwlPcGVuU3RhY2sxETAPBgNVBAsTCEtleXN0b25lMSUwIwYJKoZIhvcNAQkBFhZrZXlzdG9uZUBvcGVuc3RhY2sub3JnMREwDwYDVQQDEwhLZXlzdG9uZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMTC6IdNd9Cg1DshcrT5gRVRF36nEmjSA9QWdik7B925PK70U4F6j4pz/5JL7plIo/8rJ4jJz9ccE7m0iA+IuABtEhEwXkG9rj47Oy0J4ZyDGSh2K1Bl78PA9zxXSzysUTSjBKdAh29dPYbJY7cgZJ0uC3AtfVceYiAOIi14SdFeZ0LZLDXBuLaqUmSMrmKwJ9wAMOCb/jbBP9/3Ycd0GYjlvrSBU4Bqb8/NHasyO4DpPN68OAoyD5r5jUtV8QZN03UjIsoux8e0lrL6+MVtJo0OfWvlSrlzS5HKSryY+uqqQEuxtZKpJM2MV85ujvjc8eDSChh2shhDjBem3FIlHKUCAwEAATANBgkqhkiG9w0BAQUFAAOCAQEAed9fHgdJrk+gZcO5gsqq6uURfDOuYD66GsSdZw4BqHjYAcnyWq2da+iw7Uxkqu7iLf2k4+Hu3xjDFrce479OwZkSnbXmqB7XspTGOuM8MgT7jB/ypKTOZ6qaZKSWK1Hta995hMrVVlhUNBLh0MPGqoVWYA4d7mblujgH9vp+4mpCciJagHks8K5FBmI+pobB+uFdSYDoRzX9LTpStspK4e3IoY8baILuGcdKimRNBv6ItG4hMrntAe1/nWMJyUu5rDTGf2V/vAaS0S/faJBwQSz1o38QHMTWHNspfwIdX3yMqI9u7/vYlz3rLy5WdBdUgZrZ3/VLmJTiJVZu5Owq4Q== openstack openstack openstack openstack first lastname admin@example.com 555-555-5555 ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/saml2/signed_saml2_assertion.xml0000664000175000017500000001123400000000000025644 0ustar00zuulzuul00000000000000 https://acme.com/FIM/sps/openstack/saml20 Lem2TKyYt+/tJy2iSos1t0KxcJE= b//GXtGeCIJPFsMAHrx4+3yjrL4smSpRLXG9PB3TLMJvU4fx8n2PzK7+VbtWNbZG vSgbvbQR52jq77iyaRfQ2iELuFEY+YietLRi7hsitkJCEayPmU+BDlNIGuCXZjAy 7tmtGFkLlZZJaom1jAzHfZ5JPjZdM5hvQwrhCI2Kzyk= MIICtjCCAh+gAwIBAgIJAJTeBUN2i9ZNMA0GCSqGSIb3DQEBBQUAME4xCzAJBgNV BAYTAkhSMQ8wDQYDVQQIEwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFj aWphIGQuby5vLjELMAkGA1UEAxMCQ0EwHhcNMTIxMjI4MTYwODA1WhcNMTQxMjI4 MTYwODA1WjBvMQswCQYDVQQGEwJIUjEPMA0GA1UECBMGWmFncmViMQ8wDQYDVQQH EwZaYWdyZWIxITAfBgNVBAoTGE5la2Egb3JnYW5pemFjaWphIGQuby5vLjEbMBkG A1UEAxMSUHJvZ3JhbWVyc2thIGZpcm1hMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB iQKBgQCgWApHV5cma0GY/v/vmwgciDQBgITcitx2rG0F+ghXtGiEJeK75VY7jQwE UFCbgV+AaOY2NQChK2FKec7Hss/5y+jbWfX2yVwX6TYcCwnOGXenz+cgx2Fwqpu3 ncL6dYJMfdbKvojBaJQLJTaNjRJsZACButDsDtXDSH9QaRy+hQIDAQABo3sweTAJ BgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0 aWZpY2F0ZTAdBgNVHQ4EFgQUSo9ThP/MOg8QIRWxoPo8qKR8O2wwHwYDVR0jBBgw FoAUAelckr4bx8MwZ7y+VlHE46Mbo+cwDQYJKoZIhvcNAQEFBQADgYEAy19Z7Z5/ /MlWkogu41s0RxL9ffG60QQ0Y8hhDTmgHNx1itj0wT8pB7M4KVMbZ4hjjSFsfRq4 Vj7jm6LwU0WtZ3HGl8TygTh8AAJvbLROnTjLL5MqI9d9pKvIIfZ2Qs3xmJ7JEv4H UHeBXxQq/GmfBv3l+V5ObQ+EHKnyDodLHCk= test_user urn:oasis:names:tc:SAML:2.0:ac:classes:Password https://acme.com/FIM/sps/openstack/saml20 test_user user_domain admin member development project_domain JSON:{"name":"group1","domain":{"name":"Default"}} JSON:{"name":"group2","domain":{"name":"Default"}} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/server/0000775000175000017500000000000000000000000020753 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 
keystone-26.0.0/keystone/tests/unit/server/__init__.py0000664000175000017500000000000000000000000023052 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/server/test_keystone_flask.py0000664000175000017500000007361600000000000025422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import typing as ty import uuid import fixtures import flask import flask_restful from oslo_policy import policy from oslo_serialization import jsonutils from testtools import matchers from keystone.common import context from keystone.common import json_home from keystone.common import rbac_enforcer import keystone.conf from keystone import exception from keystone.server.flask import common as flask_common from keystone.server.flask.request_processing import json_body from keystone.tests.unit import rest CONF = keystone.conf.CONF class _TestResourceWithCollectionInfo(flask_common.ResourceBase): collection_key = 'arguments' member_key = 'argument' __shared_state__: dict[str, ty.Any] = {} _storage_dict: dict[str, ty.Any] = {} def __init__(self): super().__init__() # Share State, this is for "dummy" backend storage. 
self.__dict__ = self.__shared_state__ @classmethod def _reset(cls): # Used after a test to ensure clean-state cls._storage_dict.clear() cls.__shared_state__.clear() def _list_arguments(self): return self.wrap_collection(list(self._storage_dict.values())) def get(self, argument_id=None): # List with no argument, get resource with id, used for HEAD as well. rbac_enforcer.enforcer.RBACEnforcer.enforce_call( action='example:allowed' ) if argument_id is None: # List return self._list_arguments() else: # get resource with id try: return self.wrap_member(self._storage_dict[argument_id]) except KeyError: raise exception.NotFound(target=argument_id) def post(self): rbac_enforcer.enforcer.RBACEnforcer.enforce_call( action='example:allowed' ) ref = flask.request.get_json(force=True) ref = self._assign_unique_id(ref) self._storage_dict[ref['id']] = ref return self.wrap_member(self._storage_dict[ref['id']]), 201 def put(self, argument_id): rbac_enforcer.enforcer.RBACEnforcer.enforce_call( action='example:allowed' ) try: self._storage_dict[argument_id] except KeyError: raise exception.NotFound(target=argument_id) ref = flask.request.get_json(force=True) self._require_matching_id(ref) # Maintain the ref id ref['id'] = argument_id self._storage_dict[argument_id] = ref return '', 204 def patch(self, argument_id): rbac_enforcer.enforcer.RBACEnforcer.enforce_call( action='example:allowed' ) try: self._storage_dict[argument_id] except KeyError: raise exception.NotFound(target=argument_id) ref = flask.request.get_json(force=True) self._require_matching_id(ref) self._storage_dict[argument_id].update(ref) return self.wrap_member(self._storage_dict[argument_id]) def delete(self, argument_id): rbac_enforcer.enforcer.RBACEnforcer.enforce_call( action='example:allowed' ) try: del self._storage_dict[argument_id] except KeyError: raise exception.NotFound(target=argument_id) return '', 204 class _TestRestfulAPI(flask_common.APIBase): _name = 'test_api_base' _import_name = __name__ resources = 
[] resource_mapping = [] def __init__(self, *args, **kwargs): self.resource_mapping = kwargs.pop('resource_mapping', []) self.resources = kwargs.pop( 'resources', [_TestResourceWithCollectionInfo] ) super().__init__(*args, **kwargs) class TestKeystoneFlaskCommon(rest.RestfulTestCase): _policy_rules = [ policy.RuleDefault(name='example:allowed', check_str=''), policy.RuleDefault(name='example:deny', check_str='false:false'), ] def setUp(self): super().setUp() enf = rbac_enforcer.enforcer.RBACEnforcer() def register_rules(enf_obj): enf_obj.register_defaults(self._policy_rules) self.useFixture( fixtures.MockPatchObject(enf, 'register_rules', register_rules) ) self.useFixture( fixtures.MockPatchObject( rbac_enforcer.enforcer, '_POSSIBLE_TARGET_ACTIONS', {r.name for r in self._policy_rules}, ) ) enf._reset() self.addCleanup(enf._reset) self.addCleanup(_TestResourceWithCollectionInfo._reset) def _get_token(self): auth_json = { 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user_req_admin['name'], 'password': self.user_req_admin['password'], 'domain': {'id': self.user_req_admin['domain_id']}, } }, }, 'scope': {'project': {'id': self.project_service['id']}}, } } return ( self.test_client() .post('/v3/auth/tokens', json=auth_json, expected_status_code=201) .headers['X-Subject-Token'] ) def _setup_flask_restful_api(self, **options): self.restful_api_opts = options.copy() orig_value = _TestResourceWithCollectionInfo.api_prefix setattr( _TestResourceWithCollectionInfo, 'api_prefix', options.get('api_url_prefix', ''), ) self.addCleanup( setattr, _TestResourceWithCollectionInfo, 'api_prefix', orig_value ) self.restful_api = _TestRestfulAPI(**options) self.public_app.app.register_blueprint(self.restful_api.blueprint) self.cleanup_instance('restful_api') self.cleanup_instance('restful_api_opts') def _make_requests(self): path_base = '/arguments' api_prefix = self.restful_api_opts.get('api_url_prefix', '') blueprint_prefix = 
self.restful_api._blueprint_url_prefix.rstrip('/') url = ''.join( [x for x in [blueprint_prefix, api_prefix, path_base] if x] ) headers = {'X-Auth-Token': self._get_token()} with self.test_client() as c: # GET LIST resp = c.get(url, headers=headers) self.assertEqual( _TestResourceWithCollectionInfo.wrap_collection([]), resp.json ) unknown_id = uuid.uuid4().hex # GET non-existent ref c.get( f'{url}/{unknown_id}', headers=headers, expected_status_code=404, ) # HEAD non-existent ref c.head( f'{url}/{unknown_id}', headers=headers, expected_status_code=404, ) # PUT non-existent ref c.put( f'{url}/{unknown_id}', json={}, headers=headers, expected_status_code=404, ) # PATCH non-existent ref c.patch( f'{url}/{unknown_id}', json={}, headers=headers, expected_status_code=404, ) # DELETE non-existent ref c.delete( f'{url}/{unknown_id}', headers=headers, expected_status_code=404, ) # POST new ref new_argument_resource = {'testing': uuid.uuid4().hex} new_argument_resp = c.post( url, json=new_argument_resource, headers=headers ).json['argument'] # POST second new ref new_argument2_resource = {'testing': uuid.uuid4().hex} new_argument2_resp = c.post( url, json=new_argument2_resource, headers=headers ).json['argument'] # GET list get_list_resp = c.get(url, headers=headers).json self.assertIn(new_argument_resp, get_list_resp['arguments']) self.assertIn(new_argument2_resp, get_list_resp['arguments']) # GET first ref get_resp = c.get( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ).json['argument'] self.assertEqual(new_argument_resp, get_resp) # HEAD first ref head_resp = c.head( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ).data # NOTE(morgan): For python3 compat, explicitly binary type self.assertEqual(head_resp, b'') # PUT update first ref replacement_argument = {'new_arg': True, 'id': uuid.uuid4().hex} c.put( '{}/{}'.format(url, new_argument_resp['id']), headers=headers, json=replacement_argument, expected_status_code=400, ) 
replacement_argument.pop('id') c.put( '{}/{}'.format(url, new_argument_resp['id']), headers=headers, json=replacement_argument, ) put_resp = c.get( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ).json['argument'] self.assertNotIn(new_argument_resp['testing'], put_resp) self.assertTrue(put_resp['new_arg']) # GET first ref (check for replacement) get_replacement_resp = c.get( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ).json['argument'] self.assertEqual(put_resp, get_replacement_resp) # PATCH update first ref patch_ref = {'uuid': uuid.uuid4().hex} patch_resp = c.patch( '{}/{}'.format(url, new_argument_resp['id']), headers=headers, json=patch_ref, ).json['argument'] self.assertTrue(patch_resp['new_arg']) self.assertEqual(patch_ref['uuid'], patch_resp['uuid']) # GET first ref (check for update) get_patched_ref_resp = c.get( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ).json['argument'] self.assertEqual(patch_resp, get_patched_ref_resp) # DELETE first ref c.delete( '{}/{}'.format(url, new_argument_resp['id']), headers=headers ) # Check that it was in-fact deleted c.get( '{}/{}'.format(url, new_argument_resp['id']), headers=headers, expected_status_code=404, ) def test_api_url_prefix(self): url_prefix = '/%s' % uuid.uuid4().hex self._setup_flask_restful_api(api_url_prefix=url_prefix) self._make_requests() def test_blueprint_url_prefix(self): url_prefix = '/%s' % uuid.uuid4().hex self._setup_flask_restful_api(blueprint_url_prefix=url_prefix) self._make_requests() def test_build_restful_api_no_prefix(self): self._setup_flask_restful_api() self._make_requests() def test_cannot_add_before_request_functions_twice(self): class TestAPIDuplicateBefore(_TestRestfulAPI): def __init__(self): super().__init__() self._register_before_request_functions() self.assertRaises(AssertionError, TestAPIDuplicateBefore) def test_cannot_add_after_request_functions_twice(self): class TestAPIDuplicateAfter(_TestRestfulAPI): def __init__(self): 
super().__init__() self._register_after_request_functions() self.assertRaises(AssertionError, TestAPIDuplicateAfter) def test_after_request_functions_must_be_added(self): class TestAPINoAfter(_TestRestfulAPI): def _register_after_request_functions(self, functions=None): pass self.assertRaises(AssertionError, TestAPINoAfter) def test_before_request_functions_must_be_added(self): class TestAPINoBefore(_TestRestfulAPI): def _register_before_request_functions(self, functions=None): pass self.assertRaises(AssertionError, TestAPINoBefore) def test_before_request_functions(self): # Test additional "before" request functions fire. attr = uuid.uuid4().hex def do_something(): setattr(flask.g, attr, True) class TestAPI(_TestRestfulAPI): def _register_before_request_functions(self, functions=None): functions = functions or [] functions.append(do_something) super()._register_before_request_functions(functions) api = TestAPI(resources=[_TestResourceWithCollectionInfo]) self.public_app.app.register_blueprint(api.blueprint) token = self._get_token() with self.test_client() as c: c.get('/v3/arguments', headers={'X-Auth-Token': token}) self.assertTrue(getattr(flask.g, attr, False)) def test_after_request_functions(self): # Test additional "after" request functions fire. 
In this case, we # alter the response code to 420 attr = uuid.uuid4().hex def do_something(resp): setattr(flask.g, attr, True) resp.status_code = 420 return resp class TestAPI(_TestRestfulAPI): def _register_after_request_functions(self, functions=None): functions = functions or [] functions.append(do_something) super()._register_after_request_functions(functions) api = TestAPI(resources=[_TestResourceWithCollectionInfo]) self.public_app.app.register_blueprint(api.blueprint) token = self._get_token() with self.test_client() as c: c.get( '/v3/arguments', headers={'X-Auth-Token': token}, expected_status_code=420, ) def test_construct_resource_map(self): resource_name = 'arguments' param_relation = json_home.build_v3_parameter_relation('argument_id') alt_rel_func = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='extension', extension_version='1.0', ) url = '/v3/arguments/' old_url = [ dict( url='/v3/old_arguments/', json_home=flask_common.construct_json_home_data( rel='arguments', resource_relation_func=alt_rel_func ), ) ] mapping = flask_common.construct_resource_map( resource=_TestResourceWithCollectionInfo, url=url, resource_kwargs={}, alternate_urls=old_url, rel=resource_name, status=json_home.Status.EXPERIMENTAL, path_vars={'argument_id': param_relation}, resource_relation_func=json_home.build_v3_resource_relation, ) self.assertEqual(_TestResourceWithCollectionInfo, mapping.resource) self.assertEqual(url, mapping.url) self.assertEqual( json_home.build_v3_resource_relation(resource_name), mapping.json_home_data.rel, ) self.assertEqual( json_home.Status.EXPERIMENTAL, mapping.json_home_data.status ) self.assertEqual( {'argument_id': param_relation}, mapping.json_home_data.path_vars ) # Check the alternate URL data is populated sanely self.assertEqual(1, len(mapping.alternate_urls)) alt_url_data = mapping.alternate_urls[0] self.assertEqual(old_url[0]['url'], alt_url_data['url']) self.assertEqual(old_url[0]['json_home'], 
alt_url_data['json_home']) def test_instantiate_and_register_to_app(self): # Test that automatic instantiation and registration to app works. self.restful_api_opts = {} self.restful_api = _TestRestfulAPI.instantiate_and_register_to_app( self.public_app.app ) self.cleanup_instance('restful_api_opts') self.cleanup_instance('restful_api') self._make_requests() def test_unenforced_api_decorator(self): # Test unenforced decorator works as expected class MappedResource(flask_restful.Resource): @flask_common.unenforced_api def post(self): post_body = flask.request.get_json() return {'post_body': post_body}, 201 resource_map = flask_common.construct_resource_map( resource=MappedResource, url='test_api', alternate_urls=[], resource_kwargs={}, rel='test', status=json_home.Status.STABLE, path_vars=None, resource_relation_func=json_home.build_v3_resource_relation, ) restful_api = _TestRestfulAPI( resource_mapping=[resource_map], resources=[] ) self.public_app.app.register_blueprint(restful_api.blueprint) token = self._get_token() with self.test_client() as c: body = {'test_value': uuid.uuid4().hex} # Works with token resp = c.post( '/v3/test_api', json=body, headers={'X-Auth-Token': token} ) self.assertEqual(body, resp.json['post_body']) # Works without token resp = c.post('/v3/test_api', json=body) self.assertEqual(body, resp.json['post_body']) def test_HTTP_OPTIONS_is_unenforced(self): # Standup a test mapped resource and call OPTIONS on it. This will # return a header "Allow" with the valid methods, in this case # OPTIONS and POST. Ensure that the response otherwise conforms # as expected. class MappedResource(flask_restful.Resource): def post(self): # we don't actually use this or call it. 
pass resource_map = flask_common.construct_resource_map( resource=MappedResource, url='test_api', alternate_urls=[], resource_kwargs={}, rel='test', status=json_home.Status.STABLE, path_vars=None, resource_relation_func=json_home.build_v3_resource_relation, ) restful_api = _TestRestfulAPI( resource_mapping=[resource_map], resources=[] ) self.public_app.app.register_blueprint(restful_api.blueprint) with self.test_client() as c: r = c.options('/v3/test_api') # make sure we split the data and left/right strip off whitespace # The use of a SET here is to ensure the exact values are in place # even if hash-seeds change order. `r.data` will be an empty # byte-string. `Content-Length` will be 0. self.assertEqual( {'OPTIONS', 'POST'}, {v.lstrip().rstrip() for v in r.headers['Allow'].split(',')}, ) self.assertEqual(r.headers['Content-Length'], '0') self.assertEqual(r.data, b'') def test_mapped_resource_routes(self): # Test non-standard URL routes ("mapped") function as expected class MappedResource(flask_restful.Resource): def post(self): rbac_enforcer.enforcer.RBACEnforcer().enforce_call( action='example:allowed' ) post_body = flask.request.get_json() return {'post_body': post_body}, 201 resource_map = flask_common.construct_resource_map( resource=MappedResource, url='test_api', alternate_urls=[], resource_kwargs={}, rel='test', status=json_home.Status.STABLE, path_vars=None, resource_relation_func=json_home.build_v3_resource_relation, ) restful_api = _TestRestfulAPI( resource_mapping=[resource_map], resources=[] ) self.public_app.app.register_blueprint(restful_api.blueprint) token = self._get_token() with self.test_client() as c: body = {'test_value': uuid.uuid4().hex} resp = c.post( '/v3/test_api', json=body, headers={'X-Auth-Token': token} ) self.assertEqual(body, resp.json['post_body']) def test_correct_json_home_document(self): class MappedResource(flask_restful.Resource): def post(self): rbac_enforcer.enforcer.RBACEnforcer().enforce_call( action='example:allowed' ) 
post_body = flask.request.get_json() return {'post_body': post_body} # NOTE(morgan): totally fabricated json_home data based upon our TEST # restful_apis. json_home_data = { 'https://docs.openstack.org/api/openstack-identity/3/' 'rel/argument': { 'href-template': '/v3/arguments/{argument_id}', 'href-vars': { 'argument_id': 'https://docs.openstack.org/api/' 'openstack-identity/3/param/argument_id' }, }, 'https://docs.openstack.org/api/openstack-identity/3/' 'rel/arguments': {'href': '/v3/arguments'}, 'https://docs.openstack.org/api/openstack-identity/3/' 'rel/test': {'href': '/v3/test_api'}, } resource_map = flask_common.construct_resource_map( resource=MappedResource, url='test_api', alternate_urls=[], resource_kwargs={}, rel='test', status=json_home.Status.STABLE, path_vars=None, resource_relation_func=json_home.build_v3_resource_relation, ) restful_api = _TestRestfulAPI(resource_mapping=[resource_map]) self.public_app.app.register_blueprint(restful_api.blueprint) with self.test_client() as c: headers = {'Accept': 'application/json-home'} resp = c.get('/', headers=headers) resp_data = jsonutils.loads(resp.data) for rel in json_home_data: self.assertThat( resp_data['resources'][rel], matchers.Equals(json_home_data[rel]), ) def test_normalize_domain_id_extracts_domain_id_if_needed(self): self._setup_flask_restful_api() blueprint_prefix = self.restful_api._blueprint_url_prefix.rstrip('/') url = ''.join([blueprint_prefix, '/arguments']) headers = {'X-Auth-Token': self._get_token()} ref_with_domain_id = {'domain_id': uuid.uuid4().hex} ref_without_domain_id = {} with self.test_client() as c: # Make a dummy request.. ANY request is fine to push the whole # context stack. 
c.get( f'{url}/{uuid.uuid4().hex}', headers=headers, expected_status_code=404, ) oslo_context = flask.request.environ[context.REQUEST_CONTEXT_ENV] # Normal Project Scope Form # --------------------------- # test that _normalize_domain_id does something sane domain_id = ref_with_domain_id['domain_id'] # Ensure we don't change the domain if it is specified flask_common.ResourceBase._normalize_domain_id(ref_with_domain_id) self.assertEqual(domain_id, ref_with_domain_id['domain_id']) # Ensure (deprecated) we add default domain if needed flask_common.ResourceBase._normalize_domain_id( ref_without_domain_id ) self.assertEqual( CONF.identity.default_domain_id, ref_without_domain_id['domain_id'], ) ref_without_domain_id.clear() # Domain Scoped Form # -------------------- # Just set oslo_context domain_id to a value. This is how we # communicate domain scope. No need to explicitly # do a domain-scoped request, this is a synthetic text anyway oslo_context.domain_id = uuid.uuid4().hex # Ensure we don't change the domain if it is specified flask_common.ResourceBase._normalize_domain_id(ref_with_domain_id) self.assertEqual(domain_id, ref_with_domain_id['domain_id']) flask_common.ResourceBase._normalize_domain_id( ref_without_domain_id ) self.assertEqual( oslo_context.domain_id, ref_without_domain_id['domain_id'] ) ref_without_domain_id.clear() # "Admin" Token form # ------------------- # Explicitly set "is_admin" to true, no new request is needed # as we simply check "is_admin" value everywhere oslo_context.is_admin = True oslo_context.domain_id = None # Ensure we don't change the domain if it is specified flask_common.ResourceBase._normalize_domain_id(ref_with_domain_id) self.assertEqual(domain_id, ref_with_domain_id['domain_id']) # Ensure we raise an appropriate exception with the inferred # domain_id self.assertRaises( exception.ValidationError, flask_common.ResourceBase._normalize_domain_id, ref=ref_without_domain_id, ) def 
test_api_prefix_self_referential_link_substitution(self): view_arg = uuid.uuid4().hex class TestResource(flask_common.ResourceBase): api_prefix = '//nothing' # use a dummy request context, no enforcement is happening # therefore we don't need the heavy lifting of a full request # run. with self.test_request_context( path='/%s/nothing/values' % view_arg, base_url='https://localhost/' ): # explicitly set the view_args, this is a special case # for a synthetic test case, usually one would rely on # a full request stack to set these. flask.request.view_args = {'test_value': view_arg} # create dummy ref ref = {'id': uuid.uuid4().hex} # add the self referential link TestResource._add_self_referential_link( ref, collection_name='values' ) # Check that the link in fact starts with what we expect # including the explicit view arg. self.assertTrue( ref['links']['self'].startswith( 'https://localhost/v3/%s' % view_arg ) ) def test_json_body_before_req_func_valid_json(self): with self.test_request_context( headers={'Content-Type': 'application/json'}, data='{"key": "value"}', ): # No exception should be raised, everything is happy. json_body.json_body_before_request() def test_json_body_before_req_func_invalid_json(self): with self.test_request_context( headers={'Content-Type': 'application/json'}, data='invalid JSON' ): # keystone.exception.ValidationError should be raised self.assertRaises( exception.ValidationError, json_body.json_body_before_request ) def test_json_body_before_req_func_no_content_type(self): # Unset with self.test_request_context(data='{"key": "value"}'): # No exception should be raised, everything is happy. json_body.json_body_before_request() # Explicitly set to '' with self.test_request_context( headers={'Content-Type': ''}, data='{"key": "value"}' ): # No exception should be raised, everything is happy. 
json_body.json_body_before_request() def test_json_body_before_req_func_unrecognized_content_type(self): with self.test_request_context( headers={'Content-Type': 'unrecognized/content-type'}, data='{"key": "value"', ): # keystone.exception.ValidationError should be raised self.assertRaises( exception.ValidationError, json_body.json_body_before_request ) def test_json_body_before_req_func_unrecognized_conten_type_no_body(self): with self.test_request_context( headers={'Content-Type': 'unrecognized/content-type'} ): # No exception should be raised, everything is happy. json_body.json_body_before_request() class TestKeystoneFlaskUnrouted404(rest.RestfulTestCase): def setUp(self): super().setUp() # unregister the 404 handler we explicitly set in loadapp. This # makes the 404 error fallback to a standard werkzeug handling. self.public_app.app.error_handler_spec[None].pop(404) def test_unrouted_path_is_not_jsonified_404(self): with self.test_client() as c: path = f'/{uuid.uuid4()}' resp = c.get(path, expected_status_code=404) # Make sure we're emitting a html error self.assertIn('text/html', resp.headers['Content-Type']) # Ensure the more generic flask/werkzeug 404 response is emitted self.assertTrue(b'404 Not Found' in resp.data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_app_config.py0000664000175000017500000001423700000000000023172 0ustar00zuulzuul00000000000000# # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import os from keystone.server.flask import core as server_flask from keystone.tests import unit class AppConfigTest(unit.TestCase): default_config_file = 'keystone.conf' custom_config_dir = '/etc/kst/' custom_config_files = ['kst.conf', 'kst2.conf'] def test_config_files_have_default_values_when_envars_not_set(self): config_files = server_flask._get_config_files() config_files.sort() expected_config_files = [] self.assertListEqual(config_files, expected_config_files) def test_config_files_have_default_values_with_empty_envars(self): env = {'OS_KEYSTONE_CONFIG_FILES': '', 'OS_KEYSTONE_CONFIG_DIR': ''} config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [] self.assertListEqual(config_files, expected_config_files) def test_can_use_single_config_file_under_default_config_dir(self): cfg = self.custom_config_files[0] env = {'OS_KEYSTONE_CONFIG_FILES': cfg} config_files = server_flask._get_config_files(env) expected_config_files = [cfg] self.assertListEqual(config_files, expected_config_files) def test_can_use_multiple_config_files_under_default_config_dir(self): env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(self.custom_config_files)} config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = self.custom_config_files self.assertListEqual(config_files, expected_config_files) config_with_empty_strings = self.custom_config_files + ['', ' '] env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(config_with_empty_strings)} config_files = server_flask._get_config_files(env) config_files.sort() self.assertListEqual(config_files, expected_config_files) def test_can_use_single_absolute_path_config_file(self): cfg = self.custom_config_files[0] cfgpath = os.path.join(self.custom_config_dir, cfg) env = {'OS_KEYSTONE_CONFIG_FILES': cfgpath} config_files = server_flask._get_config_files(env) 
self.assertListEqual(config_files, [cfgpath]) def test_can_use_multiple_absolute_path_config_files(self): cfgpaths = [ os.path.join(self.custom_config_dir, cfg) for cfg in self.custom_config_files ] cfgpaths.sort() env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(cfgpaths)} config_files = server_flask._get_config_files(env) config_files.sort() self.assertListEqual(config_files, cfgpaths) env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join(cfgpaths + ['', ' '])} config_files = server_flask._get_config_files(env) config_files.sort() self.assertListEqual(config_files, cfgpaths) def test_can_use_default_config_files_with_custom_config_dir(self): env = {'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir} config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [ os.path.join(self.custom_config_dir, self.default_config_file) ] self.assertListEqual(config_files, expected_config_files) def test_can_use_single_config_file_under_custom_config_dir(self): cfg = self.custom_config_files[0] env = { 'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir, 'OS_KEYSTONE_CONFIG_FILES': cfg, } config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [os.path.join(self.custom_config_dir, cfg)] self.assertListEqual(config_files, expected_config_files) def test_can_use_multiple_config_files_under_custom_config_dir(self): env = { 'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir, 'OS_KEYSTONE_CONFIG_FILES': ';'.join(self.custom_config_files), } config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [ os.path.join(self.custom_config_dir, s) for s in self.custom_config_files ] expected_config_files.sort() self.assertListEqual(config_files, expected_config_files) config_with_empty_strings = self.custom_config_files + ['', ' '] env = { 'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir, 'OS_KEYSTONE_CONFIG_FILES': ';'.join(config_with_empty_strings), } config_files = server_flask._get_config_files(env) 
config_files.sort() self.assertListEqual(config_files, expected_config_files) def test_can_mix_relative_and_absolute_paths_config_file(self): cfg0 = self.custom_config_files[0] cfgpath0 = os.path.join( self.custom_config_dir, self.custom_config_files[0] ) cfgpath1 = os.path.join( self.custom_config_dir, self.custom_config_files[1] ) env = { 'OS_KEYSTONE_CONFIG_DIR': self.custom_config_dir, 'OS_KEYSTONE_CONFIG_FILES': ';'.join([cfg0, cfgpath1]), } config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [cfgpath0, cfgpath1] expected_config_files.sort() self.assertListEqual(config_files, expected_config_files) env = {'OS_KEYSTONE_CONFIG_FILES': ';'.join([cfg0, cfgpath1])} config_files = server_flask._get_config_files(env) config_files.sort() expected_config_files = [cfg0, cfgpath1] expected_config_files.sort() self.assertListEqual(config_files, expected_config_files) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_associate_project_endpoint_extension.py0000664000175000017500000017140700000000000030565 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import http.client import uuid from testtools import matchers from keystone.common import provider_api from keystone.tests import unit from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class EndpointFilterTestCase(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.default_request_url = ( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id, } ) class EndpointFilterCRUDTestCase(EndpointFilterTestCase): def test_create_endpoint_project_association(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Valid endpoint and project id test case. """ self.put(self.default_request_url) def test_create_endpoint_project_association_with_invalid_project(self): """PUT OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid project id test case. """ self.put( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NOT_FOUND, ) def test_create_endpoint_project_association_with_invalid_endpoint(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid endpoint id test case. """ self.put( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex, }, expected_status=http.client.NOT_FOUND, ) def test_create_endpoint_project_association_with_unexpected_body(self): """PUT /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Unexpected body in request. The body should be ignored. """ self.put( self.default_request_url, body={'project_id': self.default_domain_project_id}, ) def test_check_endpoint_project_association(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Valid project and endpoint id test case. 
""" self.put(self.default_request_url) self.head( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NO_CONTENT, ) def test_check_endpoint_project_association_with_invalid_project(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid project id test case. """ self.put(self.default_request_url) self.head( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NOT_FOUND, ) def test_check_endpoint_project_association_with_invalid_endpoint(self): """HEAD /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid endpoint id test case. """ self.put(self.default_request_url) self.head( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex, }, expected_status=http.client.NOT_FOUND, ) def test_get_endpoint_project_association(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Valid project and endpoint id test case. """ self.put(self.default_request_url) self.get( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NO_CONTENT, ) def test_get_endpoint_project_association_with_invalid_project(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid project id test case. 
""" self.put(self.default_request_url) self.get( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NOT_FOUND, ) def test_get_endpoint_project_association_with_invalid_endpoint(self): """GET /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid endpoint id test case. """ self.put(self.default_request_url) self.get( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex, }, expected_status=http.client.NOT_FOUND, ) def test_list_endpoints_associated_with_valid_project(self): """GET & HEAD /OS-EP-FILTER/projects/{project_id}/endpoints. Valid project and endpoint id test case. """ self.put(self.default_request_url) resource_url = '/OS-EP-FILTER/projects/{project_id}/endpoints'.format( project_id=self.default_domain_project_id ) r = self.get(resource_url) self.assertValidEndpointListResponse( r, self.endpoint, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_list_endpoints_associated_with_invalid_project(self): """GET & HEAD /OS-EP-FILTER/projects/{project_id}/endpoints. Invalid project id test case. """ self.put(self.default_request_url) url = '/OS-EP-FILTER/projects/{project_id}/endpoints'.format( project_id=uuid.uuid4().hex ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_list_projects_associated_with_endpoint(self): """GET & HEAD /OS-EP-FILTER/endpoints/{endpoint_id}/projects. Valid endpoint-project association test case. 
""" self.put(self.default_request_url) resource_url = '/OS-EP-FILTER/endpoints/{endpoint_id}/projects'.format( endpoint_id=self.endpoint_id ) r = self.get(resource_url, expected_status=http.client.OK) self.assertValidProjectListResponse( r, self.default_domain_project, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_list_projects_with_no_endpoint_project_association(self): """GET & HEAD /OS-EP-FILTER/endpoints/{endpoint_id}/projects. Valid endpoint id but no endpoint-project associations test case. """ url = '/OS-EP-FILTER/endpoints/{endpoint_id}/projects'.format( endpoint_id=self.endpoint_id ) r = self.get(url, expected_status=http.client.OK) self.assertValidProjectListResponse(r, expected_length=0) self.head(url, expected_status=http.client.OK) def test_list_projects_associated_with_invalid_endpoint(self): """GET & HEAD /OS-EP-FILTER/endpoints/{endpoint_id}/projects. Invalid endpoint id test case. """ url = '/OS-EP-FILTER/endpoints/{endpoint_id}/projects'.format( endpoint_id=uuid.uuid4().hex ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_remove_endpoint_project_association(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Valid project id and endpoint id test case. """ self.put(self.default_request_url) self.delete( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': self.endpoint_id, } ) def test_remove_endpoint_project_association_with_invalid_project(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid project id test case. 
""" self.put(self.default_request_url) self.delete( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': uuid.uuid4().hex, 'endpoint_id': self.endpoint_id, }, expected_status=http.client.NOT_FOUND, ) def test_remove_endpoint_project_association_with_invalid_endpoint(self): """DELETE /OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}. Invalid endpoint id test case. """ self.put(self.default_request_url) self.delete( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': uuid.uuid4().hex, }, expected_status=http.client.NOT_FOUND, ) def test_endpoint_project_association_cleanup_when_project_deleted(self): self.put(self.default_request_url) association_url = ( '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' % {'endpoint_id': self.endpoint_id} ) r = self.get(association_url) self.assertValidProjectListResponse(r, expected_length=1) self.delete( '/projects/%(project_id)s' % {'project_id': self.default_domain_project_id} ) r = self.get(association_url) self.assertValidProjectListResponse(r, expected_length=0) def test_endpoint_project_association_cleanup_when_endpoint_deleted(self): self.put(self.default_request_url) association_url = ( '/OS-EP-FILTER/projects/{project_id}/endpoints'.format( project_id=self.default_domain_project_id ) ) r = self.get(association_url) self.assertValidEndpointListResponse(r, expected_length=1) self.delete(f'/endpoints/{self.endpoint_id}') r = self.get(association_url) self.assertValidEndpointListResponse(r, expected_length=0) @unit.skip_if_cache_disabled('catalog') def test_create_endpoint_project_association_invalidates_cache(self): # NOTE(davechen): create another endpoint which will be added to # default project, this should be done at first since # `create_endpoint` will also invalidate cache. 
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(
            service_id=self.service_id,
            region_id=self.region_id,
            interface='public',
            id=endpoint_id2,
        )
        PROVIDERS.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())

        # create endpoint project association.
        self.put(self.default_request_url)

        # should get back only one endpoint that was just created.
        user_id = uuid.uuid4().hex
        catalog = PROVIDERS.catalog_api.get_v3_catalog(
            user_id, self.default_domain_project_id
        )

        # there is only one endpoint associated with the default project.
        self.assertEqual(1, len(catalog[0]['endpoints']))
        self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id'])

        # add the second endpoint to default project, bypassing
        # catalog_api API manager.
        PROVIDERS.catalog_api.driver.add_endpoint_to_project(
            endpoint_id2, self.default_domain_project_id
        )

        # but, we still get back only one endpoint, since the catalog is
        # pulled out from cache and the cache hasn't been invalidated.
        catalog = PROVIDERS.catalog_api.get_v3_catalog(
            user_id, self.default_domain_project_id
        )
        self.assertEqual(1, len(catalog[0]['endpoints']))

        # remove the endpoint2 from the default project, and add it again via
        # catalog_api API manager.
        PROVIDERS.catalog_api.driver.remove_endpoint_from_project(
            endpoint_id2, self.default_domain_project_id
        )

        # add second endpoint to default project, this can be done by calling
        # the catalog_api API manager directly but call the REST API
        # instead for consistency.
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.default_domain_project_id,
                'endpoint_id': endpoint_id2,
            }
        )

        # should get back two endpoints since the cache has been
        # invalidated when the second endpoint was added to default project.
catalog = self.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertEqual(2, len(catalog[0]['endpoints'])) ep_id_list = [ catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id'], ] self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list) @unit.skip_if_cache_disabled('catalog') def test_remove_endpoint_from_project_invalidates_cache(self): endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref( service_id=self.service_id, region_id=self.region_id, interface='public', id=endpoint_id2, ) PROVIDERS.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy()) # create endpoint project association. self.put(self.default_request_url) # add second endpoint to default project. self.put( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': endpoint_id2, } ) # should get back only one endpoint that was just created. user_id = uuid.uuid4().hex catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) # there are two endpoints associated with the default project. ep_id_list = [ catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id'], ] self.assertEqual(2, len(catalog[0]['endpoints'])) self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list) # remove the endpoint2 from the default project, bypassing # catalog_api API manager. PROVIDERS.catalog_api.driver.remove_endpoint_from_project( endpoint_id2, self.default_domain_project_id ) # but, we can just still get back two endpoints from the cache, # since the catalog is pulled out from cache and its haven't # been invalidated. catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertEqual(2, len(catalog[0]['endpoints'])) # add back the endpoint2 to the default project, and remove it by # catalog_api API manage. 
PROVIDERS.catalog_api.driver.add_endpoint_to_project( endpoint_id2, self.default_domain_project_id ) # remove the endpoint2 from the default project, this can be done # by calling the catalog_api API manager directly but call # the REST API instead for consistency. self.delete( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % { 'project_id': self.default_domain_project_id, 'endpoint_id': endpoint_id2, } ) # should only get back one endpoint since the cache has been # invalidated after the endpoint project association was removed. catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertEqual(1, len(catalog[0]['endpoints'])) self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id']) class EndpointFilterTokenRequestTestCase(EndpointFilterTestCase): def test_project_scoped_token_using_endpoint_filter(self): """Verify endpoints from project scoped token filtered.""" # create a project to work with ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': ref}) project = self.assertValidProjectResponse(r, ref) # grant the user a role on the project self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'user_id': self.user['id'], 'project_id': project['id'], 'role_id': self.role['id'], } ) # set the user's preferred project body = {'user': {'default_project_id': project['id']}} r = self.patch( '/users/{user_id}'.format(user_id=self.user['id']), body=body ) self.assertValidUserResponse(r) # add one endpoint to the project self.put( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % {'project_id': project['id'], 'endpoint_id': self.endpoint_id} ) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) r = self.post('/auth/tokens', body=auth_data) self.assertValidProjectScopedTokenResponse( r, require_catalog=True, 
            endpoint_filter=True,
            ep_filter_assoc=1
        )
        self.assertEqual(project['id'], r.result['token']['project']['id'])

    def test_default_scoped_token_using_endpoint_filter(self):
        """Verify endpoints from default scoped token filtered."""
        # add one endpoint to default project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.project['id'],
                'endpoint_id': self.endpoint_id,
            }
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1
        )
        self.assertEqual(
            self.project['id'], r.result['token']['project']['id']
        )

        # Ensure name of the service exists
        self.assertIn('name', r.result['token']['catalog'][0])

        # region and region_id should be the same in endpoints
        endpoint = r.result['token']['catalog'][0]['endpoints'][0]
        self.assertIn('region', endpoint)
        self.assertIn('region_id', endpoint)
        self.assertEqual(endpoint['region'], endpoint['region_id'])

    def test_scoped_token_with_no_catalog_using_endpoint_filter(self):
        """Verify endpoint filter does not affect no catalog."""
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.project['id'],
                'endpoint_id': self.endpoint_id,
            }
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        # ?nocatalog suppresses the catalog entirely, filter or not.
        r = self.post('/auth/tokens?nocatalog', body=auth_data)
        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
        self.assertEqual(
            self.project['id'], r.result['token']['project']['id']
        )

    def test_invalid_endpoint_project_association(self):
        """Verify an invalid endpoint-project association is handled."""
        # add first endpoint to default project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id':
                    self.project['id'],
                'endpoint_id': self.endpoint_id,
            }
        )

        # create a second temporary endpoint
        endpoint_id2 = uuid.uuid4().hex
        endpoint2 = unit.new_endpoint_ref(
            service_id=self.service_id,
            region_id=self.region_id,
            interface='public',
            id=endpoint_id2,
        )
        PROVIDERS.catalog_api.create_endpoint(endpoint_id2, endpoint2.copy())

        # add second endpoint to default project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {'project_id': self.project['id'], 'endpoint_id': endpoint_id2}
        )

        # remove the temporary reference
        # this will create inconsistency in the endpoint filter table
        # which is fixed during the catalog creation for token request
        PROVIDERS.catalog_api.delete_endpoint(endpoint_id2)

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.post('/auth/tokens', body=auth_data)
        # only the surviving endpoint should appear in the catalog
        self.assertValidProjectScopedTokenResponse(
            r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=1
        )
        self.assertEqual(
            self.project['id'], r.result['token']['project']['id']
        )

    def test_disabled_endpoint(self):
        """Test that a disabled endpoint is handled."""
        # Add an enabled endpoint to the default project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.project['id'],
                'endpoint_id': self.endpoint_id,
            }
        )

        # Add a disabled endpoint to the default project.

        # Create a disabled endpoint that's like the enabled one.
        disabled_endpoint_ref = copy.copy(self.endpoint)
        disabled_endpoint_id = uuid.uuid4().hex
        disabled_endpoint_ref.update(
            {
                'id': disabled_endpoint_id,
                'enabled': False,
                'interface': 'internal',
            }
        )
        PROVIDERS.catalog_api.create_endpoint(
            disabled_endpoint_id, disabled_endpoint_ref
        )
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.project['id'],
                'endpoint_id': disabled_endpoint_id,
            }
        )

        # Authenticate to get token with catalog
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.post('/auth/tokens', body=auth_data)

        # the disabled endpoint must be filtered out of the catalog
        endpoints = r.result['token']['catalog'][0]['endpoints']
        endpoint_ids = [ep['id'] for ep in endpoints]
        self.assertEqual([self.endpoint_id], endpoint_ids)

    def test_multiple_endpoint_project_associations(self):
        def _create_an_endpoint():
            # helper: create a public endpoint and return its id
            endpoint_ref = unit.new_endpoint_ref(
                service_id=self.service_id,
                interface='public',
                region_id=self.region_id,
            )
            r = self.post('/endpoints', body={'endpoint': endpoint_ref})
            return r.result['endpoint']['id']

        # create three endpoints
        endpoint_id1 = _create_an_endpoint()
        endpoint_id2 = _create_an_endpoint()
        _create_an_endpoint()

        # only associate two endpoints with project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {'project_id': self.project['id'], 'endpoint_id': endpoint_id1}
        )
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {'project_id': self.project['id'], 'endpoint_id': endpoint_id2}
        )

        # there should be only two endpoints in token catalog
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            r, require_catalog=True, endpoint_filter=True, ep_filter_assoc=2
        )

    def test_get_auth_catalog_using_endpoint_filter(self):
        # add one endpoint to default
        # project
        self.put(
            '/OS-EP-FILTER/projects/%(project_id)s'
            '/endpoints/%(endpoint_id)s'
            % {
                'project_id': self.project['id'],
                'endpoint_id': self.endpoint_id,
            }
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        token_data = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(
            token_data,
            require_catalog=True,
            endpoint_filter=True,
            ep_filter_assoc=1,
        )

        # GET /auth/catalog must return the same filtered catalog that was
        # embedded in the token.
        auth_catalog = self.get(
            '/auth/catalog', token=token_data.headers['X-Subject-Token']
        )
        self.assertEqual(
            token_data.result['token']['catalog'],
            auth_catalog.result['catalog'],
        )


class JsonHomeTests(EndpointFilterTestCase, test_v3.JsonHomeTestMixin):
    # Expected JSON-Home relations advertised for the OS-EP-FILTER extension.
    JSON_HOME_DATA = {
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_projects': {
            'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects',
            'href-vars': {
                'endpoint_id':
                'https://docs.openstack.org/api/openstack-identity/3/param/'
                'endpoint_id',
            },
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_groups': {
            'href': '/OS-EP-FILTER/endpoint_groups',
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
            '{endpoint_group_id}',
            'href-vars': {
                'endpoint_group_id':
                'https://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoint_group_to_project_association': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
            '{endpoint_group_id}/projects/{project_id}',
            'href-vars': {
                'project_id':
                'https://docs.openstack.org/api/openstack-identity/3/param/'
                'project_id',
                'endpoint_group_id':
                'https://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/projects_associated_with_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
            '{endpoint_group_id}/projects',
            'href-vars': {
                'endpoint_group_id':
                'https://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/endpoints_in_endpoint_group': {
            'href-template': '/OS-EP-FILTER/endpoint_groups/'
            '{endpoint_group_id}/endpoints',
            'href-vars': {
                'endpoint_group_id':
                'https://docs.openstack.org/api/openstack-identity/3/'
                'ext/OS-EP-FILTER/1.0/param/endpoint_group_id',
            },
        },
        'https://docs.openstack.org/api/openstack-identity/3/ext/OS-EP-FILTER/'
        '1.0/rel/project_endpoint_groups': {
            'href-template': '/OS-EP-FILTER/projects/{project_id}/'
            'endpoint_groups',
            'href-vars': {
                'project_id':
                'https://docs.openstack.org/api/openstack-identity/3/param/'
                'project_id',
            },
        },
    }


class EndpointGroupCRUDTestCase(EndpointFilterTestCase):
    # Request body used by most tests; filters on the 'admin' interface.
    DEFAULT_ENDPOINT_GROUP_BODY = {
        'endpoint_group': {
            'description': 'endpoint group description',
            'filters': {'interface': 'admin'},
            'name': 'endpoint_group_name',
        }
    }

    # Collection URL for endpoint group CRUD operations.
    DEFAULT_ENDPOINT_GROUP_URL = '/OS-EP-FILTER/endpoint_groups'

    def test_create_endpoint_group(self):
        """POST /OS-EP-FILTER/endpoint_groups.

        Valid endpoint group test case.

""" r = self.post( self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY, ) expected_filters = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group'][ 'filters' ] expected_name = self.DEFAULT_ENDPOINT_GROUP_BODY['endpoint_group'][ 'name' ] self.assertEqual( expected_filters, r.result['endpoint_group']['filters'] ) self.assertEqual(expected_name, r.result['endpoint_group']['name']) self.assertThat( r.result['endpoint_group']['links']['self'], matchers.EndsWith( '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' % {'endpoint_group_id': r.result['endpoint_group']['id']} ), ) def test_create_invalid_endpoint_group(self): """POST /OS-EP-FILTER/endpoint_groups. Invalid endpoint group creation test case. """ invalid_body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY) invalid_body['endpoint_group']['filters'] = {'foobar': 'admin'} self.post( self.DEFAULT_ENDPOINT_GROUP_URL, body=invalid_body, expected_status=http.client.BAD_REQUEST, ) def test_get_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}. Valid endpoint group test case. """ # create an endpoint group to work with response = self.post( self.DEFAULT_ENDPOINT_GROUP_URL, body=self.DEFAULT_ENDPOINT_GROUP_BODY, ) endpoint_group_id = response.result['endpoint_group']['id'] endpoint_group_filters = response.result['endpoint_group']['filters'] endpoint_group_name = response.result['endpoint_group']['name'] url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) self.get(url) self.assertEqual( endpoint_group_id, response.result['endpoint_group']['id'] ) self.assertEqual( endpoint_group_filters, response.result['endpoint_group']['filters'], ) self.assertEqual( endpoint_group_name, response.result['endpoint_group']['name'] ) self.assertThat( response.result['endpoint_group']['links']['self'], matchers.EndsWith(url), ) def test_get_invalid_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}. 
        Invalid endpoint group test case.

        """
        endpoint_group_id = 'foobar'
        url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format(
            endpoint_group_id=endpoint_group_id
        )
        self.get(url, expected_status=http.client.NOT_FOUND)

    def test_check_endpoint_group(self):
        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}.

        Valid endpoint_group_id test case.

        """
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )
        url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format(
            endpoint_group_id=endpoint_group_id
        )
        self.head(url, expected_status=http.client.OK)

    def test_check_invalid_endpoint_group(self):
        """HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group_id}.

        Invalid endpoint_group_id test case.

        """
        endpoint_group_id = 'foobar'
        url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format(
            endpoint_group_id=endpoint_group_id
        )
        self.head(url, expected_status=http.client.NOT_FOUND)

    def test_patch_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}.

        Valid endpoint group patch test case.

        """
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'region_id': 'UK'}
        body['endpoint_group']['name'] = 'patch_test'
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )
        url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format(
            endpoint_group_id=endpoint_group_id
        )
        r = self.patch(url, body=body)
        self.assertEqual(endpoint_group_id, r.result['endpoint_group']['id'])
        self.assertEqual(
            body['endpoint_group']['filters'],
            r.result['endpoint_group']['filters'],
        )
        self.assertThat(
            r.result['endpoint_group']['links']['self'], matchers.EndsWith(url)
        )

    def test_patch_nonexistent_endpoint_group(self):
        """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}.

        Invalid endpoint group patch test case.

""" body = {'endpoint_group': {'name': 'patch_test'}} url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id='ABC' ) self.patch(url, body=body, expected_status=http.client.NOT_FOUND) def test_patch_invalid_endpoint_group(self): """PATCH /OS-EP-FILTER/endpoint_groups/{endpoint_group}. Valid endpoint group patch test case. """ body = { 'endpoint_group': { 'description': 'endpoint group description', 'filters': {'region': 'UK'}, 'name': 'patch_test', } } # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) self.patch(url, body=body, expected_status=http.client.BAD_REQUEST) # Perform a GET call to ensure that the content remains # the same (as DEFAULT_ENDPOINT_GROUP_BODY) after attempting to update # with an invalid filter url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) r = self.get(url) del r.result['endpoint_group']['id'] del r.result['endpoint_group']['links'] self.assertDictEqual(self.DEFAULT_ENDPOINT_GROUP_BODY, r.result) def test_delete_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}. Valid endpoint group test case. """ # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) self.delete(url) self.get(url, expected_status=http.client.NOT_FOUND) def test_delete_invalid_endpoint_group(self): """GET /OS-EP-FILTER/endpoint_groups/{endpoint_group}. Invalid endpoint group test case. 
""" endpoint_group_id = 'foobar' url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) self.delete(url, expected_status=http.client.NOT_FOUND) def test_add_endpoint_group_to_project(self): """Create a valid endpoint group and project association.""" endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) self._create_endpoint_group_project_association( endpoint_group_id, self.project_id ) def test_add_endpoint_group_to_project_with_invalid_project_id(self): """Create an invalid endpoint group and project association.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # associate endpoint group with project project_id = uuid.uuid4().hex url = self._get_project_endpoint_group_url( endpoint_group_id, project_id ) self.put(url, expected_status=http.client.NOT_FOUND) def test_get_endpoint_group_in_project(self): """Test retrieving project endpoint group association.""" # create an endpoint group to work with endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # associate endpoint group with project url = self._get_project_endpoint_group_url( endpoint_group_id, self.project_id ) self.put(url) response = self.get(url) self.assertEqual( endpoint_group_id, response.result['project_endpoint_group']['endpoint_group_id'], ) self.assertEqual( self.project_id, response.result['project_endpoint_group']['project_id'], ) def test_get_invalid_endpoint_group_in_project(self): """Test retrieving project endpoint group association.""" endpoint_group_id = uuid.uuid4().hex project_id = uuid.uuid4().hex url = self._get_project_endpoint_group_url( endpoint_group_id, project_id ) self.get(url, expected_status=http.client.NOT_FOUND) def test_list_endpoint_groups_in_project(self): """GET & HEAD 
        /OS-EP-FILTER/projects/{project_id}/endpoint_groups."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )

        # associate endpoint group with project
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id
        )
        self.put(url)

        url = '/OS-EP-FILTER/projects/{project_id}/endpoint_groups'.format(
            project_id=self.project_id
        )
        response = self.get(url, expected_status=http.client.OK)
        self.assertEqual(
            endpoint_group_id, response.result['endpoint_groups'][0]['id']
        )
        self.head(url, expected_status=http.client.OK)

    def test_list_endpoint_groups_in_invalid_project(self):
        """Test retrieving from invalid project."""
        project_id = uuid.uuid4().hex
        url = '/OS-EP-FILTER/projects/{project_id}/endpoint_groups'.format(
            project_id=project_id
        )
        self.get(url, expected_status=http.client.NOT_FOUND)
        self.head(url, expected_status=http.client.NOT_FOUND)

    def test_empty_endpoint_groups_in_project(self):
        """Test when no endpoint groups associated with the project."""
        url = '/OS-EP-FILTER/projects/{project_id}/endpoint_groups'.format(
            project_id=self.project_id
        )
        response = self.get(url, expected_status=http.client.OK)
        self.assertEqual(0, len(response.result['endpoint_groups']))
        self.head(url, expected_status=http.client.OK)

    def test_check_endpoint_group_to_project(self):
        """Test HEAD with a valid endpoint group and project association."""
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.project_id
        )
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id
        )
        self.head(url, expected_status=http.client.OK)

    def test_check_endpoint_group_to_project_with_invalid_project_id(self):
        """Test HEAD with an invalid endpoint group and project association."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )

        # create an endpoint group to project association
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, self.project_id
        )
        self.put(url)

        # send a head request with an invalid project id
        project_id = uuid.uuid4().hex
        url = self._get_project_endpoint_group_url(
            endpoint_group_id, project_id
        )
        self.head(url, expected_status=http.client.NOT_FOUND)

    def test_list_endpoint_groups(self):
        """GET & HEAD /OS-EP-FILTER/endpoint_groups."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )

        # recover all endpoint groups
        url = '/OS-EP-FILTER/endpoint_groups'
        r = self.get(url, expected_status=http.client.OK)
        self.assertNotEmpty(r.result['endpoint_groups'])
        self.assertEqual(
            endpoint_group_id, r.result['endpoint_groups'][0].get('id')
        )
        self.head(url, expected_status=http.client.OK)

    def test_list_endpoint_groups_by_name(self):
        """GET & HEAD /OS-EP-FILTER/endpoint_groups."""
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )

        # retrieve the single endpoint group by name
        url = '/OS-EP-FILTER/endpoint_groups?name={name}'.format(
            name='endpoint_group_name'
        )
        r = self.get(url, expected_status=http.client.OK)
        self.assertNotEmpty(r.result['endpoint_groups'])
        self.assertEqual(1, len(r.result['endpoint_groups']))
        self.assertEqual(
            endpoint_group_id, r.result['endpoint_groups'][0].get('id')
        )
        self.head(url, expected_status=http.client.OK)

        # try to retrieve a non-existent one
        url = '/OS-EP-FILTER/endpoint_groups?name={name}'.format(name='fake')
        r = self.get(url, expected_status=http.client.OK)
        self.assertEqual(0, len(r.result['endpoint_groups']))

    def test_list_projects_associated_with_endpoint_group(self):
        """GET & HEAD
        /OS-EP-FILTER/endpoint_groups/{endpoint_group}/projects.

        Valid endpoint group test case.

        """
        # create an endpoint group to work with
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY
        )

        # associate endpoint group with project
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.project_id
        )

        # recover list of projects associated with endpoint group
        url = (
            '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
            '/projects' % {'endpoint_group_id': endpoint_group_id}
        )
        self.get(url, expected_status=http.client.OK)
        self.head(url, expected_status=http.client.OK)

    def test_list_endpoints_associated_with_endpoint_group(self):
        """GET & HEAD /OS-EP-FILTER/endpoint_groups/{endpoint_group}/endpoints.

        Valid endpoint group test case.

        """
        # create a service
        service_ref = unit.new_service_ref()
        response = self.post('/services', body={'service': service_ref})
        service_id = response.result['service']['id']

        # create an endpoint
        endpoint_ref = unit.new_endpoint_ref(
            service_id=service_id, interface='public', region_id=self.region_id
        )
        response = self.post('/endpoints', body={'endpoint': endpoint_ref})
        endpoint_id = response.result['endpoint']['id']

        # create an endpoint group whose filters match the new service
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body
        )

        # create association
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.project_id
        )

        # recover list of endpoints associated with endpoint group
        url = (
            '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s'
            '/endpoints' % {'endpoint_group_id': endpoint_group_id}
        )
        r = self.get(url, expected_status=http.client.OK)
        self.assertNotEmpty(r.result['endpoints'])
        self.assertEqual(endpoint_id, r.result['endpoints'][0].get('id'))
        self.head(url, expected_status=http.client.OK)

    def test_list_endpoints_associated_with_project_endpoint_group(self):
        """GET & HEAD /OS-EP-FILTER/projects/{project_id}/endpoints.

        Valid project, endpoint id, and endpoint group test case.

        """
        # create a temporary service
        service_ref = unit.new_service_ref()
        response = self.post('/services', body={'service': service_ref})
        service_id2 = response.result['service']['id']

        # create additional endpoints
        self._create_endpoint_and_associations(
            self.default_domain_project_id, service_id2
        )
        self._create_endpoint_and_associations(self.default_domain_project_id)

        # create project and endpoint association with default endpoint:
        self.put(self.default_request_url)

        # create an endpoint group that contains a different endpoint
        body = copy.deepcopy(self.DEFAULT_ENDPOINT_GROUP_BODY)
        body['endpoint_group']['filters'] = {'service_id': service_id2}
        endpoint_group_id = self._create_valid_endpoint_group(
            self.DEFAULT_ENDPOINT_GROUP_URL, body
        )

        # associate endpoint group with project
        self._create_endpoint_group_project_association(
            endpoint_group_id, self.default_domain_project_id
        )

        # Now get a list of the filtered endpoints
        endpoints_url = '/OS-EP-FILTER/projects/{project_id}/endpoints'.format(
            project_id=self.default_domain_project_id
        )
        r = self.get(endpoints_url, expected_status=http.client.OK)
        endpoints = self.assertValidEndpointListResponse(r)
        self.assertEqual(2, len(endpoints))
        self.head(endpoints_url, expected_status=http.client.OK)

        # Ensure catalog includes the endpoints from endpoint_group project
        # association, this is needed when a project scoped token is issued
        # and "endpoint_filter.sql" backend driver is in place.
user_id = uuid.uuid4().hex catalog_list = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertEqual(2, len(catalog_list)) # Now remove project endpoint group association url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id ) self.delete(url) # Now remove endpoint group url = '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}'.format( endpoint_group_id=endpoint_group_id ) self.delete(url) r = self.get(endpoints_url) endpoints = self.assertValidEndpointListResponse(r) self.assertEqual(1, len(endpoints)) catalog_list = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertEqual(1, len(catalog_list)) def test_endpoint_group_project_cleanup_with_project(self): # create endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # create new project and associate with endpoint_group project_ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': project_ref}) project = self.assertValidProjectResponse(r, project_ref) url = self._get_project_endpoint_group_url( endpoint_group_id, project['id'] ) self.put(url) # check that we can recover the project endpoint group association self.get(url, expected_status=http.client.OK) self.get(url, expected_status=http.client.OK) # Now delete the project and then try and retrieve the project # endpoint group association again self.delete('/projects/{project_id}'.format(project_id=project['id'])) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_endpoint_group_project_cleanup_with_endpoint_group(self): # create endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # create new project and associate with endpoint_group project_ref = 
unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': project_ref}) project = self.assertValidProjectResponse(r, project_ref) url = self._get_project_endpoint_group_url( endpoint_group_id, project['id'] ) self.put(url) # check that we can recover the project endpoint group association self.get(url) # now remove the project endpoint group association self.delete(url) self.get(url, expected_status=http.client.NOT_FOUND) def test_removing_an_endpoint_group_project(self): # create an endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # create an endpoint_group project url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id ) self.put(url) # remove the endpoint group project self.delete(url) self.get(url, expected_status=http.client.NOT_FOUND) def test_remove_endpoint_group_with_project_association(self): # create an endpoint group endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # create an endpoint_group project project_endpoint_group_url = self._get_project_endpoint_group_url( endpoint_group_id, self.default_domain_project_id ) self.put(project_endpoint_group_url) # remove endpoint group, the associated endpoint_group project will # be removed as well. 
endpoint_group_url = ( '/OS-EP-FILTER/endpoint_groups/' '%(endpoint_group_id)s' % {'endpoint_group_id': endpoint_group_id} ) self.delete(endpoint_group_url) self.get(endpoint_group_url, expected_status=http.client.NOT_FOUND) self.get( project_endpoint_group_url, expected_status=http.client.NOT_FOUND ) @unit.skip_if_cache_disabled('catalog') def test_add_endpoint_group_to_project_invalidates_catalog_cache(self): # create another endpoint with 'admin' interface which matches # 'filters' definition in endpoint group, then there should be two # endpoints returned when retrieving v3 catalog if cache works as # expected. # this should be done at first since `create_endpoint` will also # invalidate cache. endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref( service_id=self.service_id, region_id=self.region_id, interface='admin', id=endpoint_id2, ) PROVIDERS.catalog_api.create_endpoint(endpoint_id2, endpoint2) # create a project and endpoint association. self.put(self.default_request_url) # there is only one endpoint associated with the default project. user_id = uuid.uuid4().hex catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) # create an endpoint group. endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # add the endpoint group to default project, bypassing # catalog_api API manager. PROVIDERS.catalog_api.driver.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id ) # can get back only one endpoint from the cache, since the catalog # is pulled out from cache. 
invalid_catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertThat(invalid_catalog[0]['endpoints'], matchers.HasLength(1)) self.assertEqual(catalog, invalid_catalog) # remove the endpoint group from default project, and add it again via # catalog_api API manager. PROVIDERS.catalog_api.driver.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id ) # add the endpoint group to default project. PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id ) catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) # now, it will return 2 endpoints since the cache has been # invalidated. self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) ep_id_list = [ catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id'], ] self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list) @unit.skip_if_cache_disabled('catalog') def test_remove_endpoint_group_from_project_invalidates_cache(self): # create another endpoint with 'admin' interface which matches # 'filters' definition in endpoint group, then there should be two # endpoints returned when retrieving v3 catalog. But only one # endpoint will return after the endpoint group's deletion if cache # works as expected. # this should be done at first since `create_endpoint` will also # invalidate cache. endpoint_id2 = uuid.uuid4().hex endpoint2 = unit.new_endpoint_ref( service_id=self.service_id, region_id=self.region_id, interface='admin', id=endpoint_id2, ) PROVIDERS.catalog_api.create_endpoint(endpoint_id2, endpoint2) # create project and endpoint association. self.put(self.default_request_url) # create an endpoint group. endpoint_group_id = self._create_valid_endpoint_group( self.DEFAULT_ENDPOINT_GROUP_URL, self.DEFAULT_ENDPOINT_GROUP_BODY ) # add the endpoint group to default project. 
PROVIDERS.catalog_api.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id ) # should get back two endpoints, one from endpoint project # association, the other one is from endpoint_group project # association. user_id = uuid.uuid4().hex catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) ep_id_list = [ catalog[0]['endpoints'][0]['id'], catalog[0]['endpoints'][1]['id'], ] self.assertCountEqual([self.endpoint_id, endpoint_id2], ep_id_list) # remove endpoint_group project association, bypassing # catalog_api API manager. PROVIDERS.catalog_api.driver.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id ) # still get back two endpoints, since the catalog is pulled out # from cache and the cache haven't been invalidated. invalid_catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertThat(invalid_catalog[0]['endpoints'], matchers.HasLength(2)) self.assertEqual(catalog, invalid_catalog) # add back the endpoint_group project association and remove it from # manager. PROVIDERS.catalog_api.driver.add_endpoint_group_to_project( endpoint_group_id, self.default_domain_project_id ) PROVIDERS.catalog_api.remove_endpoint_group_from_project( endpoint_group_id, self.default_domain_project_id ) # should only get back one endpoint since the cache has been # invalidated after the endpoint_group project association was # removed. 
catalog = PROVIDERS.catalog_api.get_v3_catalog( user_id, self.default_domain_project_id ) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) self.assertEqual(self.endpoint_id, catalog[0]['endpoints'][0]['id']) def _create_valid_endpoint_group(self, url, body): r = self.post(url, body=body) return r.result['endpoint_group']['id'] def _create_endpoint_group_project_association( self, endpoint_group_id, project_id ): url = self._get_project_endpoint_group_url( endpoint_group_id, project_id ) self.put(url) def _get_project_endpoint_group_url(self, endpoint_group_id, project_id): return ( '/OS-EP-FILTER/endpoint_groups/%(endpoint_group_id)s' '/projects/%(project_id)s' % { 'endpoint_group_id': endpoint_group_id, 'project_id': project_id, } ) def _create_endpoint_and_associations(self, project_id, service_id=None): """Create an endpoint associated with service and project.""" if not service_id: # create a new service service_ref = unit.new_service_ref() response = self.post('/services', body={'service': service_ref}) service_id = response.result['service']['id'] # create endpoint endpoint_ref = unit.new_endpoint_ref( service_id=service_id, interface='public', region_id=self.region_id ) response = self.post('/endpoints', body={'endpoint': endpoint_ref}) endpoint = response.result['endpoint'] # now add endpoint to project self.put( '/OS-EP-FILTER/projects/%(project_id)s' '/endpoints/%(endpoint_id)s' % {'project_id': self.project['id'], 'endpoint_id': endpoint['id']} ) return endpoint ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_auth_plugin.py0000664000175000017500000002131000000000000023372 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid import stevedore from keystone.api._shared import authentication from keystone import auth from keystone.auth.plugins import base from keystone.auth.plugins import mapped from keystone import exception from keystone.tests import unit from keystone.tests.unit.ksfixtures import auth_plugins # for testing purposes only METHOD_NAME = 'simple_challenge_response' EXPECTED_RESPONSE = uuid.uuid4().hex DEMO_USER_ID = uuid.uuid4().hex class SimpleChallengeResponse(base.AuthMethodHandler): def authenticate(self, auth_payload): response_data = {} if 'response' in auth_payload: if auth_payload['response'] != EXPECTED_RESPONSE: raise exception.Unauthorized('Wrong answer') response_data['user_id'] = DEMO_USER_ID return base.AuthHandlerResponse( status=True, response_body=None, response_data=response_data ) else: return base.AuthHandlerResponse( status=False, response_body={ "challenge": "What's the name of your high school?" 
}, response_data=None, ) class TestAuthPlugin(unit.SQLDriverOverrides, unit.TestCase): def test_unsupported_auth_method(self): method_name = uuid.uuid4().hex auth_data = {'methods': [method_name]} auth_data[method_name] = {'test': 'test'} auth_data = {'identity': auth_data} self.assertRaises( exception.AuthMethodNotSupported, auth.core.AuthInfo.create, auth_data, ) @mock.patch.object(auth.core, '_get_auth_driver_manager') def test_addition_auth_steps(self, stevedore_mock): simple_challenge_plugin = SimpleChallengeResponse() extension = stevedore.extension.Extension( name='simple_challenge', entry_point=None, plugin=None, obj=simple_challenge_plugin, ) test_manager = stevedore.DriverManager.make_test_instance(extension) stevedore_mock.return_value = test_manager self.useFixture( auth_plugins.ConfigAuthPlugins( self.config_fixture, methods=[METHOD_NAME] ) ) self.useFixture(auth_plugins.LoadAuthPlugins(METHOD_NAME)) auth_data = {'methods': [METHOD_NAME]} auth_data[METHOD_NAME] = {'test': 'test'} auth_data = {'identity': auth_data} auth_info = auth.core.AuthInfo.create(auth_data) auth_context = auth.core.AuthContext(method_names=[]) try: with self.make_request(): authentication.authenticate(auth_info, auth_context) except exception.AdditionalAuthRequired as e: self.assertIn('methods', e.authentication) self.assertIn(METHOD_NAME, e.authentication['methods']) self.assertIn(METHOD_NAME, e.authentication) self.assertIn('challenge', e.authentication[METHOD_NAME]) # test correct response auth_data = {'methods': [METHOD_NAME]} auth_data[METHOD_NAME] = {'response': EXPECTED_RESPONSE} auth_data = {'identity': auth_data} auth_info = auth.core.AuthInfo.create(auth_data) auth_context = auth.core.AuthContext(method_names=[]) with self.make_request(): authentication.authenticate(auth_info, auth_context) self.assertEqual(DEMO_USER_ID, auth_context['user_id']) # test incorrect response auth_data = {'methods': [METHOD_NAME]} auth_data[METHOD_NAME] = {'response': uuid.uuid4().hex} 
auth_data = {'identity': auth_data} auth_info = auth.core.AuthInfo.create(auth_data) auth_context = auth.core.AuthContext(method_names=[]) with self.make_request(): self.assertRaises( exception.Unauthorized, authentication.authenticate, auth_info, auth_context, ) def test_duplicate_method(self): # Having the same method twice doesn't cause load_auth_methods to fail. self.useFixture( auth_plugins.ConfigAuthPlugins( self.config_fixture, ['external', 'external'] ) ) auth.core.load_auth_methods() self.assertIn('external', auth.core.AUTH_METHODS) class TestAuthPluginDynamicOptions(TestAuthPlugin): def config_overrides(self): super().config_overrides() # Clear the override for the [auth] ``methods`` option so it is # possible to load the options from the config file. self.config_fixture.conf.clear_override('methods', group='auth') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf')) return config_files class TestMapped(unit.TestCase): def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('test_auth_plugin.conf')) return config_files def _test_mapped_invocation_with_method_name(self, method_name): with mock.patch.object( auth.plugins.mapped.Mapped, 'authenticate', return_value=None ) as authenticate: auth_data = { 'identity': { 'methods': [method_name], method_name: {'protocol': method_name}, } } auth_info = auth.core.AuthInfo.create(auth_data) auth_context = auth.core.AuthContext( method_names=[], user_id=uuid.uuid4().hex ) with self.make_request(): authentication.authenticate(auth_info, auth_context) # make sure Mapped plugin got invoked with the correct payload ((auth_payload,), kwargs) = authenticate.call_args self.assertEqual(method_name, auth_payload['protocol']) def test_mapped_with_remote_user(self): method_name = 'saml2' auth_data = {'methods': [method_name]} # put the method name in the payload so its easier to correlate # method name with 
payload auth_data[method_name] = {'protocol': method_name} auth_data = {'identity': auth_data} auth_context = auth.core.AuthContext( method_names=[], user_id=uuid.uuid4().hex ) self.useFixture(auth_plugins.LoadAuthPlugins(method_name)) with mock.patch.object( auth.plugins.mapped.Mapped, 'authenticate', return_value=None ) as authenticate: auth_info = auth.core.AuthInfo.create(auth_data) with self.make_request(environ={'REMOTE_USER': 'foo@idp.com'}): authentication.authenticate(auth_info, auth_context) # make sure Mapped plugin got invoked with the correct payload ((auth_payload,), kwargs) = authenticate.call_args self.assertEqual(method_name, auth_payload['protocol']) @mock.patch('keystone.auth.plugins.mapped.PROVIDERS') def test_mapped_without_identity_provider_or_protocol( self, mock_providers ): mock_providers.resource_api = mock.Mock() mock_providers.federation_api = mock.Mock() mock_providers.identity_api = mock.Mock() mock_providers.assignment_api = mock.Mock() mock_providers.role_api = mock.Mock() test_mapped = mapped.Mapped() auth_payload = {'identity_provider': 'test_provider'} with self.make_request(): self.assertRaises( exception.ValidationError, test_mapped.authenticate, auth_payload, ) auth_payload = {'protocol': 'saml2'} with self.make_request(): self.assertRaises( exception.ValidationError, test_mapped.authenticate, auth_payload, ) def test_supporting_multiple_methods(self): method_names = ('saml2', 'openid', 'x509', 'mapped') self.useFixture(auth_plugins.LoadAuthPlugins(*method_names)) for method_name in method_names: self._test_mapped_invocation_with_method_name(method_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_endpoint_policy.py0000664000175000017500000003001500000000000025723 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import uuid from testtools import matchers from keystone.common import provider_api from keystone import exception from keystone.tests import unit PROVIDERS = provider_api.ProviderAPIs class PolicyAssociationTests: def _assert_correct_policy(self, endpoint, policy): ref = PROVIDERS.endpoint_policy_api.get_policy_for_endpoint( endpoint['id'] ) self.assertEqual(policy['id'], ref['id']) def _assert_correct_endpoints(self, policy, endpoint_list): endpoint_id_list = [ep['id'] for ep in endpoint_list] endpoints = PROVIDERS.endpoint_policy_api.list_endpoints_for_policy( policy['id'] ) self.assertThat(endpoints, matchers.HasLength(len(endpoint_list))) for endpoint in endpoints: self.assertIn(endpoint['id'], endpoint_id_list) def load_sample_data(self): """Create sample data to test policy associations. 
The following data is created: - 3 regions, in a hierarchy, 0 -> 1 -> 2 (where 0 is top) - 3 services - 6 endpoints, 2 in each region, with a mixture of services: 0 - region 0, Service 0 1 - region 0, Service 1 2 - region 1, Service 1 3 - region 1, Service 2 4 - region 2, Service 2 5 - region 2, Service 0 """ def new_endpoint(region_id, service_id): endpoint = unit.new_endpoint_ref( interface='test', region_id=region_id, service_id=service_id, url='/url', ) self.endpoint.append( PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) ) self.policy = [] self.endpoint = [] self.service = [] self.region = [] parent_region_id = None for i in range(3): policy = unit.new_policy_ref() self.policy.append( PROVIDERS.policy_api.create_policy(policy['id'], policy) ) service = unit.new_service_ref() self.service.append( PROVIDERS.catalog_api.create_service(service['id'], service) ) region = unit.new_region_ref(parent_region_id=parent_region_id) # Link the regions together as a hierarchy, [0] at the top parent_region_id = region['id'] self.region.append(PROVIDERS.catalog_api.create_region(region)) new_endpoint(self.region[0]['id'], self.service[0]['id']) new_endpoint(self.region[0]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[1]['id']) new_endpoint(self.region[1]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[2]['id']) new_endpoint(self.region[2]['id'], self.service[0]['id']) def test_policy_to_endpoint_association_crud(self): PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) PROVIDERS.endpoint_policy_api.check_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) PROVIDERS.endpoint_policy_api.delete_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], 
endpoint_id=self.endpoint[0]['id'], ) def test_overwriting_policy_to_endpoint_association(self): PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'] ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], ) PROVIDERS.endpoint_policy_api.check_policy_association( self.policy[1]['id'], endpoint_id=self.endpoint[0]['id'] ) def test_invalid_policy_to_endpoint_association(self): self.assertRaises( exception.InvalidPolicyAssociation, PROVIDERS.endpoint_policy_api.create_policy_association, self.policy[0]['id'], ) self.assertRaises( exception.InvalidPolicyAssociation, PROVIDERS.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], region_id=self.region[0]['id'], ) self.assertRaises( exception.InvalidPolicyAssociation, PROVIDERS.endpoint_policy_api.create_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], service_id=self.service[0]['id'], ) self.assertRaises( exception.InvalidPolicyAssociation, PROVIDERS.endpoint_policy_api.create_policy_association, self.policy[0]['id'], region_id=self.region[0]['id'], ) def test_policy_to_explicit_endpoint_association(self): # Associate policy 0 with endpoint 0 PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_endpoints(self.policy[0], [self.endpoint[0]]) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.get_policy_for_endpoint, uuid.uuid4().hex, ) def test_policy_to_service_association(self): PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'] ) 
PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id'] ) # Endpoints 0 and 5 are part of service 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) self._assert_correct_policy(self.endpoint[5], self.policy[0]) self._assert_correct_endpoints( self.policy[0], [self.endpoint[0], self.endpoint[5]] ) # Endpoints 1 and 2 are part of service 1 self._assert_correct_policy(self.endpoint[1], self.policy[1]) self._assert_correct_policy(self.endpoint[2], self.policy[1]) self._assert_correct_endpoints( self.policy[1], [self.endpoint[1], self.endpoint[2]] ) def test_policy_to_region_and_service_association(self): PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id'], ) PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[1]['id'], region_id=self.region[1]['id'], ) PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[2]['id'], service_id=self.service[2]['id'], region_id=self.region[2]['id'], ) # Endpoint 0 is in region 0 with service 0, so should get policy 0 self._assert_correct_policy(self.endpoint[0], self.policy[0]) # Endpoint 5 is in Region 2 with service 0, so should also get # policy 0 by searching up the tree to Region 0 self._assert_correct_policy(self.endpoint[5], self.policy[0]) # Looking the other way round, policy 2 should only be in use by # endpoint 4, since that's the only endpoint in region 2 with the # correct service self._assert_correct_endpoints(self.policy[2], [self.endpoint[4]]) # Policy 1 should only be in use by endpoint 2, since that's the only # endpoint in region 1 (and region 2 below it) with the correct service self._assert_correct_endpoints(self.policy[1], [self.endpoint[2]]) # Policy 0 should be in use by endpoint 0, as well as 5 (since 5 is # of the correct service and in region 2 below it) self._assert_correct_endpoints( 
self.policy[0], [self.endpoint[0], self.endpoint[5]] ) def test_delete_association_by_entity(self): PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'] ) PROVIDERS.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id'] ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], endpoint_id=self.endpoint[0]['id'], ) # Make sure deleting it again is silent - since this method is used # in response to notifications by the controller. PROVIDERS.endpoint_policy_api.delete_association_by_endpoint( self.endpoint[0]['id'] ) # Now try with service - ensure both combined region & service # associations and explicit service ones are removed PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id'], ) PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id'], ) PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'] ) PROVIDERS.endpoint_policy_api.delete_association_by_service( self.service[0]['id'] ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id'], ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[1]['id'], service_id=self.service[0]['id'], region_id=self.region[1]['id'], ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], ) # Finally, check delete by region PROVIDERS.endpoint_policy_api.create_policy_association( self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id'], ) 
PROVIDERS.endpoint_policy_api.delete_association_by_region( self.region[0]['id'] ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], region_id=self.region[0]['id'], ) self.assertRaises( exception.NotFound, PROVIDERS.endpoint_policy_api.check_policy_association, self.policy[0]['id'], service_id=self.service[0]['id'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_endpoint_policy_sql.py0000664000175000017500000000263200000000000026606 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone.tests.unit import test_backend_endpoint_policy from keystone.tests.unit import test_backend_sql class SqlPolicyAssociationTable(test_backend_sql.SqlModels): """Set of tests for checking SQL Policy Association Mapping.""" def test_policy_association_mapping(self): cols = ( ('id', sql.String, 64), ('policy_id', sql.String, 64), ('endpoint_id', sql.String, 64), ('service_id', sql.String, 64), ('region_id', sql.String, 64), ) self.assertExpectedSchema('policy_association', cols) class SqlPolicyAssociationTests( test_backend_sql.SqlTests, test_backend_endpoint_policy.PolicyAssociationTests, ): def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_sample_data() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_federation_sql.py0000664000175000017500000000424400000000000025530 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import sql from keystone.tests.unit import test_backend_sql class SqlFederation(test_backend_sql.SqlModels): """Set of tests for checking SQL Federation.""" def test_identity_provider(self): cols = ( ('id', sql.String, 64), ('domain_id', sql.String, 64), ('enabled', sql.Boolean, None), ('description', sql.Text, None), ('authorization_ttl', sql.Integer, None), ) self.assertExpectedSchema('identity_provider', cols) def test_idp_remote_ids(self): cols = (('idp_id', sql.String, 64), ('remote_id', sql.String, 255)) self.assertExpectedSchema('idp_remote_ids', cols) def test_federated_protocol(self): cols = ( ('id', sql.String, 64), ('idp_id', sql.String, 64), ('mapping_id', sql.String, 64), ('remote_id_attribute', sql.String, 64), ) self.assertExpectedSchema('federation_protocol', cols) def test_mapping(self): cols = ( ('id', sql.String, 64), ('rules', sql.JsonBlob, None), ('schema_version', sql.String, 5), ) self.assertExpectedSchema('mapping', cols) def test_service_provider(self): cols = ( ('auth_url', sql.String, 256), ('id', sql.String, 64), ('enabled', sql.Boolean, None), ('description', sql.Text, None), ('relay_state_prefix', sql.String, 256), ('sp_url', sql.String, 256), ) self.assertExpectedSchema('service_provider', cols) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_id_mapping_sql.py0000664000175000017500000004733500000000000025527 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from testtools import matchers from keystone.common import provider_api from keystone.common import sql from keystone.identity.mapping_backends import mapping from keystone.tests import unit from keystone.tests.unit import identity_mapping as mapping_sql from keystone.tests.unit import test_backend_sql PROVIDERS = provider_api.ProviderAPIs class SqlIDMappingTable(test_backend_sql.SqlModels): """Set of tests for checking SQL Identity ID Mapping.""" def test_id_mapping(self): cols = ( ('public_id', sql.String, 64), ('domain_id', sql.String, 64), ('local_id', sql.String, 255), ('entity_type', sql.Enum, None), ) self.assertExpectedSchema('id_mapping', cols) class SqlIDMapping(test_backend_sql.SqlTests): def setUp(self): super().setUp() self.load_sample_data() def load_sample_data(self): self.addCleanup(self.clean_sample_data) domainA = unit.new_domain_ref() self.domainA = PROVIDERS.resource_api.create_domain( domainA['id'], domainA ) domainB = unit.new_domain_ref() self.domainB = PROVIDERS.resource_api.create_domain( domainB['id'], domainB ) def clean_sample_data(self): if hasattr(self, 'domainA'): self.domainA['enabled'] = False PROVIDERS.resource_api.update_domain( self.domainA['id'], self.domainA ) PROVIDERS.resource_api.delete_domain(self.domainA['id']) if hasattr(self, 'domainB'): self.domainB['enabled'] = False PROVIDERS.resource_api.update_domain( self.domainB['id'], self.domainB ) PROVIDERS.resource_api.delete_domain(self.domainB['id']) def test_invalid_public_key(self): self.assertIsNone( PROVIDERS.id_mapping_api.get_id_mapping(uuid.uuid4().hex) ) def test_id_mapping_crud(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id1 = uuid.uuid4().hex local_id2 = uuid.uuid4().hex local_entity1 = { 'domain_id': self.domainA['id'], 'local_id': local_id1, 'entity_type': mapping.EntityType.USER, } local_entity2 = { 'domain_id': 
self.domainB['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.GROUP, } # Check no mappings for the new local entities self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity1) ) self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity2) ) # Create the new mappings and then read them back public_id1 = PROVIDERS.id_mapping_api.create_id_mapping(local_entity1) public_id2 = PROVIDERS.id_mapping_api.create_id_mapping(local_entity2) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 2), ) self.assertEqual( public_id1, PROVIDERS.id_mapping_api.get_public_id(local_entity1) ) self.assertEqual( public_id2, PROVIDERS.id_mapping_api.get_public_id(local_entity2) ) local_id_ref = PROVIDERS.id_mapping_api.get_id_mapping(public_id1) self.assertEqual(self.domainA['id'], local_id_ref['domain_id']) self.assertEqual(local_id1, local_id_ref['local_id']) self.assertEqual(mapping.EntityType.USER, local_id_ref['entity_type']) # Check we have really created a new external ID self.assertNotEqual(local_id1, public_id1) local_id_ref = PROVIDERS.id_mapping_api.get_id_mapping(public_id2) self.assertEqual(self.domainB['id'], local_id_ref['domain_id']) self.assertEqual(local_id2, local_id_ref['local_id']) self.assertEqual(mapping.EntityType.GROUP, local_id_ref['entity_type']) # Check we have really created a new external ID self.assertNotEqual(local_id2, public_id2) # Create another mappings, this time specifying a public ID to use new_public_id = uuid.uuid4().hex public_id3 = PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainB['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.USER, }, public_id=new_public_id, ) self.assertEqual(new_public_id, public_id3) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 3), ) # Delete the mappings we created, and make sure the mapping count # goes back to where it was 
PROVIDERS.id_mapping_api.delete_id_mapping(public_id1) PROVIDERS.id_mapping_api.delete_id_mapping(public_id2) PROVIDERS.id_mapping_api.delete_id_mapping(public_id3) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings), ) def test_id_mapping_handles_unicode(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id = 'fäké1' local_entity = { 'domain_id': self.domainA['id'], 'local_id': local_id, 'entity_type': mapping.EntityType.USER, } # Check no mappings for the new local entity self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity)) # Create the new mapping and then read it back public_id = PROVIDERS.id_mapping_api.create_id_mapping(local_entity) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1), ) self.assertEqual( public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity) ) def test_id_mapping_handles_bytes(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id = b'FaKeID' local_entity = { 'domain_id': self.domainA['id'], 'local_id': local_id, 'entity_type': mapping.EntityType.USER, } # Check no mappings for the new local entity self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity)) # Create the new mapping and then read it back public_id = PROVIDERS.id_mapping_api.create_id_mapping(local_entity) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1), ) self.assertEqual( public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity) ) def test_id_mapping_handles_ids_greater_than_64_characters(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id = 'Aa' * 100 local_entity = { 'domain_id': self.domainA['id'], 'local_id': local_id, 'entity_type': mapping.EntityType.GROUP, } # Check no mappings for the new local entity self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity)) # Create the new mapping and then read it back public_id = 
PROVIDERS.id_mapping_api.create_id_mapping(local_entity) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1), ) self.assertEqual( public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity) ) self.assertEqual( local_id, PROVIDERS.id_mapping_api.get_id_mapping(public_id)['local_id'], ) def test_delete_public_id_is_silent(self): # Test that deleting an invalid public key is silent PROVIDERS.id_mapping_api.delete_id_mapping(uuid.uuid4().hex) def test_purge_mappings(self): initial_mappings = len(mapping_sql.list_id_mappings()) local_id1 = uuid.uuid4().hex local_id2 = uuid.uuid4().hex local_id3 = uuid.uuid4().hex local_id4 = uuid.uuid4().hex local_id5 = uuid.uuid4().hex # Create five mappings,two in domainA, three in domainB PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainA['id'], 'local_id': local_id1, 'entity_type': mapping.EntityType.USER, } ) PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainA['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.USER, } ) public_id3 = PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainB['id'], 'local_id': local_id3, 'entity_type': mapping.EntityType.GROUP, } ) public_id4 = PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER, } ) public_id5 = PROVIDERS.id_mapping_api.create_id_mapping( { 'domain_id': self.domainB['id'], 'local_id': local_id5, 'entity_type': mapping.EntityType.USER, } ) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 5), ) # Purge mappings for domainA, should be left with those in B PROVIDERS.id_mapping_api.purge_mappings( {'domain_id': self.domainA['id']} ) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 3), ) PROVIDERS.id_mapping_api.get_id_mapping(public_id3) PROVIDERS.id_mapping_api.get_id_mapping(public_id4) 
PROVIDERS.id_mapping_api.get_id_mapping(public_id5) # Purge mappings for type Group, should purge one more PROVIDERS.id_mapping_api.purge_mappings( {'entity_type': mapping.EntityType.GROUP} ) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 2), ) PROVIDERS.id_mapping_api.get_id_mapping(public_id4) PROVIDERS.id_mapping_api.get_id_mapping(public_id5) # Purge mapping for a specific local identifier PROVIDERS.id_mapping_api.purge_mappings( { 'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER, } ) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings + 1), ) PROVIDERS.id_mapping_api.get_id_mapping(public_id5) # Purge mappings the remaining mappings PROVIDERS.id_mapping_api.purge_mappings({}) self.assertThat( mapping_sql.list_id_mappings(), matchers.HasLength(initial_mappings), ) def test_create_duplicate_mapping(self): local_entity = { 'domain_id': self.domainA['id'], 'local_id': uuid.uuid4().hex, 'entity_type': mapping.EntityType.USER, } public_id1 = PROVIDERS.id_mapping_api.create_id_mapping(local_entity) # second call should be successful and return the same # public_id as above public_id2 = PROVIDERS.id_mapping_api.create_id_mapping(local_entity) self.assertEqual(public_id1, public_id2) # even if public_id was specified, it should not be used, # and still the same public_id should be returned public_id3 = PROVIDERS.id_mapping_api.create_id_mapping( local_entity, public_id=uuid.uuid4().hex ) self.assertEqual(public_id1, public_id3) @unit.skip_if_cache_disabled('identity') def test_cache_when_id_mapping_crud(self): local_id = uuid.uuid4().hex local_entity = { 'domain_id': self.domainA['id'], 'local_id': local_id, 'entity_type': mapping.EntityType.USER, } # Check no mappings for the new local entity self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity)) # Create new mappings, and it should be in the cache after created public_id = 
PROVIDERS.id_mapping_api.create_id_mapping(local_entity) self.assertEqual( public_id, PROVIDERS.id_mapping_api.get_public_id(local_entity) ) local_id_ref = PROVIDERS.id_mapping_api.get_id_mapping(public_id) self.assertEqual(self.domainA['id'], local_id_ref['domain_id']) self.assertEqual(local_id, local_id_ref['local_id']) self.assertEqual(mapping.EntityType.USER, local_id_ref['entity_type']) # After delete the mapping, should be deleted from cache too PROVIDERS.id_mapping_api.delete_id_mapping(public_id) self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(local_entity)) self.assertIsNone(PROVIDERS.id_mapping_api.get_id_mapping(public_id)) @unit.skip_if_cache_disabled('identity') def test_invalidate_cache_when_purge_mappings(self): local_id1 = uuid.uuid4().hex local_id2 = uuid.uuid4().hex local_id3 = uuid.uuid4().hex local_id4 = uuid.uuid4().hex local_id5 = uuid.uuid4().hex # Create five mappings,two in domainA, three in domainB local_entity1 = { 'domain_id': self.domainA['id'], 'local_id': local_id1, 'entity_type': mapping.EntityType.USER, } local_entity2 = { 'domain_id': self.domainA['id'], 'local_id': local_id2, 'entity_type': mapping.EntityType.USER, } local_entity3 = { 'domain_id': self.domainB['id'], 'local_id': local_id3, 'entity_type': mapping.EntityType.GROUP, } local_entity4 = { 'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER, } local_entity5 = { 'domain_id': self.domainB['id'], 'local_id': local_id5, 'entity_type': mapping.EntityType.USER, } PROVIDERS.id_mapping_api.create_id_mapping(local_entity1) PROVIDERS.id_mapping_api.create_id_mapping(local_entity2) PROVIDERS.id_mapping_api.create_id_mapping(local_entity3) PROVIDERS.id_mapping_api.create_id_mapping(local_entity4) PROVIDERS.id_mapping_api.create_id_mapping(local_entity5) # Purge mappings for domainA, should be left with those in B PROVIDERS.id_mapping_api.purge_mappings( {'domain_id': self.domainA['id']} ) self.assertIsNone( 
PROVIDERS.id_mapping_api.get_public_id(local_entity1) ) self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity2) ) # Purge mappings for type Group, should purge one more PROVIDERS.id_mapping_api.purge_mappings( {'entity_type': mapping.EntityType.GROUP} ) self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity3) ) # Purge mapping for a specific local identifier PROVIDERS.id_mapping_api.purge_mappings( { 'domain_id': self.domainB['id'], 'local_id': local_id4, 'entity_type': mapping.EntityType.USER, } ) self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity4) ) # Purge mappings the remaining mappings PROVIDERS.id_mapping_api.purge_mappings({}) self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity5) ) def _prepare_domain_mappings_for_list(self): # Create five mappings: # two users in domainA, one group and two users in domainB local_entities = [ { 'domain_id': self.domainA['id'], 'entity_type': mapping.EntityType.USER, }, { 'domain_id': self.domainA['id'], 'entity_type': mapping.EntityType.USER, }, { 'domain_id': self.domainB['id'], 'entity_type': mapping.EntityType.GROUP, }, { 'domain_id': self.domainB['id'], 'entity_type': mapping.EntityType.USER, }, { 'domain_id': self.domainB['id'], 'entity_type': mapping.EntityType.USER, }, ] for e in local_entities: e['local_id'] = uuid.uuid4().hex e['public_id'] = PROVIDERS.id_mapping_api.create_id_mapping(e) return local_entities def test_get_domain_mapping_list(self): local_entities = self._prepare_domain_mappings_for_list() # NOTE(notmorgan): Always call to_dict in an active session context to # ensure that lazy-loaded relationships succeed. Edge cases could cause # issues especially in attribute mappers. 
with sql.session_for_read(): # list mappings for domainA domain_a_mappings = ( PROVIDERS.id_mapping_api.get_domain_mapping_list( self.domainA['id'] ) ) domain_a_mappings = [m.to_dict() for m in domain_a_mappings] self.assertCountEqual(local_entities[:2], domain_a_mappings) def test_get_domain_mapping_list_by_user_entity_type(self): local_entities = self._prepare_domain_mappings_for_list() # NOTE(notmorgan): Always call to_dict in an active session context to # ensure that lazy-loaded relationships succeed. Edge cases could cause # issues especially in attribute mappers. with sql.session_for_read(): # list user mappings for domainB domain_b_mappings_user = ( PROVIDERS.id_mapping_api.get_domain_mapping_list( self.domainB['id'], entity_type=mapping.EntityType.USER ) ) domain_b_mappings_user = [ m.to_dict() for m in domain_b_mappings_user ] self.assertCountEqual(local_entities[-2:], domain_b_mappings_user) def test_get_domain_mapping_list_by_group_entity_type(self): local_entities = self._prepare_domain_mappings_for_list() # NOTE(notmorgan): Always call to_dict in an active session context to # ensure that lazy-loaded relationships succeed. Edge cases could cause # issues especially in attribute mappers. with sql.session_for_read(): # List group mappings for domainB. Given the data set, this should # only return a single reference, so don't both iterating the query # response. domain_b_mappings_group = ( PROVIDERS.id_mapping_api.get_domain_mapping_list( self.domainB['id'], entity_type=mapping.EntityType.GROUP ) ) domain_b_mappings_group = domain_b_mappings_group.first().to_dict() self.assertCountEqual(local_entities[2], domain_b_mappings_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_ldap.py0000664000175000017500000043553100000000000023460 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import http.client from unittest import mock import uuid import fixtures import ldap from oslo_log import versionutils import pkg_resources from testtools import matchers from keystone.common import cache from keystone.common import driver_hints from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import identity from keystone.identity.backends import ldap as ldap_identity from keystone.identity.backends.ldap import common as common_ldap from keystone.identity.backends import sql as sql_identity from keystone.identity.mapping_backends import mapping as map from keystone.tests import unit from keystone.tests.unit.assignment import test_backends as assignment_tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity import test_backends as identity_tests from keystone.tests.unit import identity_mapping as mapping_sql from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb from keystone.tests.unit.resource import test_backends as resource_tests CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def _assert_backends(testcase, **kwargs): def _get_backend_cls(testcase, subsystem): observed_backend = getattr(testcase, subsystem + '_api').driver return observed_backend.__class__ def _get_domain_specific_backend_cls(manager, domain): observed_backend = 
manager.domain_configs.get_domain_driver(domain) return observed_backend.__class__ def _get_entrypoint_cls(subsystem, name): entrypoint = entrypoint_map['keystone.' + subsystem][name] return entrypoint.resolve() def _load_domain_specific_configs(manager): if ( not manager.domain_configs.configured and CONF.identity.domain_specific_drivers_enabled ): manager.domain_configs.setup_domain_drivers( manager.driver, manager.resource_api ) def _assert_equal(expected_cls, observed_cls, subsystem, domain=None): msg = ( 'subsystem %(subsystem)s expected %(expected_cls)r, ' 'but observed %(observed_cls)r' ) if domain: subsystem = f'{subsystem}[domain={domain}]' assert expected_cls == observed_cls, msg % { 'expected_cls': expected_cls, 'observed_cls': observed_cls, 'subsystem': subsystem, } env = pkg_resources.Environment() keystone_dist = env['keystone'][0] entrypoint_map = pkg_resources.get_entry_map(keystone_dist) for subsystem, entrypoint_name in kwargs.items(): if isinstance(entrypoint_name, str): observed_cls = _get_backend_cls(testcase, subsystem) expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name) _assert_equal(expected_cls, observed_cls, subsystem) elif isinstance(entrypoint_name, dict): manager = getattr(testcase, subsystem + '_api') _load_domain_specific_configs(manager) for domain, entrypoint_name in entrypoint_name.items(): if domain is None: observed_cls = _get_backend_cls(testcase, subsystem) expected_cls = _get_entrypoint_cls( subsystem, entrypoint_name ) _assert_equal(expected_cls, observed_cls, subsystem) continue observed_cls = _get_domain_specific_backend_cls( manager, domain ) expected_cls = _get_entrypoint_cls(subsystem, entrypoint_name) _assert_equal(expected_cls, observed_cls, subsystem, domain) else: raise ValueError( '%r is not an expected value for entrypoint name' % entrypoint_name ) class IdentityTests(identity_tests.IdentityTests): def test_update_domain_set_immutable(self): self.skip_test_overrides('N/A: LDAP does not support multiple 
domains') def test_cannot_delete_disabled_domain_with_immutable(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_delete_immutable_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_create_domain_immutable(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_update_domain_unset_immutable(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_cannot_update_immutable_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_delete_user_with_group_project_domain_links(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_delete_group_with_user_project_domain_links(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_create_duplicate_user_name_in_different_domains(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_create_duplicate_group_name_in_different_domains(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_move_user_between_domains(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_move_group_between_domains(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_arbitrary_attributes_are_returned_from_get_user(self): self.skip_test_overrides( "Using arbitrary attributes doesn't work under LDAP" ) def test_new_arbitrary_attributes_are_returned_from_update_user(self): self.skip_test_overrides( "Using arbitrary attributes doesn't work under LDAP" ) def test_updated_arbitrary_attributes_are_returned_from_update_user(self): self.skip_test_overrides( "Using arbitrary attributes doesn't work under LDAP" ) def test_remove_user_from_group(self): self.skip_test_overrides('N/A: LDAP does not support write') def test_remove_user_from_group_returns_not_found(self): self.skip_test_overrides('N/A: LDAP 
does not support write') def test_delete_user_returns_not_found(self): self.skip_test_overrides('N/A: LDAP does not support write') class AssignmentTests(assignment_tests.AssignmentTests): def test_get_role_assignment_by_domain_not_found(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_del_role_assignment_by_domain_not_found(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_and_remove_role_grant_by_user_and_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_and_remove_correct_role_grant_from_a_mix(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_and_remove_role_grant_by_group_and_cross_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_and_remove_role_grant_by_user_and_cross_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_role_grant_by_group_and_cross_domain_project(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_role_grant_by_user_and_cross_domain_project(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_multi_role_grant_by_user_group_on_project_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_delete_role_with_user_and_group_grants(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_list_role_assignment_containing_names(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_roles_for_user_and_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_get_roles_for_groups_on_domain(self): self.skip_test_overrides( 'N/A: LDAP does not implement get_roles_for_groups; ' 'see bug 1333712 for details' ) def test_get_role_by_trustor_and_project(self): 
self.skip_test_overrides('Domains are read-only against LDAP') def test_get_roles_for_groups_on_project(self): self.skip_test_overrides( 'N/A: LDAP does not implement get_roles_for_groups; ' 'see bug 1333712 for details' ) def test_list_domains_for_groups(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_list_projects_for_groups(self): self.skip_test_overrides( 'N/A: LDAP does not implement list_projects_for_groups; ' 'see bug 1333712 for details' ) def test_multi_group_grants_on_project_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') def test_delete_user_grant_no_user(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_group_grant_no_group(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_user_with_project_roles(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_user_with_project_association(self): self.skip_test_overrides('N/A: LDAP has no write support') def test_delete_group_removes_role_assignments(self): self.skip_test_overrides('N/A: LDAP has no write support') class ResourceTests(resource_tests.ResourceTests): def test_create_duplicate_project_name_in_different_domains(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_move_project_between_domains(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_move_project_between_domains_with_clashing_names_fails(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_domain_delete_hierarchy(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_cache_layer_domain_crud(self): # TODO(morganfainberg): This also needs to be removed when full LDAP # implementation is submitted. No need to duplicate the above test, # just skip this time. 
self.skip_test_overrides('Domains are read-only against LDAP') def test_domain_crud(self): self.skip_test_overrides('N/A: Not relevant for multi ldap testing') def test_delete_domain_call_db_time(self): self.skip_test_overrides('Domains are read-only against LDAP') def test_create_project_with_parent_id_and_without_domain_id(self): self.skip_test_overrides('Resource LDAP has been removed') def test_create_domain_under_regular_project_hierarchy_fails(self): self.skip_test_overrides('Resource LDAP has been removed') def test_create_project_passing_is_domain_flag_true(self): self.skip_test_overrides('Resource LDAP has been removed') def test_check_leaf_projects(self): self.skip_test_overrides('Resource LDAP has been removed') def test_list_projects_in_subtree(self): self.skip_test_overrides('Resource LDAP has been removed') def test_list_projects_in_subtree_with_circular_reference(self): self.skip_test_overrides('Resource LDAP has been removed') def test_list_project_parents(self): self.skip_test_overrides('Resource LDAP has been removed') def test_update_project_enabled_cascade(self): self.skip_test_overrides('Resource LDAP has been removed') def test_cannot_enable_cascade_with_parent_disabled(self): self.skip_test_overrides('Resource LDAP has been removed') def test_hierarchical_projects_crud(self): self.skip_test_overrides('Resource LDAP has been removed') def test_create_project_under_disabled_one(self): self.skip_test_overrides('Resource LDAP has been removed') def test_create_project_with_invalid_parent(self): self.skip_test_overrides('Resource LDAP has been removed') def test_update_project_parent(self): self.skip_test_overrides('Resource LDAP has been removed') def test_enable_project_with_disabled_parent(self): self.skip_test_overrides('Resource LDAP has been removed') def test_disable_hierarchical_leaf_project(self): self.skip_test_overrides('Resource LDAP has been removed') def test_disable_hierarchical_not_leaf_project(self): 
self.skip_test_overrides('Resource LDAP has been removed') def test_delete_hierarchical_leaf_project(self): self.skip_test_overrides('Resource LDAP has been removed') def test_delete_hierarchical_not_leaf_project(self): self.skip_test_overrides('Resource LDAP has been removed') def test_check_hierarchy_depth(self): self.skip_test_overrides('Resource LDAP has been removed') def test_list_projects_for_alternate_domain(self): self.skip_test_overrides('N/A: LDAP does not support multiple domains') class LDAPTestSetup: """Common setup for LDAP tests.""" def setUp(self): super().setUp() self.ldapdb = self.useFixture(ldapdb.LDAPDatabase()) self.useFixture(database.Database()) self.load_backends() self.load_fixtures(default_fixtures) self.assert_backends() class BaseLDAPIdentity( LDAPTestSetup, IdentityTests, AssignmentTests, ResourceTests ): def _get_domain_fixture(self): """Return the static domain, since domains in LDAP are read-only.""" return PROVIDERS.resource_api.get_domain( CONF.identity.default_domain_id ) def get_config(self, domain_id): # Only one conf structure unless we are using separate domain backends return CONF def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def new_user_ref(self, domain_id, project_id=None, **kwargs): ref = unit.new_user_ref( domain_id=domain_id, project_id=project_id, **kwargs ) if 'id' not in kwargs: del ref['id'] return ref def get_user_enabled_vals(self, user): user_dn = PROVIDERS.identity_api.driver.user._id_to_dn_string( user['id'] ) enabled_attr_name = CONF.ldap.user_enabled_attribute ldap_ = PROVIDERS.identity_api.driver.user.get_connection() res = ldap_.search_s( user_dn, ldap.SCOPE_BASE, '(sn=%s)' % user['name'] ) if enabled_attr_name in res[0][1]: return res[0][1][enabled_attr_name] else: return None def 
test_build_tree(self): """Regression test for building the tree names.""" user_api = identity.backends.ldap.UserApi(CONF) self.assertTrue(user_api) self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn) def test_configurable_allowed_user_actions(self): user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) PROVIDERS.identity_api.get_user(user['id']) user['password'] = 'fäképass2' PROVIDERS.identity_api.update_user(user['id'], user) self.assertRaises( exception.Forbidden, PROVIDERS.identity_api.delete_user, user['id'] ) def test_user_filter(self): user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id']) self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) driver = PROVIDERS.identity_api._select_identity_driver( user_ref['domain_id'] ) driver.user.ldap_filter = '(CN=DOES_NOT_MATCH)' # invalidate the cache if the result is cached. PROVIDERS.identity_api.get_user.invalidate( PROVIDERS.identity_api, self.user_foo['id'] ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, self.user_foo['id'], ) def test_list_users_by_name_and_with_filter(self): # confirm that the user is not exposed when it does not match the # filter setting in conf even if it is requested by name in user list hints = driver_hints.Hints() hints.add_filter('name', self.user_foo['name']) domain_id = self.user_foo['domain_id'] driver = PROVIDERS.identity_api._select_identity_driver(domain_id) driver.user.ldap_filter = '(|(cn={})(cn={}))'.format( self.user_sna['id'], self.user_two['id'], ) users = PROVIDERS.identity_api.list_users( domain_scope=self._set_domain_scope(domain_id), hints=hints ) self.assertEqual(0, len(users)) def test_list_groups_by_name_and_with_filter(self): # Create some test groups. 
domain = self._get_domain_fixture() group_names = [] numgroups = 3 for _ in range(numgroups): group = unit.new_group_ref(domain_id=domain['id']) group = PROVIDERS.identity_api.create_group(group) group_names.append(group['name']) # confirm that the groups can all be listed groups = PROVIDERS.identity_api.list_groups( domain_scope=self._set_domain_scope(domain['id']) ) self.assertEqual(numgroups, len(groups)) # configure the group filter driver = PROVIDERS.identity_api._select_identity_driver(domain['id']) driver.group.ldap_filter = '(|(ou=%s)(ou=%s))' % tuple(group_names[:2]) # confirm that the group filter is working groups = PROVIDERS.identity_api.list_groups( domain_scope=self._set_domain_scope(domain['id']) ) self.assertEqual(2, len(groups)) # confirm that a group is not exposed when it does not match the # filter setting in conf even if it is requested by name in group list hints = driver_hints.Hints() hints.add_filter('name', group_names[2]) groups = PROVIDERS.identity_api.list_groups( domain_scope=self._set_domain_scope(domain['id']), hints=hints ) self.assertEqual(0, len(groups)) def test_remove_role_grant_from_user_and_project(self): PROVIDERS.assignment_api.create_grant( user_id=self.user_foo['id'], project_id=self.project_baz['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=self.user_foo['id'], project_id=self.project_baz['id'] ) self.assertDictEqual(self.role_member, roles_ref[0]) PROVIDERS.assignment_api.delete_grant( user_id=self.user_foo['id'], project_id=self.project_baz['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( user_id=self.user_foo['id'], project_id=self.project_baz['id'] ) self.assertEqual(0, len(roles_ref)) self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.delete_grant, user_id=self.user_foo['id'], project_id=self.project_baz['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) def 
test_get_and_remove_role_grant_by_group_and_project(self): new_domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], project_id=self.project_bar['id'] ) self.assertEqual([], roles_ref) self.assertEqual(0, len(roles_ref)) PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], project_id=self.project_bar['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], project_id=self.project_bar['id'] ) self.assertNotEmpty(roles_ref) self.assertDictEqual(self.role_member, roles_ref[0]) PROVIDERS.assignment_api.delete_grant( group_id=new_group['id'], project_id=self.project_bar['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], project_id=self.project_bar['id'] ) self.assertEqual(0, len(roles_ref)) self.assertRaises( exception.RoleAssignmentNotFound, PROVIDERS.assignment_api.delete_grant, group_id=new_group['id'], project_id=self.project_bar['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) def test_get_and_remove_role_grant_by_group_and_domain(self): # TODO(henry-nash): We should really rewrite the tests in # unit.resource.test_backends to be more flexible as to where the # domains are sourced from, so that we would not need to override such # tests here. This is raised as bug 1373865. 
new_domain = self._get_domain_fixture() new_group = unit.new_group_ref( domain_id=new_domain['id'], ) new_group = PROVIDERS.identity_api.create_group(new_group) new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id'] ) self.assertEqual(0, len(roles_ref)) PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], domain_id=new_domain['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id'] ) self.assertDictEqual(self.role_member, roles_ref[0]) PROVIDERS.assignment_api.delete_grant( group_id=new_group['id'], domain_id=new_domain['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) roles_ref = PROVIDERS.assignment_api.list_grants( group_id=new_group['id'], domain_id=new_domain['id'] ) self.assertEqual(0, len(roles_ref)) self.assertRaises( exception.NotFound, PROVIDERS.assignment_api.delete_grant, group_id=new_group['id'], domain_id=new_domain['id'], role_id=default_fixtures.MEMBER_ROLE_ID, ) def test_list_projects_for_user(self): domain = self._get_domain_fixture() user1 = self.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertThat(user_projects, matchers.HasLength(0)) # new grant(user1, role_member, project_bar) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) # new grant(user1, role_member, project_baz) PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_baz['id'], role_id=self.role_member['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) 
self.assertThat(user_projects, matchers.HasLength(2)) # Now, check number of projects through groups user2 = self.new_user_ref(domain_id=domain['id']) user2 = PROVIDERS.identity_api.create_user(user2) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user2['id'], group1['id']) # new grant(group1(user2), role_member, project_bar) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) # new grant(group1(user2), role_member, project_baz) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=self.project_baz['id'], role_id=self.role_member['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user2['id'] ) self.assertThat(user_projects, matchers.HasLength(2)) # new grant(group1(user2), role_other, project_bar) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=self.project_bar['id'], role_id=self.role_other['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( user2['id'] ) self.assertThat(user_projects, matchers.HasLength(2)) def test_list_projects_for_user_and_groups(self): domain = self._get_domain_fixture() # Create user1 user1 = self.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) # Create new group for user1 group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) # Add user1 to group1 PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) # Now, add grant to user1 and group1 in project_bar PROVIDERS.assignment_api.create_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) # The result is user1 has only one project granted user_projects = 
PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertThat(user_projects, matchers.HasLength(1)) # Now, delete user1 grant into project_bar and check PROVIDERS.assignment_api.delete_grant( user_id=user1['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) # The result is user1 has only one project granted. # Granted through group1. user_projects = PROVIDERS.assignment_api.list_projects_for_user( user1['id'] ) self.assertThat(user_projects, matchers.HasLength(1)) def test_list_projects_for_user_with_grants(self): domain = self._get_domain_fixture() new_user = self.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = PROVIDERS.identity_api.create_group(group2) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) PROVIDERS.identity_api.add_user_to_group(new_user['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(new_user['id'], group2['id']) PROVIDERS.assignment_api.create_grant( user_id=new_user['id'], project_id=self.project_bar['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=new_user['id'], project_id=project1['id'], role_id=self.role_admin['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], project_id=project2['id'], role_id=self.role_admin['id'], ) user_projects = PROVIDERS.assignment_api.list_projects_for_user( new_user['id'] ) self.assertEqual(3, len(user_projects)) def test_list_role_assignments_unfiltered(self): new_domain = self._get_domain_fixture() new_user = self.new_user_ref(domain_id=new_domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) 
new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) new_project = unit.new_project_ref(domain_id=new_domain['id']) PROVIDERS.resource_api.create_project(new_project['id'], new_project) # First check how many role grant already exist existing_assignments = len( PROVIDERS.assignment_api.list_role_assignments() ) PROVIDERS.assignment_api.create_grant( user_id=new_user['id'], project_id=new_project['id'], role_id=default_fixtures.OTHER_ROLE_ID, ) PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], project_id=new_project['id'], role_id=default_fixtures.ADMIN_ROLE_ID, ) # Read back the list of assignments - check it is gone up by 2 after_assignments = len( PROVIDERS.assignment_api.list_role_assignments() ) self.assertEqual(existing_assignments + 2, after_assignments) def test_list_group_members_when_no_members(self): # List group members when there is no member in the group. # No exception should be raised. group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) # If this doesn't raise, then the test is successful. 
        # (tail of test_list_group_members_when_no_members)
        # If this call returns without raising, an empty group can be listed.
        PROVIDERS.identity_api.list_users_in_group(group['id'])

    def test_list_domains(self):
        # We have more domains here than the parent class, check for the
        # correct number of domains for the multildap backend configs
        domain1 = unit.new_domain_ref()
        domain2 = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(domain1['id'], domain1)
        PROVIDERS.resource_api.create_domain(domain2['id'], domain2)
        domains = PROVIDERS.resource_api.list_domains()
        # 7 = the pre-existing domains from the multi-LDAP config plus the
        # two created above — presumably 5 exist before this test; TODO
        # confirm against the backend_ldap config fixtures.
        self.assertEqual(7, len(domains))
        domain_ids = []
        for domain in domains:
            domain_ids.append(domain.get('id'))
        self.assertIn(CONF.identity.default_domain_id, domain_ids)
        self.assertIn(domain1['id'], domain_ids)
        self.assertIn(domain2['id'], domain_ids)

    def test_authenticate_requires_simple_bind(self):
        # Authentication must go through an LDAP simple bind; with the
        # driver's bind credentials cleared, authenticate() is expected to
        # fail with AssertionError rather than silently succeed.
        user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            user['id'], self.project_baz['id'], role_member['id']
        )
        driver = PROVIDERS.identity_api._select_identity_driver(
            user['domain_id']
        )
        # Clear the service bind credentials so no fallback bind is possible.
        driver.user.LDAP_USER = None
        driver.user.LDAP_PASSWORD = None
        with self.make_request():
            self.assertRaises(
                AssertionError,
                PROVIDERS.identity_api.authenticate,
                user_id=user['id'],
                password=None,
            )

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_user_crud(self, mock_deprecator):
        # NOTE(stevemar): As of the Mitaka release, we now check for calls that
        # the LDAP write functionality has been deprecated.
user_dict = self.new_user_ref( domain_id=CONF.identity.default_domain_id ) user = PROVIDERS.identity_api.create_user(user_dict) args, _kwargs = mock_deprecator.call_args self.assertIn("create_user for the LDAP identity backend", args[1]) del user_dict['password'] user_ref = PROVIDERS.identity_api.get_user(user['id']) user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertLessEqual(user_dict.items(), user_ref_dict.items()) user_dict['password'] = uuid.uuid4().hex PROVIDERS.identity_api.update_user(user['id'], user_dict) args, _kwargs = mock_deprecator.call_args self.assertIn("update_user for the LDAP identity backend", args[1]) del user_dict['password'] user_ref = PROVIDERS.identity_api.get_user(user['id']) user_ref_dict = {x: user_ref[x] for x in user_ref} self.assertLessEqual(user_dict.items(), user_ref_dict.items()) # The group and domain CRUD tests below override the standard ones in # unit.identity.test_backends.py so that we can exclude the update name # test, since we do not (and will not) support the update of either group # or domain names with LDAP. In the tests below, the update is tested by # updating description. @mock.patch.object(versionutils, 'report_deprecated_feature') def test_group_crud(self, mock_deprecator): # NOTE(stevemar): As of the Mitaka release, we now check for calls that # the LDAP write functionality has been deprecated. 
        # (body of test_group_crud) Create a group and verify that the
        # deprecation warning for LDAP writes was reported.
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("create_group for the LDAP identity backend", args[1])

        group_ref = PROVIDERS.identity_api.get_group(group['id'])
        self.assertDictEqual(group, group_ref)
        # Update only the description: group names cannot be updated via the
        # LDAP driver (see the class-level comment above these tests).
        group['description'] = uuid.uuid4().hex
        PROVIDERS.identity_api.update_group(group['id'], group)
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("update_group for the LDAP identity backend", args[1])
        group_ref = PROVIDERS.identity_api.get_group(group['id'])
        self.assertDictEqual(group, group_ref)

    @mock.patch.object(versionutils, 'report_deprecated_feature')
    def test_add_user_group_deprecated(self, mock_deprecator):
        # Adding a user to a group is an LDAP write and must emit the
        # deprecation warning as well.
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        PROVIDERS.identity_api.add_user_to_group(user['id'], group['id'])
        args, _kwargs = mock_deprecator.call_args
        self.assertIn("add_user_to_group for the LDAP identity", args[1])

    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_group_crud(self):
        # Note(knikolla): Since delete logic has been deleted from LDAP,
        # this doesn't test caching on delete.
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group = PROVIDERS.identity_api.create_group(group)
        # cache the result
        PROVIDERS.identity_api.get_group(group['id'])
        group['description'] = uuid.uuid4().hex
        group_ref = PROVIDERS.identity_api.update_group(group['id'], group)
        # After the update, a fresh get_group() must reflect the new data,
        # i.e. the cached entry was invalidated by update_group().
        self.assertLessEqual(
            PROVIDERS.identity_api.get_group(group['id']).items(),
            group_ref.items(),
        )

    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_get_user(self):
        # Note(knikolla): Since delete logic has been deleted from LDAP,
        # this doesn't test caching on delete.
        # (body of test_cache_layer_get_user) Seed a user, warm the cache via
        # get_user(), then update and verify the cache was invalidated.
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        user['description'] = uuid.uuid4().hex
        # cache the result.
        PROVIDERS.identity_api.get_user(ref['id'])
        # update using identity api and get back updated user.
        user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
        # Both lookup paths (by id and by name) must return the updated data.
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user(ref['id']).items(),
            user_updated.items(),
        )
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user_by_name(
                ref['name'], ref['domain_id']
            ).items(),
            user_updated.items(),
        )

    @unit.skip_if_cache_disabled('identity')
    def test_cache_layer_get_user_by_name(self):
        # Note(knikolla): Since delete logic has been deleted from LDAP,
        # this doesn't test caching on delete.
        user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user = PROVIDERS.identity_api.create_user(user)
        ref = PROVIDERS.identity_api.get_user_by_name(
            user['name'], user['domain_id']
        )
        user['description'] = uuid.uuid4().hex
        user_updated = PROVIDERS.identity_api.update_user(ref['id'], user)
        # The by-name cache entry must also have been invalidated by the
        # update, not just the by-id entry.
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user(ref['id']).items(),
            user_updated.items(),
        )
        self.assertLessEqual(
            PROVIDERS.identity_api.get_user_by_name(
                ref['name'], ref['domain_id']
            ).items(),
            user_updated.items(),
        )

    def test_create_user_none_mapping(self):
        # When create a user where an attribute maps to None, the entry is
        # created without that attribute and it doesn't fail with a TypeError.
        driver = PROVIDERS.identity_api._select_identity_driver(
            CONF.identity.default_domain_id
        )
        # Ignore these attributes so that the None-valued mapping below is
        # exercised rather than rejected up front.
        driver.user.attribute_ignore = [
            'enabled',
            'email',
            'projects',
            'projectId',
        ]
        user = self.new_user_ref(
            domain_id=CONF.identity.default_domain_id,
            project_id='maps_to_none',
        )
        # If this doesn't raise, then the test is successful.
        # (tail of test_create_user_none_mapping) The create itself is the
        # assertion: no TypeError despite the None-mapped attribute.
        user = PROVIDERS.identity_api.create_user(user)

    def test_unignored_user_none_mapping(self):
        # Ensure that an attribute that maps to None that is not explicitly
        # ignored in configuration is implicitly ignored without triggering
        # an error.
        driver = PROVIDERS.identity_api._select_identity_driver(
            CONF.identity.default_domain_id
        )
        driver.user.attribute_ignore = [
            'enabled',
            'email',
            'projects',
            'projectId',
        ]
        user = self.new_user_ref(domain_id=CONF.identity.default_domain_id)
        user_ref = PROVIDERS.identity_api.create_user(user)
        # If this doesn't raise, then the test is successful.
        PROVIDERS.identity_api.get_user(user_ref['id'])

    def test_update_user_name(self):
        """A user's name cannot be changed through the LDAP driver."""
        # The parent-class test updates the name and expects success; here
        # the same flow must raise Conflict instead.
        self.assertRaises(
            exception.Conflict,
            super().test_update_user_name,
        )

    def test_user_id_comma(self):
        """Even if the user has a , in their ID, groups can be listed."""
        # Create a user with a , in their ID
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt this
        # direct into the driver and bypass the manager layer.
        user_id = 'Doe, John'
        user = self.new_user_ref(
            id=user_id, domain_id=CONF.identity.default_domain_id
        )
        user = PROVIDERS.identity_api.driver.create_user(user_id, user)

        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = PROVIDERS.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break

        # Create a group
        group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id)
        group_id = group['id']
        group = PROVIDERS.identity_api.driver.create_group(group_id, group)
        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        # (tail of test_user_id_comma) Discover the group's public ID via the
        # manager, mirroring how the user's public ID was found above.
        ref_list = PROVIDERS.identity_api.list_groups()
        public_group_id = None
        for ref in ref_list:
            if ref['name'] == group['name']:
                public_group_id = ref['id']
                break

        # Put the user in the group
        PROVIDERS.identity_api.add_user_to_group(
            public_user_id, public_group_id
        )

        # List groups for user.
        ref_list = PROVIDERS.identity_api.list_groups_for_user(public_user_id)
        for ref in ref_list:
            # membership_expires_at is added by the manager and is not part
            # of the reference group dict, so drop it before comparing.
            del ref['membership_expires_at']
        group['id'] = public_group_id
        self.assertThat(ref_list, matchers.Equals([group]))

    def test_user_id_comma_grants(self):
        """List user and group grants, even with a comma in the user's ID."""
        # Create a user with a , in their ID
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt this
        # direct into the driver and bypass the manager layer
        user_id = 'Doe, John'
        user = self.new_user_ref(
            id=user_id, domain_id=CONF.identity.default_domain_id
        )
        PROVIDERS.identity_api.driver.create_user(user_id, user)

        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = PROVIDERS.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break

        # Grant the user a role on a project.
        role_id = default_fixtures.MEMBER_ROLE_ID
        project_id = self.project_baz['id']
        PROVIDERS.assignment_api.create_grant(
            role_id, user_id=public_user_id, project_id=project_id
        )
        role_ref = PROVIDERS.assignment_api.get_grant(
            role_id, user_id=public_user_id, project_id=project_id
        )
        self.assertEqual(role_id, role_ref['id'])

    def test_user_enabled_ignored_disable_error(self):
        # When the server is configured so that the enabled attribute is
        # ignored for users, users cannot be disabled.
        self.config_fixture.config(
            group='ldap', user_attribute_ignore=['enabled']
        )
        # Need to re-load backends for the config change to take effect.
        self.load_backends()
        # Attempt to disable the user.
self.assertRaises( exception.ForbiddenAction, PROVIDERS.identity_api.update_user, self.user_foo['id'], {'enabled': False}, ) user_info = PROVIDERS.identity_api.get_user(self.user_foo['id']) # If 'enabled' is ignored then 'enabled' isn't returned as part of the # ref. self.assertNotIn('enabled', user_info) def test_group_enabled_ignored_disable_error(self): # When the server is configured so that the enabled attribute is # ignored for groups, groups cannot be disabled. self.config_fixture.config( group='ldap', group_attribute_ignore=['enabled'] ) # Need to re-load backends for the config change to take effect. self.load_backends() # There's no group fixture so create a group. new_domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=new_domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) # Attempt to disable the group. self.assertRaises( exception.ForbiddenAction, PROVIDERS.identity_api.update_group, new_group['id'], {'enabled': False}, ) group_info = PROVIDERS.identity_api.get_group(new_group['id']) # If 'enabled' is ignored then 'enabled' isn't returned as part of the # ref. 
self.assertNotIn('enabled', group_info) def test_list_role_assignment_by_domain(self): """Multiple domain assignments are not supported.""" self.assertRaises( ( exception.Forbidden, exception.DomainNotFound, exception.ValidationError, ), super().test_list_role_assignment_by_domain, ) def test_list_role_assignment_by_user_with_domain_group_roles(self): """Multiple domain assignments are not supported.""" self.assertRaises( ( exception.Forbidden, exception.DomainNotFound, exception.ValidationError, ), super().test_list_role_assignment_by_user_with_domain_group_roles, ) def test_list_role_assignment_using_sourced_groups_with_domains(self): """Multiple domain assignments are not supported.""" self.assertRaises( ( exception.Forbidden, exception.ValidationError, exception.DomainNotFound, ), super().test_list_role_assignment_using_sourced_groups_with_domains, ) def test_create_project_with_domain_id_and_without_parent_id(self): """Multiple domains are not supported.""" self.assertRaises( exception.ValidationError, super().test_create_project_with_domain_id_and_without_parent_id, ) def test_create_project_with_domain_id_mismatch_to_parent_domain(self): """Multiple domains are not supported.""" self.assertRaises( exception.ValidationError, super().test_create_project_with_domain_id_mismatch_to_parent_domain, ) def test_remove_foreign_assignments_when_deleting_a_domain(self): """Multiple domains are not supported.""" self.assertRaises( (exception.ValidationError, exception.DomainNotFound), super().test_remove_foreign_assignments_when_deleting_a_domain, ) class LDAPIdentity(BaseLDAPIdentity): def assert_backends(self): _assert_backends( self, assignment='sql', identity='ldap', resource='sql' ) def test_list_domains(self): domains = PROVIDERS.resource_api.list_domains() default_domain = unit.new_domain_ref( description='The default domain', id=CONF.identity.default_domain_id, name='Default', ) self.assertEqual([default_domain], domains) def 
test_authenticate_wrong_credentials(self): self.assertRaises( exception.LDAPInvalidCredentialsError, PROVIDERS.identity_api.driver.user.get_connection, user='demo', password='demo', end_user_auth=True, ) def test_configurable_allowed_project_actions(self): domain = self._get_domain_fixture() project = unit.new_project_ref(domain_id=domain['id']) project = PROVIDERS.resource_api.create_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertEqual(project['id'], project_ref['id']) project['enabled'] = False PROVIDERS.resource_api.update_project(project['id'], project) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project['id'], ) def test_user_enable_attribute_mask(self): self.config_fixture.config( group='ldap', user_enabled_mask=2, user_enabled_default='512' ) self.ldapdb.clear() self.load_backends() user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user_ref = PROVIDERS.identity_api.create_user(user) # Use assertIs rather than assertTrue because assertIs will assert the # value is a Boolean as expected. 
self.assertIs(True, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([512], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) user['enabled'] = False user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user) self.assertIs(False, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([514], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(False, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) user['enabled'] = True user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user) self.assertIs(True, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([512], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) self.assertNotIn('enabled_nomask', user_ref) def test_user_enabled_invert(self): self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_default='False', ) self.ldapdb.clear() self.load_backends() user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = self.new_user_ref( enabled=False, domain_id=CONF.identity.default_domain_id ) user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) # Ensure that the LDAP attribute is False for a newly created # enabled user. user_ref = PROVIDERS.identity_api.create_user(user1) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) # Ensure that the LDAP attribute is True for a disabled user. 
user1['enabled'] = False user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user1) self.assertIs(False, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([True], enabled_vals) # Enable the user and ensure that the LDAP attribute is True again. user1['enabled'] = True user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user1) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) # Ensure that the LDAP attribute is True for a newly created # disabled user. user_ref = PROVIDERS.identity_api.create_user(user2) self.assertIs(False, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([True], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(False, user_ref['enabled']) # Ensure that the LDAP attribute is inverted for a newly created # user when the user_enabled_default setting is used. user_ref = PROVIDERS.identity_api.create_user(user3) self.assertIs(True, user_ref['enabled']) enabled_vals = self.get_user_enabled_vals(user_ref) self.assertEqual([False], enabled_vals) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_enabled_invert_default_str_value(self, mock_ldap_get): self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_default='False', ) # Mock the search results to return an entry with # no enabled value. mock_ldap_get.return_value = ( 'cn=junk,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'cn': ['junk'], }, ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('junk') # Ensure that the model enabled attribute is inverted # from the resource default. 
self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'search_s') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_filter_ldap_result_by_attr( self, mock_simple_bind_s, mock_search_s, mock_connect ): # Mock the ldap search results to return user entries with # user_name_attribute('sn') value has emptyspaces, emptystring # and attibute itself is not set. mock_search_s.return_value = [ ( 'sn=junk1,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'sn': ['junk1'], }, ), ( '', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], }, ), ( 'sn=,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'sn': [''], }, ), ( 'sn= ,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'sn': [' '], }, ), ] user_api = identity.backends.ldap.UserApi(CONF) user_refs = user_api.get_all() # validate that keystone.identity.backends.ldap.common.BaseLdap. # _filter_ldap_result_by_attr() method filtered the ldap query results # whose name attribute values has emptyspaces, emptystring # and attibute itself is not set. self.assertEqual(1, len(user_refs)) self.assertEqual('junk1', user_refs[0]['name']) self.assertEqual('sn=junk1,dc=example,dc=com', user_refs[0]['dn']) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'search_s') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_filter_ldap_result_with_case_sensitive_attr( self, mock_simple_bind_s, mock_search_s, mock_connect ): # Mock the ldap search results to return user entries # irrespective of lowercase and uppercase characters in # ldap_result attribute keys e.g. 
{'Sn': ['junk1']} with # user_name_attribute('sn') mock_search_s.return_value = [ ( 'sn=junk1,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'sN': ['junk1'], }, ), ( 'sn=junk1,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'Sn': ['junk1'], }, ), ( 'sn=junk1,dc=example,dc=com', { 'cn': [uuid.uuid4().hex], 'email': [uuid.uuid4().hex], 'sn': [' '], }, ), ] user_api = identity.backends.ldap.UserApi(CONF) user_refs = user_api.get_all() # validate that keystone.identity.backends.ldap.common.BaseLdap. # _filter_ldap_result_by_attr() method filtered the ldap query results # whose name attribute keys having case insensitive characters. self.assertEqual(2, len(user_refs)) self.assertEqual('junk1', user_refs[0]['name']) self.assertEqual('sn=junk1,dc=example,dc=com', user_refs[0]['dn']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_enabled_attribute_handles_expired(self, mock_ldap_get): # If using 'passwordisexpired' as enabled attribute, and inverting it, # Then an unauthorized user (expired password) should not be enabled. self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_attribute='passwordisexpired', ) mock_ldap_get.return_value = ( 'uid=123456789,c=us,ou=our_ldap,o=acme.com', { 'uid': [123456789], 'mail': ['shaun@acme.com'], 'passwordisexpired': ['TRUE'], 'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com'], }, ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('123456789') self.assertIs(False, user_ref['enabled']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get): # If using 'passwordisexpired' as enabled attribute, and inverting it, # and the result is utf8 encoded, then the an authorized user should # be enabled. 
self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_attribute='passwordisexpired', ) mock_ldap_get.return_value = ( 'uid=123456789,c=us,ou=our_ldap,o=acme.com', { 'uid': [123456789], 'mail': ['shaun@acme.com'], 'passwordisexpired': ['false'], 'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com'], }, ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('123456789') self.assertIs(True, user_ref['enabled']) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_user_api_get_connection_no_user_password(self, mocked_method): """Bind anonymously when the user and password are blank.""" # Ensure the username/password are in-fact blank self.config_fixture.config(group='ldap', user=None, password=None) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) self.assertTrue(mocked_method.called) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') def test_chase_referrals_off(self, mocked_fakeldap): self.config_fixture.config( group='ldap', url='fake://memory', chase_referrals=False ) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # chase_referrals. Check to make sure the value of chase_referrals # is as expected. self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals']) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') def test_chase_referrals_on(self, mocked_fakeldap): self.config_fixture.config( group='ldap', url='fake://memory', chase_referrals=True ) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # chase_referrals. Check to make sure the value of chase_referrals # is as expected. 
self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals']) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') def test_debug_level_set(self, mocked_fakeldap): level = 12345 self.config_fixture.config( group='ldap', url='fake://memory', debug_level=level ) user_api = identity.backends.ldap.UserApi(CONF) user_api.get_connection(user=None, password=None) # The last call_arg should be a dictionary and should contain # debug_level. Check to make sure the value of debug_level # is as expected. self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level']) def test_user_extra_attribute_mapping(self): self.config_fixture.config( group='ldap', user_additional_attribute_mapping=['description:name'], ) self.load_backends() user = self.new_user_ref( name='EXTRA_ATTRIBUTES', password='extra', domain_id=CONF.identity.default_domain_id, ) user = PROVIDERS.identity_api.create_user(user) dn, attrs = PROVIDERS.identity_api.driver.user._ldap_get(user['id']) self.assertThat([user['name']], matchers.Equals(attrs['description'])) def test_user_description_attribute_mapping(self): self.config_fixture.config( group='ldap', user_description_attribute='displayName' ) self.load_backends() user = self.new_user_ref( domain_id=CONF.identity.default_domain_id, displayName=uuid.uuid4().hex, ) description = user['displayName'] user = PROVIDERS.identity_api.create_user(user) res = PROVIDERS.identity_api.driver.user.get_all() new_user = [u for u in res if u['id'] == user['id']][0] self.assertThat(new_user['description'], matchers.Equals(description)) def test_user_extra_attribute_mapping_description_is_returned(self): # Given a mapping like description:description, the description is # returned. 
self.config_fixture.config( group='ldap', user_additional_attribute_mapping=['description:description'], ) self.load_backends() user = self.new_user_ref( domain_id=CONF.identity.default_domain_id, description=uuid.uuid4().hex, ) description = user['description'] user = PROVIDERS.identity_api.create_user(user) res = PROVIDERS.identity_api.driver.user.get_all() new_user = [u for u in res if u['id'] == user['id']][0] self.assertThat(new_user['description'], matchers.Equals(description)) def test_user_with_missing_id(self): # create a user that doesn't have the id attribute ldap_ = PROVIDERS.identity_api.driver.user.get_connection() # `sn` is used for the attribute in the DN because it's allowed by # the entry's objectclasses so that this test could conceivably run in # the live tests. ldap_id_field = 'sn' ldap_id_value = uuid.uuid4().hex dn = '{}={},ou=Users,cn=example,cn=com'.format( ldap_id_field, ldap_id_value, ) modlist = [ ('objectClass', ['person', 'inetOrgPerson']), (ldap_id_field, [ldap_id_value]), ('mail', ['email@example.com']), ('userPassword', [uuid.uuid4().hex]), ] ldap_.add_s(dn, modlist) # make sure the user doesn't break other users users = PROVIDERS.identity_api.driver.user.get_all() self.assertThat(users, matchers.HasLength(len(default_fixtures.USERS))) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_mixed_case_attribute(self, mock_ldap_get): # Mock the search results to return attribute names # with unexpected case. 
        mock_ldap_get.return_value = (
            'cn=junk,dc=example,dc=com',
            {
                'sN': [uuid.uuid4().hex],
                'MaIl': [uuid.uuid4().hex],
                'cn': ['junk'],
            },
        )
        user = PROVIDERS.identity_api.get_user('junk')
        # The backend must match attribute names case-insensitively, so
        # the mixed-case 'sN' and 'MaIl' keys still map onto the user's
        # name and email fields.
        self.assertEqual(mock_ldap_get.return_value[1]['sN'][0], user['name'])
        self.assertEqual(
            mock_ldap_get.return_value[1]['MaIl'][0], user['email']
        )

    def test_parse_extra_attribute_mapping(self):
        # Mappings are 'ldap_attr:keystone_attr' pairs. Per the expected
        # result below: entries without a colon are dropped, an empty
        # right-hand side maps to '', and 'description:name:something'
        # collapses to the same description->name mapping as the first
        # entry.
        option_list = [
            'description:name',
            'gecos:password',
            'fake:invalid',
            'invalid1',
            'invalid2:',
            'description:name:something',
        ]
        mapping = PROVIDERS.identity_api.driver.user._parse_extra_attrs(
            option_list
        )
        expected_dict = {
            'description': 'name',
            'gecos': 'password',
            'fake': 'invalid',
            'invalid2': '',
        }
        self.assertDictEqual(expected_dict, mapping)

    def test_create_domain(self):
        # Creating a domain through the resource API is rejected when the
        # LDAP identity backend is in use.
        domain = unit.new_domain_ref()
        self.assertRaises(
            exception.ValidationError,
            PROVIDERS.resource_api.create_domain,
            domain['id'],
            domain,
        )

    @unit.skip_if_no_multiple_domains_support
    def test_create_domain_case_sensitivity(self):
        # domains are read-only, so case sensitivity isn't an issue
        ref = unit.new_domain_ref()
        self.assertRaises(
            exception.Forbidden,
            PROVIDERS.resource_api.create_domain,
            ref['id'],
            ref,
        )

    def test_domain_rename_invalidates_get_domain_by_name_cache(self):
        # The base-class rename test cannot run against this backend;
        # the rename attempt is expected to raise Forbidden instead.
        parent = super()
        self.assertRaises(
            exception.Forbidden,
            parent.test_domain_rename_invalidates_get_domain_by_name_cache,
        )

    def test_project_rename_invalidates_get_project_by_name_cache(self):
        # As above: project renames are forbidden on this backend.
        parent = super()
        self.assertRaises(
            exception.Forbidden,
            parent.test_project_rename_invalidates_get_project_by_name_cache,
        )

    def test_project_crud(self):
        # NOTE(topol): LDAP implementation does not currently support the
        # updating of a project name so this method override
        # provides a different update test
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        project = PROVIDERS.resource_api.create_project(project['id'], project)
        project_ref = PROVIDERS.resource_api.get_project(project['id'])
        self.assertDictEqual(project, project_ref)
project['description'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project['id'], ) @unit.skip_if_cache_disabled('assignment') def test_cache_layer_project_crud(self): # NOTE(morganfainberg): LDAP implementation does not currently support # updating project names. This method override provides a different # update test. project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project_id = project['id'] # Create a project project = PROVIDERS.resource_api.create_project(project_id, project) PROVIDERS.resource_api.get_project(project_id) updated_project = copy.deepcopy(project) updated_project['description'] = uuid.uuid4().hex # Update project, bypassing resource manager PROVIDERS.resource_api.driver.update_project( project_id, updated_project ) # Verify get_project still returns the original project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Invalidate cache PROVIDERS.resource_api.get_project.invalidate( PROVIDERS.resource_api, project_id ) # Verify get_project now returns the new project self.assertLessEqual( updated_project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Update project using the resource_api manager back to original PROVIDERS.resource_api.update_project(project['id'], project) # Verify get_project returns the original project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # Delete project bypassing resource_api PROVIDERS.resource_api.driver.delete_project(project_id) # Verify get_project still returns the project_ref self.assertLessEqual( project.items(), PROVIDERS.resource_api.get_project(project_id).items(), ) # 
Invalidate cache PROVIDERS.resource_api.get_project.invalidate( PROVIDERS.resource_api, project_id ) # Verify ProjectNotFound now raised self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project_id, ) # recreate project PROVIDERS.resource_api.create_project(project_id, project) PROVIDERS.resource_api.get_project(project_id) # delete project PROVIDERS.resource_api.delete_project(project_id) # Verify ProjectNotFound is raised self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project_id, ) def test_update_is_domain_field(self): domain = self._get_domain_fixture() project = unit.new_project_ref(domain_id=domain['id']) project = PROVIDERS.resource_api.create_project(project['id'], project) # Try to update the is_domain field to True project['is_domain'] = True self.assertRaises( exception.ValidationError, PROVIDERS.resource_api.update_project, project['id'], project, ) def test_multi_role_grant_by_user_group_on_project_domain(self): # This is a partial implementation of the standard test that # is defined in unit.assignment.test_backends.py. It omits # both domain and group grants. since neither of these are # yet supported by the ldap backend. 
role_list = [] for _ in range(2): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user1 = PROVIDERS.identity_api.create_user(user1) project1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project1['id'], project1) PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=user1['id'], project_id=project1['id'], role_id=role_list[0]['id'], ) PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=user1['id'], project_id=project1['id'], role_id=role_list[1]['id'], ) # Although list_grants are not yet supported, we can test the # alternate way of getting back lists of grants, where user # and group roles are combined. Only directly assigned user # roles are available, since group grants are not yet supported combined_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_project( user1['id'], project1['id'] ) ) self.assertEqual(2, len(combined_list)) self.assertIn(role_list[0]['id'], combined_list) self.assertIn(role_list[1]['id'], combined_list) # Finally, although domain roles are not implemented, check we can # issue the combined get roles call with benign results, since thus is # used in token generation combined_role_list = ( PROVIDERS.assignment_api.get_roles_for_user_and_domain( user1['id'], CONF.identity.default_domain_id ) ) self.assertEqual(0, len(combined_role_list)) def test_get_default_domain_by_name(self): domain = self._get_domain_fixture() domain_ref = PROVIDERS.resource_api.get_domain_by_name(domain['name']) self.assertEqual(domain_ref, domain) def test_base_ldap_connection_deref_option(self): def get_conn(deref_name): self.config_fixture.config( group='ldap', alias_dereferencing=deref_name ) base_ldap = common_ldap.BaseLdap(CONF) return base_ldap.get_connection() conn = get_conn('default') self.assertEqual( ldap.get_option(ldap.OPT_DEREF), 
conn.get_option(ldap.OPT_DEREF) ) conn = get_conn('always') self.assertEqual(ldap.DEREF_ALWAYS, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('finding') self.assertEqual(ldap.DEREF_FINDING, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('never') self.assertEqual(ldap.DEREF_NEVER, conn.get_option(ldap.OPT_DEREF)) conn = get_conn('searching') self.assertEqual(ldap.DEREF_SEARCHING, conn.get_option(ldap.OPT_DEREF)) def test_list_users_no_dn(self): users = PROVIDERS.identity_api.list_users() self.assertEqual(len(default_fixtures.USERS), len(users)) user_ids = {user['id'] for user in users} expected_user_ids = { getattr(self, 'user_%s' % user['name'])['id'] for user in default_fixtures.USERS } for user_ref in users: self.assertNotIn('dn', user_ref) self.assertEqual(expected_user_ids, user_ids) def test_list_groups_no_dn(self): # Create some test groups. domain = self._get_domain_fixture() expected_group_ids = [] numgroups = 3 for _ in range(numgroups): group = unit.new_group_ref(domain_id=domain['id']) group = PROVIDERS.identity_api.create_group(group) expected_group_ids.append(group['id']) # Fetch the test groups and ensure that they don't contain a dn. groups = PROVIDERS.identity_api.list_groups() self.assertEqual(numgroups, len(groups)) group_ids = {group['id'] for group in groups} for group_ref in groups: self.assertNotIn('dn', group_ref) self.assertEqual(set(expected_group_ids), group_ids) def test_list_groups_for_user_no_dn(self): # Create a test user. user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) # Create some test groups and add the test user as a member. 
domain = self._get_domain_fixture() expected_group_ids = [] numgroups = 3 for _ in range(numgroups): group = unit.new_group_ref(domain_id=domain['id']) group = PROVIDERS.identity_api.create_group(group) expected_group_ids.append(group['id']) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) # Fetch the groups for the test user # and ensure they don't contain a dn. groups = PROVIDERS.identity_api.list_groups_for_user(user['id']) self.assertEqual(numgroups, len(groups)) group_ids = {group['id'] for group in groups} for group_ref in groups: self.assertNotIn('dn', group_ref) self.assertEqual(set(expected_group_ids), group_ids) def test_user_id_attribute_in_create(self): driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.user.id_attr = 'mail' user = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) user_ref = PROVIDERS.identity_api.get_user(user['id']) # 'email' attribute should've created because it is also being used # as user_id self.assertEqual(user_ref['id'], user_ref['email']) def test_user_id_attribute_map(self): driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.user.id_attr = 'mail' user_ref = PROVIDERS.identity_api.get_user(self.user_foo['email']) # the user_id_attribute map should be honored, which means # user_ref['id'] should contains the email attribute self.assertEqual(self.user_foo['email'], user_ref['id']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_get_multivalued_attribute_id_from_dn(self, mock_ldap_get): driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.user.id_attr = 'mail' # make 'email' multivalued so we can test the error condition email1 = uuid.uuid4().hex email2 = uuid.uuid4().hex # Mock the ldap search results to return user entries with # user_name_attribute('sn') value has emptyspaces, emptystring # and 
attribute itself is not set.
        mock_ldap_get.return_value = (
            'cn=users,dc=example,dc=com',
            {
                'mail': [email1, email2],
            },
        )

        # This is not a valid scenario, since we do not support multiple value
        # attribute id on DN.
        self.assertRaises(
            exception.NotFound, PROVIDERS.identity_api.get_user, email1
        )

    @mock.patch.object(common_ldap.BaseLdap, '_ldap_get')
    def test_raise_not_found_dn_for_multivalued_attribute_id(
        self, mock_ldap_get
    ):
        driver = PROVIDERS.identity_api._select_identity_driver(
            CONF.identity.default_domain_id
        )
        driver.user.id_attr = 'mail'

        # make 'email' multivalued so we can test the error condition
        email1 = uuid.uuid4().hex
        email2 = uuid.uuid4().hex
        # NOTE: the DN here does not contain the id attribute ('mail'),
        # so the id cannot be recovered from the DN either.
        mock_ldap_get.return_value = (
            'cn=nobodycares,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'mail': [email1, email2],
                'cn': 'nobodycares',
            },
        )

        # This is not a valid scenario, since we do not support multiple value
        # attribute id on DN.
        self.assertRaises(
            exception.NotFound, PROVIDERS.identity_api.get_user, email1
        )

    @mock.patch.object(common_ldap.BaseLdap, '_ldap_get')
    def test_get_id_not_in_dn(self, mock_ldap_get):
        # When the configured id attribute is absent from the DN, the id
        # must be taken from the entry's attribute value instead.
        driver = PROVIDERS.identity_api._select_identity_driver(
            CONF.identity.default_domain_id
        )
        driver.user.id_attr = 'sAMAccountName'
        user_id = uuid.uuid4().hex
        mock_ldap_get.return_value = (
            'cn=someuser,dc=example,dc=com',
            {
                'cn': 'someuser',
                'sn': [uuid.uuid4().hex],
                'sAMAccountName': [user_id],
            },
        )
        user_ref = PROVIDERS.identity_api.get_user(user_id)
        self.assertEqual(user_id, user_ref['id'])

    @mock.patch.object(common_ldap.BaseLdap, '_ldap_get')
    def test_id_attribute_not_found(self, mock_ldap_get):
        # An entry missing the id attribute entirely cannot be resolved
        # and surfaces as NotFound.
        mock_ldap_get.return_value = (
            'cn=nobodycares,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
            },
        )

        user_api = identity.backends.ldap.UserApi(CONF)
        self.assertRaises(exception.NotFound, user_api.get, 'nobodycares')

    @mock.patch.object(common_ldap.BaseLdap, '_ldap_get')
    def test_user_id_not_in_dn(self, mock_ldap_get):
        driver = PROVIDERS.identity_api._select_identity_driver(
            CONF.identity.default_domain_id
        )
driver.user.id_attr = 'uid' driver.user.attribute_mapping['name'] = 'cn' mock_ldap_get.return_value = ( 'foo=bar,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'foo': ['bar'], 'cn': ['junk'], 'uid': ['crap'], }, ) user_ref = PROVIDERS.identity_api.get_user('crap') self.assertEqual('crap', user_ref['id']) self.assertEqual('junk', user_ref['name']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_name_in_dn(self, mock_ldap_get): driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.user.id_attr = 'SAMAccountName' driver.user.attribute_mapping['name'] = 'cn' mock_ldap_get.return_value = ( 'cn=Foo Bar,dc=example,dc=com', { 'sn': [uuid.uuid4().hex], 'cn': ['Foo Bar'], 'SAMAccountName': ['crap'], }, ) user_ref = PROVIDERS.identity_api.get_user('crap') self.assertEqual('crap', user_ref['id']) self.assertEqual('Foo Bar', user_ref['name']) def test_identity_manager_catches_forbidden_when_deleting_a_project(self): # The identity API registers a callback that listens for notifications # that a project has been deleted. When it receives one, it uses the ID # and attempts to clear any users who have `default_project_id` # attributes associated to that project. Since the LDAP backend is # read-only, clearing the `default_project_id` requires a write which # isn't possible. 
        project = unit.new_project_ref(
            domain_id=CONF.identity.default_domain_id
        )
        project = PROVIDERS.resource_api.create_project(project['id'], project)

        # Force the LDAP write path to raise Forbidden; the project delete
        # must still succeed because the identity manager catches it.
        with mock.patch.object(
            ldap_identity.Identity, '_disallow_write'
        ) as mocked:
            mocked.side_effect = exception.Forbidden()
            PROVIDERS.resource_api.delete_project(project['id'])
            mocked.assert_called_once()


class LDAPLimitTests(unit.TestCase, identity_tests.LimitTests):
    # Exercises list-limit truncation against the LDAP identity driver.

    def setUp(self):
        super().setUp()

        self.useFixture(ldapdb.LDAPDatabase())
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)
        identity_tests.LimitTests.setUp(self)
        _assert_backends(
            self, assignment='sql', identity='ldap', resource='sql'
        )

    def config_overrides(self):
        super().config_overrides()
        self.config_fixture.config(group='identity', driver='ldap')
        # One fewer than the fixture user count so list truncation is
        # actually triggered by the tests.
        self.config_fixture.config(
            group='identity', list_limit=len(default_fixtures.USERS) - 1
        )

    def config_files(self):
        config_files = super().config_files()
        config_files.append(unit.dirs.tests_conf('backend_ldap.conf'))
        return config_files


class LDAPIdentityEnabledEmulation(LDAPIdentity, unit.TestCase):
    # Re-runs the LDAPIdentity suite with user_enabled_emulation turned on.

    def setUp(self):
        super().setUp()
        _assert_backends(self, identity='ldap')

    def load_fixtures(self, fixtures):
        # Override super impl since need to create group container.
super(LDAPIdentity, self).load_fixtures(fixtures) for obj in [ self.project_bar, self.project_baz, self.user_foo, self.user_two, self.user_badguy, ]: obj.setdefault('enabled', True) def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def config_overrides(self): super().config_overrides() self.config_fixture.config(group='ldap', user_enabled_emulation=True) def test_project_crud(self): # NOTE(topol): LDAPIdentityEnabledEmulation will create an # enabled key in the project dictionary so this # method override handles this side-effect project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) # PROVIDERS.resource_api.create_project adds an enabled # key with a value of True when LDAPIdentityEnabledEmulation # is used so we now add this expected key to the project dictionary project['enabled'] = True self.assertDictEqual(project, project_ref) project['description'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project['id'], ) def test_user_auth_emulated(self): driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.user.enabled_emulation_dn = 'cn=test,dc=test' with self.make_request(): PROVIDERS.identity_api.authenticate( user_id=self.user_foo['id'], password=self.user_foo['password'] ) def test_user_enable_attribute_mask(self): self.skip_test_overrides( "Enabled emulation conflicts with enabled mask" ) def test_user_enabled_use_group_config(self): # Establish enabled-emulation group name to later query its 
members group_name = 'enabled_users' driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) group_dn = f'cn={group_name},{driver.group.tree_dn}' self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True, user_enabled_emulation_dn=group_dn, group_name_attribute='cn', group_member_attribute='uniqueMember', group_objectclass='groupOfUniqueNames', ) self.ldapdb.clear() self.load_backends() # Create a user and ensure they are enabled. user1 = unit.new_user_ref( enabled=True, domain_id=CONF.identity.default_domain_id ) user_ref = PROVIDERS.identity_api.create_user(user1) self.assertIs(True, user_ref['enabled']) # Get a user and ensure they are enabled. user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) # Ensure state matches the group config group_ref = PROVIDERS.identity_api.get_group_by_name( group_name, CONF.identity.default_domain_id ) PROVIDERS.identity_api.check_user_in_group( user_ref['id'], group_ref['id'] ) def test_user_enabled_use_group_config_with_ids(self): # Establish enabled-emulation group name to later query its members group_name = 'enabled_users' driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) group_dn = f'cn={group_name},{driver.group.tree_dn}' self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True, user_enabled_emulation_dn=group_dn, group_name_attribute='cn', group_member_attribute='memberUid', group_members_are_ids=True, group_objectclass='posixGroup', ) self.ldapdb.clear() self.load_backends() # Create a user and ensure they are enabled. user1 = unit.new_user_ref( enabled=True, domain_id=CONF.identity.default_domain_id ) user_ref = PROVIDERS.identity_api.create_user(user1) self.assertIs(True, user_ref['enabled']) # Get a user and ensure they are enabled. 
user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) # Ensure state matches the group config group_ref = PROVIDERS.identity_api.get_group_by_name( group_name, CONF.identity.default_domain_id ) PROVIDERS.identity_api.check_user_in_group( user_ref['id'], group_ref['id'] ) def test_user_enabled_invert(self): self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_default='False', ) self.ldapdb.clear() self.load_backends() user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = self.new_user_ref( enabled=False, domain_id=CONF.identity.default_domain_id ) user3 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) # Ensure that the enabled LDAP attribute is not set for a # newly created enabled user. user_ref = PROVIDERS.identity_api.create_user(user1) self.assertIs(True, user_ref['enabled']) self.assertIsNone(self.get_user_enabled_vals(user_ref)) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) # Ensure that an enabled LDAP attribute is not set for a disabled user. user1['enabled'] = False user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user1) self.assertIs(False, user_ref['enabled']) self.assertIsNone(self.get_user_enabled_vals(user_ref)) # Enable the user and ensure that the LDAP enabled # attribute is not set. user1['enabled'] = True user_ref = PROVIDERS.identity_api.update_user(user_ref['id'], user1) self.assertIs(True, user_ref['enabled']) self.assertIsNone(self.get_user_enabled_vals(user_ref)) # Ensure that the LDAP enabled attribute is not set for a # newly created disabled user. 
user_ref = PROVIDERS.identity_api.create_user(user2) self.assertIs(False, user_ref['enabled']) self.assertIsNone(self.get_user_enabled_vals(user_ref)) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(False, user_ref['enabled']) # Ensure that the LDAP enabled attribute is not set for a newly created # user when the user_enabled_default setting is used. user_ref = PROVIDERS.identity_api.create_user(user3) self.assertIs(True, user_ref['enabled']) self.assertIsNone(self.get_user_enabled_vals(user_ref)) user_ref = PROVIDERS.identity_api.get_user(user_ref['id']) self.assertIs(True, user_ref['enabled']) def test_user_enabled_invert_default_str_value(self): self.skip_test_overrides("N/A: Covered by test_user_enabled_invert") @mock.patch.object(common_ldap.BaseLdap, '_ldap_get') def test_user_enabled_attribute_handles_utf8(self, mock_ldap_get): # Since user_enabled_emulation is enabled in this test, this test will # fail since it's using user_enabled_invert. self.config_fixture.config( group='ldap', user_enabled_invert=True, user_enabled_attribute='passwordisexpired', ) mock_ldap_get.return_value = ( 'uid=123456789,c=us,ou=our_ldap,o=acme.com', { 'uid': [123456789], 'mail': ['shaun@acme.com'], 'passwordisexpired': ['false'], 'cn': ['uid=123456789,c=us,ou=our_ldap,o=acme.com'], }, ) user_api = identity.backends.ldap.UserApi(CONF) user_ref = user_api.get('123456789') self.assertIs(False, user_ref['enabled']) def test_escape_member_dn(self): # The enabled member DN is properly escaped when querying for enabled # user. object_id = uuid.uuid4().hex driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) # driver.user is the EnabledEmuMixIn implementation used for this test. mixin_impl = driver.user # ) is a special char in a filter and must be escaped. 
sample_dn = 'cn=foo)bar' # LDAP requires ) is escaped by being replaced with "\29" sample_dn_filter_esc = r'cn=foo\29bar' # Override the tree_dn, it's used to build the enabled member filter mixin_impl.tree_dn = sample_dn # The filter, which _is_id_enabled is going to build, contains the # tree_dn, which better be escaped in this case. exp_filter = '({}={}={},{})'.format( mixin_impl.member_attribute, mixin_impl.id_attr, object_id, sample_dn_filter_esc, ) with mixin_impl.get_connection() as conn: m = self.useFixture( fixtures.MockPatchObject(conn, 'search_s') ).mock mixin_impl._is_id_enabled(object_id, conn) # The 3rd argument is the DN. self.assertEqual(exp_filter, m.call_args[0][2]) class LDAPPosixGroupsTest(LDAPTestSetup, unit.TestCase): def assert_backends(self): _assert_backends(self, identity='ldap') def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='ldap', group_members_are_ids=True, group_member_attribute='memberUID', ) def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def _get_domain_fixture(self): """Return the static domain, since domains in LDAP are read-only.""" return PROVIDERS.resource_api.get_domain( CONF.identity.default_domain_id ) def test_posix_member_id(self): domain = self._get_domain_fixture() new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) # Make sure we get an empty list back on a new group, not an error. user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id']) self.assertEqual([], user_refs) # Make sure we get the correct users back once they have been added # to the group. 
new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) # NOTE(amakarov): Create the group directly using LDAP operations # rather than going through the manager. group_api = PROVIDERS.identity_api.driver.group group_ref = group_api.get(new_group['id']) mod = (ldap.MOD_ADD, group_api.member_attribute, new_user['id']) conn = group_api.get_connection() conn.modify_s(group_ref['dn'], [mod]) # Testing the case "the group contains a user" user_refs = PROVIDERS.identity_api.list_users_in_group(new_group['id']) self.assertIn(new_user['id'], (x['id'] for x in user_refs)) # Testing the case "the user is a member of a group" group_refs = PROVIDERS.identity_api.list_groups_for_user( new_user['id'] ) self.assertIn(new_group['id'], (x['id'] for x in group_refs)) class LdapIdentityWithMapping( BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase ): """Class to test mapping of default LDAP backend. The default configuration is not to enable mapping when using a single backend LDAP driver. However, a cloud provider might want to enable the mapping, hence hiding the LDAP IDs from any clients of keystone. Setting backward_compatible_ids to False will enable this mapping. """ def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def setUp(self): super().setUp() cache.configure_cache() def assert_backends(self): _assert_backends(self, identity='ldap') def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def test_dynamic_mapping_build(self): """Test to ensure entities not create via controller are mapped. Many LDAP backends will, essentially, by Read Only. In these cases the mapping is not built by creating objects, rather from enumerating the entries. 
We test this here my manually deleting the mapping and then trying to re-read the entries. """ initial_mappings = len(mapping_sql.list_id_mappings()) user1 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user1 = PROVIDERS.identity_api.create_user(user1) user2 = self.new_user_ref(domain_id=CONF.identity.default_domain_id) user2 = PROVIDERS.identity_api.create_user(user2) mappings = mapping_sql.list_id_mappings() self.assertEqual(initial_mappings + 2, len(mappings)) # Now delete the mappings for the two users above PROVIDERS.id_mapping_api.purge_mappings({'public_id': user1['id']}) PROVIDERS.id_mapping_api.purge_mappings({'public_id': user2['id']}) # We should no longer be able to get these users via their old IDs self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, user1['id'], ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, user2['id'], ) # Now enumerate all users...this should re-build the mapping, and # we should be able to find the users via their original public IDs. 
PROVIDERS.identity_api.list_users() PROVIDERS.identity_api.get_user(user1['id']) PROVIDERS.identity_api.get_user(user2['id']) def test_list_domains(self): domains = PROVIDERS.resource_api.list_domains() default_domain = unit.new_domain_ref( description='The default domain', id=CONF.identity.default_domain_id, name='Default', ) self.assertEqual([default_domain], domains) class BaseMultiLDAPandSQLIdentity: """Mixin class with support methods for domain-specific config testing.""" def create_users_across_domains(self): """Create a set of users, each with a role on their own domain.""" # We also will check that the right number of id mappings get created initial_mappings = len(mapping_sql.list_id_mappings()) users = {} users['user0'] = unit.create_user( PROVIDERS.identity_api, self.domain_default['id'] ) PROVIDERS.assignment_api.create_grant( user_id=users['user0']['id'], domain_id=self.domain_default['id'], role_id=self.role_member['id'], ) for x in range(1, self.domain_count): users['user%s' % x] = unit.create_user( PROVIDERS.identity_api, self.domains['domain%s' % x]['id'] ) PROVIDERS.assignment_api.create_grant( user_id=users['user%s' % x]['id'], domain_id=self.domains['domain%s' % x]['id'], role_id=self.role_member['id'], ) # So how many new id mappings should have been created? One for each # user created in a domain that is using the non default driver.. self.assertEqual( initial_mappings + self.domain_specific_count, len(mapping_sql.list_id_mappings()), ) return users def check_user(self, user, domain_id, expected_status): """Check user is in correct backend. As part of the tests, we want to force ourselves to manually select the driver for a given domain, to make sure the entity ended up in the correct backend. 
""" driver = PROVIDERS.identity_api._select_identity_driver(domain_id) unused, unused, entity_id = ( PROVIDERS.identity_api._get_domain_driver_and_entity_id(user['id']) ) if expected_status == http.client.OK: ref = driver.get_user(entity_id) ref = PROVIDERS.identity_api._set_domain_id_and_mapping( ref, domain_id, driver, map.EntityType.USER ) user = user.copy() del user['password'] self.assertDictEqual(user, ref) else: # TODO(henry-nash): Use AssertRaises here, although # there appears to be an issue with using driver.get_user # inside that construct try: driver.get_user(entity_id) except expected_status: pass def setup_initial_domains(self): def create_domain(domain): try: ref = PROVIDERS.resource_api.create_domain( domain['id'], domain ) except exception.Conflict: ref = PROVIDERS.resource_api.get_domain_by_name(domain['name']) return ref self.domains = {} for x in range(1, self.domain_count): domain = 'domain%s' % x self.domains[domain] = create_domain( {'id': uuid.uuid4().hex, 'name': domain} ) def test_authenticate_to_each_domain(self): """Test that a user in each domain can authenticate.""" users = self.create_users_across_domains() for user_num in range(self.domain_count): user = 'user%s' % user_num with self.make_request(): PROVIDERS.identity_api.authenticate( user_id=users[user]['id'], password=users[user]['password'] ) class MultiLDAPandSQLIdentity( BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase, BaseMultiLDAPandSQLIdentity, ): """Class to test common SQL plus individual LDAP backends. 
We define a set of domains and domain-specific backends: - A separate LDAP backend for the default domain - A separate LDAP backend for domain1 - domain2 shares the same LDAP as domain1, but uses a different tree attach point - An SQL backend for all other domains (which will include domain3 and domain4) Normally one would expect that the default domain would be handled as part of the "other domains" - however the above provides better test coverage since most of the existing backend tests use the default domain. """ def load_fixtures(self, fixtures): self.domain_count = 5 self.domain_specific_count = 3 PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) self.setup_initial_domains() # All initial test data setup complete, time to switch on support # for separate backends per domain. self.enable_multi_domain() super().load_fixtures(fixtures) def assert_backends(self): _assert_backends( self, assignment='sql', identity={ None: 'sql', self.domain_default['id']: 'ldap', self.domains['domain1']['id']: 'ldap', self.domains['domain2']['id']: 'ldap', }, resource='sql', ) def config_overrides(self): super().config_overrides() # Make sure identity and assignment are actually SQL drivers, # BaseLDAPIdentity sets these options to use LDAP. self.config_fixture.config(group='identity', driver='sql') self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') def enable_multi_domain(self): """Enable the chosen form of multi domain configuration support. This method enables the file-based configuration support. Child classes that wish to use the database domain configuration support should override this method and set the appropriate config_fixture option. 
""" self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap', list_limit=1000, ) self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def get_config(self, domain_id): # Get the config for this domain, will return CONF # if no specific config defined for this domain return PROVIDERS.identity_api.domain_configs.get_domain_conf(domain_id) def test_list_users(self): _users = self.create_users_across_domains() # Override the standard list users, since we have added an extra user # to the default domain, so the number of expected users is one more # than in the standard test. users = PROVIDERS.identity_api.list_users( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id ) ) self.assertEqual(len(default_fixtures.USERS) + 1, len(users)) user_ids = {user['id'] for user in users} expected_user_ids = { getattr(self, 'user_%s' % user['name'])['id'] for user in default_fixtures.USERS } expected_user_ids.add(_users['user0']['id']) for user_ref in users: self.assertNotIn('password', user_ref) self.assertEqual(expected_user_ids, user_ids) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get_all') def test_list_limit_domain_specific_inheritance(self, ldap_get_all): # passiging hints is important, because if it's not passed, limiting # is considered be disabled hints = driver_hints.Hints() PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain2']['id'], hints=hints ) # since list_limit is not specified in keystone.domain2.conf, it should # take the default, which is 1000 self.assertTrue(ldap_get_all.called) args, kwargs = ldap_get_all.call_args hints = args[0] self.assertEqual(1000, hints.limit['limit']) @mock.patch.object(common_ldap.BaseLdap, '_ldap_get_all') def test_list_limit_domain_specific_override(self, ldap_get_all): # passiging hints is important, because if it's not passed, limiting # is considered to be disabled hints = 
driver_hints.Hints() PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain1']['id'], hints=hints ) # this should have the list_limit set in Keystone.domain1.conf, which # is 101 self.assertTrue(ldap_get_all.called) args, kwargs = ldap_get_all.call_args hints = args[0] self.assertEqual(101, hints.limit['limit']) def test_domain_segregation(self): """Test that separate configs have segregated the domain. Test Plan: - Users were created in each domain as part of setup, now make sure you can only find a given user in its relevant domain/backend - Make sure that for a backend that supports multiple domains you can get the users via any of its domains """ users = self.create_users_across_domains() # Check that I can read a user with the appropriate domain-selected # driver, but won't find it via any other domain driver check_user = self.check_user check_user(users['user0'], self.domain_default['id'], http.client.OK) for domain in [ self.domains['domain1']['id'], self.domains['domain2']['id'], self.domains['domain3']['id'], self.domains['domain4']['id'], ]: check_user(users['user0'], domain, exception.UserNotFound) check_user( users['user1'], self.domains['domain1']['id'], http.client.OK ) for domain in [ self.domain_default['id'], self.domains['domain2']['id'], self.domains['domain3']['id'], self.domains['domain4']['id'], ]: check_user(users['user1'], domain, exception.UserNotFound) check_user( users['user2'], self.domains['domain2']['id'], http.client.OK ) for domain in [ self.domain_default['id'], self.domains['domain1']['id'], self.domains['domain3']['id'], self.domains['domain4']['id'], ]: check_user(users['user2'], domain, exception.UserNotFound) # domain3 and domain4 share the same backend, so you should be # able to see user3 and user4 from either. 
check_user( users['user3'], self.domains['domain3']['id'], http.client.OK ) check_user( users['user3'], self.domains['domain4']['id'], http.client.OK ) check_user( users['user4'], self.domains['domain3']['id'], http.client.OK ) check_user( users['user4'], self.domains['domain4']['id'], http.client.OK ) for domain in [ self.domain_default['id'], self.domains['domain1']['id'], self.domains['domain2']['id'], ]: check_user(users['user3'], domain, exception.UserNotFound) check_user(users['user4'], domain, exception.UserNotFound) # Finally, going through the regular manager layer, make sure we # only see the right number of users in each of the non-default # domains. One might have expected two users in domain1 (since we # created one before we switched to multi-backend), however since # that domain changed backends in the switch we don't find it anymore. # This is as designed - we don't support moving domains between # backends. # # The listing of the default domain is already handled in the # test_lists_users() method. for domain in [ self.domains['domain1']['id'], self.domains['domain2']['id'], self.domains['domain4']['id'], ]: self.assertThat( PROVIDERS.identity_api.list_users(domain_scope=domain), matchers.HasLength(1), ) # domain3 had a user created before we switched on # multiple backends, plus one created afterwards - and its # backend has not changed - so we should find two. self.assertThat( PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain3']['id'] ), matchers.HasLength(1), ) def test_existing_uuids_work(self): """Test that 'uni-domain' created IDs still work. Throwing the switch to domain-specific backends should not cause existing identities to be inaccessible via ID. 
""" userA = unit.create_user( PROVIDERS.identity_api, self.domain_default['id'] ) userB = unit.create_user( PROVIDERS.identity_api, self.domains['domain1']['id'] ) userC = unit.create_user( PROVIDERS.identity_api, self.domains['domain3']['id'] ) PROVIDERS.identity_api.get_user(userA['id']) PROVIDERS.identity_api.get_user(userB['id']) PROVIDERS.identity_api.get_user(userC['id']) def test_scanning_of_config_dir(self): """Test the Manager class scans the config directory. The setup for the main tests above load the domain configs directly so that the test overrides can be included. This test just makes sure that the standard config directory scanning does pick up the relevant domain config files. """ # Confirm that config has drivers_enabled as True, which we will # check has been set to False later in this test self.assertTrue(CONF.identity.domain_specific_drivers_enabled) self.load_backends() # Execute any command to trigger the lazy loading of domain configs PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain1']['id'] ) # ...and now check the domain configs have been set up self.assertIn('default', PROVIDERS.identity_api.domain_configs) self.assertIn( self.domains['domain1']['id'], PROVIDERS.identity_api.domain_configs, ) self.assertIn( self.domains['domain2']['id'], PROVIDERS.identity_api.domain_configs, ) self.assertNotIn( self.domains['domain3']['id'], PROVIDERS.identity_api.domain_configs, ) self.assertNotIn( self.domains['domain4']['id'], PROVIDERS.identity_api.domain_configs, ) # Finally check that a domain specific config contains items from both # the primary config and the domain specific config conf = PROVIDERS.identity_api.domain_configs.get_domain_conf( self.domains['domain1']['id'] ) # This should now be false, as is the default, since this is not # set in the standard primary config file self.assertFalse(conf.identity.domain_specific_drivers_enabled) # ..and make sure a domain-specific options is also set 
self.assertEqual('fake://memory1', conf.ldap.url) def test_delete_domain_with_user_added(self): domain = unit.new_domain_ref() project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_domain(domain['id'], domain) project = PROVIDERS.resource_api.create_project(project['id'], project) project_ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(project, project_ref) PROVIDERS.assignment_api.create_grant( user_id=self.user_foo['id'], project_id=project['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.delete_grant( user_id=self.user_foo['id'], project_id=project['id'], role_id=self.role_member['id'], ) domain['enabled'] = False PROVIDERS.resource_api.update_domain(domain['id'], domain) PROVIDERS.resource_api.delete_domain(domain['id']) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain['id'], ) def test_user_enabled_ignored_disable_error(self): # Override. self.skip_test_overrides( "Doesn't apply since LDAP config has no " "affect on the SQL identity backend." ) def test_group_enabled_ignored_disable_error(self): # Override. self.skip_test_overrides( "Doesn't apply since LDAP config has no " "affect on the SQL identity backend." 
) def test_list_role_assignments_filtered_by_role(self): # Domain roles are supported by the SQL Assignment backend base = super(BaseLDAPIdentity, self) base.test_list_role_assignments_filtered_by_role() def test_list_role_assignment_by_domain(self): # With multi LDAP this method should work, so override the override # from BaseLDAPIdentity super(BaseLDAPIdentity, self).test_list_role_assignment_by_domain() def test_list_role_assignment_by_user_with_domain_group_roles(self): # With multi LDAP this method should work, so override the override # from BaseLDAPIdentity super( BaseLDAPIdentity, self ).test_list_role_assignment_by_user_with_domain_group_roles() def test_list_role_assignment_using_sourced_groups_with_domains(self): # With SQL Assignment this method should work, so override the override # from BaseLDAPIdentity base = super(BaseLDAPIdentity, self) base.test_list_role_assignment_using_sourced_groups_with_domains() def test_create_project_with_domain_id_and_without_parent_id(self): # With multi LDAP this method should work, so override the override # from BaseLDAPIdentity super( BaseLDAPIdentity, self ).test_create_project_with_domain_id_and_without_parent_id() def test_create_project_with_domain_id_mismatch_to_parent_domain(self): # With multi LDAP this method should work, so override the override # from BaseLDAPIdentity super( BaseLDAPIdentity, self ).test_create_project_with_domain_id_mismatch_to_parent_domain() def test_remove_foreign_assignments_when_deleting_a_domain(self): # With multi LDAP this method should work, so override the override # from BaseLDAPIdentity base = super(BaseLDAPIdentity, self) base.test_remove_foreign_assignments_when_deleting_a_domain() @mock.patch.object(ldap_identity.Identity, 'unset_default_project_id') @mock.patch.object(sql_identity.Identity, 'unset_default_project_id') def test_delete_project_unset_project_ids_for_all_backends( self, sql_mock, ldap_mock ): ldap_mock.side_effect = exception.Forbidden project = 
unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project = PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.resource_api.delete_project(project['id']) ldap_mock.assert_called_with(project['id']) sql_mock.assert_called_with(project['id']) class MultiLDAPandSQLIdentityDomainConfigsInSQL(MultiLDAPandSQLIdentity): """Class to test the use of domain configs stored in the database. Repeat the same tests as MultiLDAPandSQLIdentity, but instead of using the domain specific config files, store the domain specific values in the database. """ def assert_backends(self): _assert_backends( self, assignment='sql', identity={ None: 'sql', self.domain_default['id']: 'ldap', self.domains['domain1']['id']: 'ldap', self.domains['domain2']['id']: 'ldap', }, resource='sql', ) def enable_multi_domain(self): # The values below are the same as in the domain_configs_multi_ldap # directory of test config_files. default_config = { 'ldap': { 'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com', }, 'identity': {'driver': 'ldap'}, } domain1_config = { 'ldap': { 'url': 'fake://memory1', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com', }, 'identity': {'driver': 'ldap', 'list_limit': 101}, } domain2_config = { 'ldap': { 'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=myroot,cn=com', 'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org', 'user_tree_dn': 'ou=Users,dc=myroot,dc=org', }, 'identity': {'driver': 'ldap'}, } PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, default_config ) PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], domain1_config ) PROVIDERS.domain_config_api.create_config( self.domains['domain2']['id'], domain2_config ) self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_configurations_from_database=True, list_limit=1000, ) self.config_fixture.config( 
group='identity_mapping', backward_compatible_ids=False ) def test_domain_config_has_no_impact_if_database_support_disabled(self): """Ensure database domain configs have no effect if disabled. Set reading from database configs to false, restart the backends and then try and set and use database configs. """ self.config_fixture.config( group='identity', domain_configurations_from_database=False ) self.load_backends() new_config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, new_config ) # Trigger the identity backend to initialise any domain specific # configurations PROVIDERS.identity_api.list_users() # Check that the new config has not been passed to the driver for # the default domain. default_config = PROVIDERS.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id ) self.assertEqual(CONF.ldap.url, default_config.ldap.url) def test_reloading_domain_config(self): """Ensure domain drivers are reloaded on a config modification.""" domain_cfgs = PROVIDERS.identity_api.domain_configs # Create a new config for the default domain, hence overwriting the # current settings. new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': 'ldap'}, } PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, new_config ) default_config = domain_cfgs.get_domain_conf( CONF.identity.default_domain_id ) self.assertEqual(new_config['ldap']['url'], default_config.ldap.url) # Ensure updating is also honored updated_config = {'url': uuid.uuid4().hex} PROVIDERS.domain_config_api.update_config( CONF.identity.default_domain_id, updated_config, group='ldap', option='url', ) default_config = domain_cfgs.get_domain_conf( CONF.identity.default_domain_id ) self.assertEqual(updated_config['url'], default_config.ldap.url) # ...and finally ensure delete causes the driver to get the standard # config again. 
PROVIDERS.domain_config_api.delete_config( CONF.identity.default_domain_id ) default_config = domain_cfgs.get_domain_conf( CONF.identity.default_domain_id ) self.assertEqual(CONF.ldap.url, default_config.ldap.url) def test_setting_multiple_sql_driver_raises_exception(self): """Ensure setting multiple domain specific sql drivers is prevented.""" new_config = {'identity': {'driver': 'sql'}} PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, new_config ) PROVIDERS.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id ) PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], new_config ) self.assertRaises( exception.MultipleSQLDriversInConfig, PROVIDERS.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id'], ) def test_same_domain_gets_sql_driver(self): """Ensure we can set an SQL driver if we have had it before.""" new_config = {'identity': {'driver': 'sql'}} PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, new_config ) PROVIDERS.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id ) # By using a slightly different config, we cause the driver to be # reloaded...and hence check if we can reuse the sql driver new_config = { 'identity': {'driver': 'sql'}, 'ldap': {'url': 'fake://memory1'}, } PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, new_config ) PROVIDERS.identity_api.domain_configs.get_domain_conf( CONF.identity.default_domain_id ) def test_delete_domain_clears_sql_registration(self): """Ensure registration is deleted when a domain is deleted.""" domain = unit.new_domain_ref() domain = PROVIDERS.resource_api.create_domain(domain['id'], domain) new_config = {'identity': {'driver': 'sql'}} PROVIDERS.domain_config_api.create_config(domain['id'], new_config) PROVIDERS.identity_api.domain_configs.get_domain_conf(domain['id']) # First show that trying to set SQL for another driver fails 
PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], new_config ) self.assertRaises( exception.MultipleSQLDriversInConfig, PROVIDERS.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id'], ) PROVIDERS.domain_config_api.delete_config( self.domains['domain1']['id'] ) # Now we delete the domain domain['enabled'] = False PROVIDERS.resource_api.update_domain(domain['id'], domain) PROVIDERS.resource_api.delete_domain(domain['id']) # The registration should now be available PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], new_config ) PROVIDERS.identity_api.domain_configs.get_domain_conf( self.domains['domain1']['id'] ) def test_orphaned_registration_does_not_prevent_getting_sql_driver(self): """Ensure we self heal an orphaned sql registration.""" domain = unit.new_domain_ref() domain = PROVIDERS.resource_api.create_domain(domain['id'], domain) new_config = {'identity': {'driver': 'sql'}} PROVIDERS.domain_config_api.create_config(domain['id'], new_config) PROVIDERS.identity_api.domain_configs.get_domain_conf(domain['id']) # First show that trying to set SQL for another driver fails PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], new_config ) self.assertRaises( exception.MultipleSQLDriversInConfig, PROVIDERS.identity_api.domain_configs.get_domain_conf, self.domains['domain1']['id'], ) # Now we delete the domain by using the backend driver directly, # which causes the domain to be deleted without any of the cleanup # that is in the manager (this is simulating a server process crashing # in the middle of a delete domain operation, and somehow leaving the # domain config settings in place, but the domain is deleted). We # should still be able to set another domain to SQL, since we should # self heal this issue. 
PROVIDERS.resource_api.driver.delete_project(domain['id']) # Invalidate cache (so we will see the domain has gone) PROVIDERS.resource_api.get_domain.invalidate( PROVIDERS.resource_api, domain['id'] ) # The registration should now be available PROVIDERS.domain_config_api.create_config( self.domains['domain1']['id'], new_config ) PROVIDERS.identity_api.domain_configs.get_domain_conf( self.domains['domain1']['id'] ) class DomainSpecificLDAPandSQLIdentity( BaseLDAPIdentity, unit.SQLDriverOverrides, unit.TestCase, BaseMultiLDAPandSQLIdentity, ): """Class to test when all domains use specific configs, including SQL. We define a set of domains and domain-specific backends: - A separate LDAP backend for the default domain - A separate SQL backend for domain1 Although the default driver still exists, we don't use it. """ DOMAIN_COUNT = 2 DOMAIN_SPECIFIC_COUNT = 2 def setUp(self): self.domain_count = self.DOMAIN_COUNT self.domain_specific_count = self.DOMAIN_SPECIFIC_COUNT super().setUp() def load_fixtures(self, fixtures): PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) self.setup_initial_domains() super().load_fixtures(fixtures) def assert_backends(self): _assert_backends( self, assignment='sql', identity={ None: 'ldap', 'default': 'ldap', self.domains['domain1']['id']: 'sql', }, resource='sql', ) def config_overrides(self): super().config_overrides() # Make sure resource & assignment are actually SQL drivers, # BaseLDAPIdentity causes this option to use LDAP. self.config_fixture.config(group='resource', driver='sql') self.config_fixture.config(group='assignment', driver='sql') # We aren't setting up any initial data ahead of switching to # domain-specific operation, so make the switch straight away. 
self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_config_dir=( unit.TESTCONF + '/domain_configs_one_sql_one_ldap' ), ) self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def get_config(self, domain_id): # Get the config for this domain, will return CONF # if no specific config defined for this domain return PROVIDERS.identity_api.domain_configs.get_domain_conf(domain_id) def test_list_domains(self): self.skip_test_overrides('N/A: Not relevant for multi ldap testing') def test_delete_domain(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.assertRaises( exception.DomainNotFound, super(BaseLDAPIdentity, self).test_delete_domain_with_project_api, ) def test_list_users(self): _users = self.create_users_across_domains() # Override the standard list users, since we have added an extra user # to the default domain, so the number of expected users is one more # than in the standard test. users = PROVIDERS.identity_api.list_users( domain_scope=self._set_domain_scope( CONF.identity.default_domain_id ) ) self.assertEqual(len(default_fixtures.USERS) + 1, len(users)) user_ids = {user['id'] for user in users} expected_user_ids = { getattr(self, 'user_%s' % user['name'])['id'] for user in default_fixtures.USERS } expected_user_ids.add(_users['user0']['id']) for user_ref in users: self.assertNotIn('password', user_ref) self.assertEqual(expected_user_ids, user_ids) def test_domain_segregation(self): """Test that separate configs have segregated the domain. 
Test Plan: - Users were created in each domain as part of setup, now make sure you can only find a given user in its relevant domain/backend - Make sure that for a backend that supports multiple domains you can get the users via any of its domains """ users = self.create_users_across_domains() # Check that I can read a user with the appropriate domain-selected # driver, but won't find it via any other domain driver self.check_user( users['user0'], self.domain_default['id'], http.client.OK ) self.check_user( users['user0'], self.domains['domain1']['id'], exception.UserNotFound, ) self.check_user( users['user1'], self.domains['domain1']['id'], http.client.OK ) self.check_user( users['user1'], self.domain_default['id'], exception.UserNotFound ) # Finally, going through the regular manager layer, make sure we # only see the right number of users in the non-default domain. self.assertThat( PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain1']['id'] ), matchers.HasLength(1), ) def test_get_domain_mapping_list_is_used(self): # before get_domain_mapping_list was introduced, it was required to # make N calls to the database for N users, and it was slow. # get_domain_mapping_list solves this problem and should be used # when multiple users are fetched from domain-specific backend. for i in range(5): unit.create_user( PROVIDERS.identity_api, domain_id=self.domains['domain1']['id'] ) with mock.patch.multiple( PROVIDERS.id_mapping_api, get_domain_mapping_list=mock.DEFAULT, get_id_mapping=mock.DEFAULT, ) as mocked: PROVIDERS.identity_api.list_users( domain_scope=self.domains['domain1']['id'] ) mocked['get_domain_mapping_list'].assert_called() mocked['get_id_mapping'].assert_not_called() def test_user_id_comma(self): self.skip_test_overrides( 'Only valid if it is guaranteed to be ' 'talking to the fakeldap backend' ) def test_user_enabled_ignored_disable_error(self): # Override. 
self.skip_test_overrides( "Doesn't apply since LDAP config has no " "affect on the SQL identity backend." ) def test_group_enabled_ignored_disable_error(self): # Override. self.skip_test_overrides( "Doesn't apply since LDAP config has no " "affect on the SQL identity backend." ) def test_list_role_assignments_filtered_by_role(self): # Domain roles are supported by the SQL Assignment backend base = super(BaseLDAPIdentity, self) base.test_list_role_assignments_filtered_by_role() def test_delete_domain_with_project_api(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.assertRaises( exception.DomainNotFound, super(BaseLDAPIdentity, self).test_delete_domain_with_project_api, ) def test_create_project_with_domain_id_and_without_parent_id(self): # With restricted multi LDAP, tests that don't use identity, but do # required aditional domains will work base = super(BaseLDAPIdentity, self) base.test_create_project_with_domain_id_and_without_parent_id() def test_create_project_with_domain_id_mismatch_to_parent_domain(self): # With restricted multi LDAP, tests that don't use identity, but do # required aditional domains will work base = super(BaseLDAPIdentity, self) base.test_create_project_with_domain_id_mismatch_to_parent_domain() def test_list_domains_filtered_and_limited(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.skip_test_overrides( 'Restricted multi LDAP class does not support multiple domains' ) def test_list_limit_for_domains(self): # With this restricted multi LDAP class, tests that use multiple # domains and identity, are still not supported self.skip_test_overrides( 'Restricted multi LDAP class does not support multiple domains' ) class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity): """Class to test simplest use of domain-specific SQL driver. 
The simplest use of an SQL domain-specific backend is when it is used to augment the standard case when LDAP is the default driver defined in the main config file. This would allow, for example, service users to be stored in SQL while LDAP handles the rest. Hence we define: - The default driver uses the LDAP backend for the default domain - A separate SQL backend for domain1 """ DOMAIN_COUNT = 2 DOMAIN_SPECIFIC_COUNT = 1 def assert_backends(self): _assert_backends( self, assignment='sql', identity='ldap', resource='sql' ) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') # We aren't setting up any initial data ahead of switching to # domain-specific operation, so make the switch straight away. self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True, domain_config_dir=( unit.TESTCONF + '/domain_configs_default_ldap_one_sql' ), ) # Part of the testing counts how many new mappings get created as # we create users, so ensure we are NOT using mapping for the default # LDAP domain so this doesn't confuse the calculation. 
self.config_fixture.config( group='identity_mapping', backward_compatible_ids=True ) def get_config(self, domain_id): if domain_id == CONF.identity.default_domain_id: return CONF else: return PROVIDERS.identity_api.domain_configs.get_domain_conf( domain_id ) def test_default_sql_plus_sql_specific_driver_fails(self): # First confirm that if ldap is default driver, domain1 can be # loaded as sql self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Make any identity call to initiate the lazy loading of configs PROVIDERS.identity_api.list_users( domain_scope=CONF.identity.default_domain_id ) self.assertIsNotNone(self.get_config(self.domains['domain1']['id'])) # Now re-initialize, but with sql as the identity driver self.config_fixture.config(group='identity', driver='sql') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Make any identity call to initiate the lazy loading of configs, which # should fail since we would now have two sql drivers. self.assertRaises( exception.MultipleSQLDriversInConfig, PROVIDERS.identity_api.list_users, domain_scope=CONF.identity.default_domain_id, ) def test_multiple_sql_specific_drivers_fails(self): self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config(group='assignment', driver='sql') self.load_backends() # Ensure default, domain1 and domain2 exist self.domain_count = 3 self.setup_initial_domains() # Make any identity call to initiate the lazy loading of configs PROVIDERS.identity_api.list_users( domain_scope=CONF.identity.default_domain_id ) # This will only load domain1, since the domain2 config file is # not stored in the same location self.assertIsNotNone(self.get_config(self.domains['domain1']['id'])) # Now try and manually load a 2nd sql specific driver, for domain2, # which should fail. 
self.assertRaises( exception.MultipleSQLDriversInConfig, PROVIDERS.identity_api.domain_configs._load_config_from_file, PROVIDERS.resource_api, [ unit.TESTCONF + '/domain_configs_one_extra_sql/' + 'keystone.domain2.conf' ], 'domain2', ) class LdapFilterTests( identity_tests.FilterTests, LDAPTestSetup, unit.TestCase ): def assert_backends(self): _assert_backends(self, identity='ldap') def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def test_list_users_in_group_inexact_filtered(self): # The LDAP identity driver currently does not support filtering on the # listing users for a given group, so will fail this test. self.skip_test_overrides('Not supported by LDAP identity driver') def test_list_users_in_group_exact_filtered(self): # The LDAP identity driver currently does not support filtering on the # listing users for a given group, so will fail this test. 
self.skip_test_overrides('Not supported by LDAP identity driver') class LDAPMatchingRuleInChainTests(LDAPTestSetup, unit.TestCase): def setUp(self): super().setUp() group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) self.group = PROVIDERS.identity_api.create_group(group) user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) self.user = PROVIDERS.identity_api.create_user(user) PROVIDERS.identity_api.add_user_to_group( self.user['id'], self.group['id'] ) def assert_backends(self): _assert_backends(self, identity='ldap') def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='ldap', group_ad_nesting=True, url='fake://memory', chase_referrals=False, group_tree_dn='cn=UserGroups,cn=example,cn=com', query_scope='one', ) def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap.conf')) return config_files def test_get_group(self): group_ref = PROVIDERS.identity_api.get_group(self.group['id']) self.assertDictEqual(self.group, group_ref) def test_list_user_groups(self): PROVIDERS.identity_api.list_groups_for_user(self.user['id']) def test_list_groups_for_user(self): groups_ref = PROVIDERS.identity_api.list_groups_for_user( self.user['id'] ) self.assertEqual(0, len(groups_ref)) def test_list_groups(self): groups_refs = PROVIDERS.identity_api.list_groups() self.assertEqual(1, len(groups_refs)) self.assertEqual(self.group['id'], groups_refs[0]['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_ldap_pool.py0000664000175000017500000003415200000000000024503 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import fixtures import ldappool from keystone.common import provider_api import keystone.conf from keystone.identity.backends import ldap from keystone.identity.backends.ldap import common as common_ldap from keystone.tests import unit from keystone.tests.unit import fakeldap from keystone.tests.unit import test_backend_ldap CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class LdapPoolCommonTestMixin: """LDAP pool specific common tests used here and in live tests.""" def cleanup_pools(self): common_ldap.PooledLDAPHandler.connection_pools.clear() def test_handler_with_use_pool_enabled(self): # by default use_pool and use_auth_pool is enabled in test pool config user_ref = PROVIDERS.identity_api.get_user(self.user_foo['id']) self.user_foo.pop('password') self.assertDictEqual(self.user_foo, user_ref) handler = common_ldap._get_connection(CONF.ldap.url, use_pool=True) self.assertIsInstance(handler, common_ldap.PooledLDAPHandler) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_handler_with_use_pool_not_enabled( self, bind_method, connect_method ): self.config_fixture.config(group='ldap', use_pool=False) self.config_fixture.config(group='ldap', use_auth_pool=True) self.cleanup_pools() user_api = ldap.UserApi(CONF) handler = user_api.get_connection( user=None, password=None, end_user_auth=True ) # use_auth_pool flag does not matter when use_pool is False # still handler is non pool version self.assertIsInstance(handler.conn, 
common_ldap.PythonLDAPHandler) @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'connect') @mock.patch.object(common_ldap.KeystoneLDAPHandler, 'simple_bind_s') def test_handler_with_end_user_auth_use_pool_not_enabled( self, bind_method, connect_method ): # by default use_pool is enabled in test pool config # now disabling use_auth_pool flag to test handler instance self.config_fixture.config(group='ldap', use_auth_pool=False) self.cleanup_pools() user_api = ldap.UserApi(CONF) handler = user_api.get_connection( user=None, password=None, end_user_auth=True ) self.assertIsInstance(handler.conn, common_ldap.PythonLDAPHandler) # For end_user_auth case, flag should not be false otherwise # it will use, admin connections ldap pool handler = user_api.get_connection( user=None, password=None, end_user_auth=False ) self.assertIsInstance(handler.conn, common_ldap.PooledLDAPHandler) def test_pool_size_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_size, ldappool_cm.size) def test_pool_retry_max_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_retry_max, ldappool_cm.retry_max) def test_pool_retry_delay_set(self): # just make one identity call to initiate ldap connection if not there PROVIDERS.identity_api.get_user(self.user_foo['id']) # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.pool_retry_delay, ldappool_cm.retry_delay) def test_pool_use_tls_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.use_tls, ldappool_cm.use_tls) def test_pool_timeout_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual( CONF.ldap.pool_connection_timeout, ldappool_cm.timeout ) def test_pool_use_pool_set(self): # get related connection 
manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual(CONF.ldap.use_pool, ldappool_cm.use_pool) def test_pool_connection_lifetime_set(self): # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] self.assertEqual( CONF.ldap.pool_connection_lifetime, ldappool_cm.max_lifetime ) def test_max_connection_error_raised(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] ldappool_cm.size = 2 # 3rd connection attempt should raise Max connection error with ldappool_cm.connection(who, cred) as _: # conn1 with ldappool_cm.connection(who, cred) as _: # conn2 try: with ldappool_cm.connection(who, cred) as _: # conn3 _.unbind_s() self.fail() except Exception as ex: self.assertIsInstance( ex, ldappool.MaxConnectionReachedError ) ldappool_cm.size = CONF.ldap.pool_size def test_pool_size_expands_correctly(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] ldappool_cm.size = 3 def _get_conn(): return ldappool_cm.connection(who, cred) # Open 3 connections first with _get_conn() as _: # conn1 self.assertEqual(1, len(ldappool_cm)) with _get_conn() as _: # conn2 self.assertEqual(2, len(ldappool_cm)) with _get_conn() as _: # conn2 _.unbind_ext_s() self.assertEqual(3, len(ldappool_cm)) # Then open 3 connections again and make sure size does not grow # over 3 with _get_conn() as c1: # conn1 self.assertEqual(3, len(ldappool_cm)) c1.connected = False with _get_conn() as c2: # conn2 self.assertEqual(3, len(ldappool_cm)) c2.connected = False with _get_conn() as c3: # conn3 c3.connected = False c3.unbind_ext_s() self.assertEqual(3, len(ldappool_cm)) with _get_conn() as c1: # conn1 self.assertEqual(1, len(ldappool_cm)) with _get_conn() as c2: # conn2 self.assertEqual(2, len(ldappool_cm)) with _get_conn() as c3: # conn3 c3.unbind_ext_s() self.assertEqual(3, 
len(ldappool_cm)) def test_password_change_with_pool(self): old_password = self.user_sna['password'] self.cleanup_pools() # authenticate so that connection is added to pool before password # change with self.make_request(): user_ref = PROVIDERS.identity_api.authenticate( user_id=self.user_sna['id'], password=self.user_sna['password'] ) self.user_sna.pop('password') self.user_sna['enabled'] = True self.assertUserDictEqual(self.user_sna, user_ref) new_password = 'new_password' user_ref['password'] = new_password PROVIDERS.identity_api.update_user(user_ref['id'], user_ref) # now authenticate again to make sure new password works with # connection pool with self.make_request(): user_ref2 = PROVIDERS.identity_api.authenticate( user_id=self.user_sna['id'], password=new_password ) user_ref.pop('password') self.assertUserDictEqual(user_ref, user_ref2) # Authentication with old password would not work here as there # is only one connection in pool which get bind again with updated # password..so no old bind is maintained in this case. with self.make_request(): self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, user_id=self.user_sna['id'], password=old_password, ) @mock.patch.object(fakeldap.FakeLdap, 'search_ext') def test_search_ext_ensure_pool_connection_released(self, mock_search_ext): """Test search_ext exception resiliency. Call search_ext function in isolation. Doing so will cause search_ext to borrow a connection from the pool and associate it with an AsynchronousMessage object. Borrowed connection ought to be released if anything goes wrong during LDAP API call. This test case intentionally throws an exception to ensure everything goes as expected when LDAP connection raises an exception. 
""" class CustomDummyException(Exception): pass # Throw an exception intentionally when LDAP # connection search_ext function is called mock_search_ext.side_effect = CustomDummyException() self.config_fixture.config(group='ldap', pool_size=1) pool = self.conn_pools[CONF.ldap.url] user_api = ldap.UserApi(CONF) # setUp primes the pool so pool # must have one connection self.assertEqual(1, len(pool)) for i in range(1, 10): handler = user_api.get_connection() # Just to ensure that we're using pooled connections self.assertIsInstance(handler.conn, common_ldap.PooledLDAPHandler) # LDAP API will throw CustomDummyException. In this scenario # we expect LDAP connection to be made available back to the # pool. self.assertRaises( CustomDummyException, lambda: handler.search_ext( 'dc=example,dc=test', 'dummy', 'objectclass=*', ['mail', 'userPassword'], ), ) # Pooled connection must not be evicted from the pool self.assertEqual(1, len(pool)) # Ensure that the connection is inactive afterwards with pool._pool_lock: for slot, conn in enumerate(pool._pool): self.assertFalse(conn.active) self.assertEqual(mock_search_ext.call_count, i) @mock.patch.object(fakeldap.FakeLdap, 'result3') def test_result3_ensure_pool_connection_released(self, mock_result3): """Test search_ext-->result3 exception resiliency. Call search_ext function, grab an AsynchronousMessage object and call result3 with it. During the result3 call, LDAP API will throw an exception.The expectation is that the associated LDAP pool connection for AsynchronousMessage must be released back to the LDAP connection pool. 
""" class CustomDummyException(Exception): pass # Throw an exception intentionally when LDAP # connection result3 function is called mock_result3.side_effect = CustomDummyException() self.config_fixture.config(group='ldap', pool_size=1) pool = self.conn_pools[CONF.ldap.url] user_api = ldap.UserApi(CONF) # setUp primes the pool so pool # must have one connection self.assertEqual(1, len(pool)) for i in range(1, 10): handler = user_api.get_connection() # Just to ensure that we're using pooled connections self.assertIsInstance(handler.conn, common_ldap.PooledLDAPHandler) msg = handler.search_ext( 'dc=example,dc=test', 'dummy', 'objectclass=*', ['mail', 'userPassword'], ) # Connection is in use, must be already marked active self.assertTrue(msg.connection.active) # Pooled connection must not be evicted from the pool self.assertEqual(1, len(pool)) # LDAP API will throw CustomDummyException. In this # scenario we expect LDAP connection to be made # available back to the pool. self.assertRaises( CustomDummyException, lambda: handler.result3(msg) ) # Connection must be set inactive self.assertFalse(msg.connection.active) # Pooled connection must not be evicted from the pool self.assertEqual(1, len(pool)) self.assertEqual(mock_result3.call_count, i) class LDAPIdentity( LdapPoolCommonTestMixin, test_backend_ldap.LDAPIdentity, unit.TestCase ): """Executes tests in existing base class with pooled LDAP handler.""" def setUp(self): self.useFixture( fixtures.MockPatchObject( common_ldap.PooledLDAPHandler, 'Connector', fakeldap.FakeLdapPool, ) ) super().setUp() self.addCleanup(self.cleanup_pools) # storing to local variable to avoid long references self.conn_pools = common_ldap.PooledLDAPHandler.connection_pools # super class loads db fixtures which establishes ldap connection # so adding dummy call to highlight connection pool initialization # as its not that obvious though its not needed here PROVIDERS.identity_api.get_user(self.user_foo['id']) def config_files(self): 
config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_pool.conf')) return config_files ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_rules.py0000664000175000017500000000376500000000000023672 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone import exception from keystone.tests import unit from keystone.tests.unit.policy import test_backends as policy_tests class RulesPolicy(unit.TestCase, policy_tests.PolicyTests): def setUp(self): super().setUp() self.load_backends() def config_overrides(self): super().config_overrides() self.config_fixture.config(group='policy', driver='rules') def test_create(self): self.assertRaises(exception.NotImplemented, super().test_create) def test_get(self): self.assertRaises(exception.NotImplemented, super().test_get) def test_list(self): self.assertRaises(exception.NotImplemented, super().test_list) def test_update(self): self.assertRaises(exception.NotImplemented, super().test_update) def test_delete(self): self.assertRaises(exception.NotImplemented, super().test_delete) def test_get_policy_returns_not_found(self): self.assertRaises( exception.NotImplemented, super().test_get_policy_returns_not_found, ) def test_update_policy_returns_not_found(self): self.assertRaises( exception.NotImplemented, 
super().test_update_policy_returns_not_found, ) def test_delete_policy_returns_not_found(self): self.assertRaises( exception.NotImplemented, super().test_delete_policy_returns_not_found, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_sql.py0000664000175000017500000017033600000000000023336 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock import uuid import fixtures import freezegun from oslo_db import exception as db_exception from oslo_db import options from oslo_log import log from oslo_utils import timeutils import sqlalchemy from sqlalchemy import exc from testtools import matchers from keystone.common import driver_hints from keystone.common import provider_api from keystone.common import sql from keystone.common.sql import core import keystone.conf from keystone.credential.providers import fernet as credential_provider from keystone import exception from keystone.identity.backends import sql_model as identity_sql from keystone.resource.backends import base as resource from keystone.tests import unit from keystone.tests.unit.assignment import test_backends as assignment_tests from keystone.tests.unit.catalog import test_backends as catalog_tests from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity import test_backends as identity_tests from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.limit import test_backends as limit_tests from keystone.tests.unit.policy import test_backends as policy_tests from keystone.tests.unit.resource import test_backends as resource_tests from keystone.tests.unit.trust import test_backends as trust_tests from keystone.trust.backends import sql as trust_sql CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class SqlTests(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): super().setUp() self.database_fixture = self.useFixture(database.Database()) self.load_backends() # populate the engine with tables & fixtures self.load_fixtures(default_fixtures) # defaulted by the data load self.user_foo['enabled'] = True def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files class DataTypeRoundTrips(SqlTests): def 
test_json_blob_roundtrip(self): """Test round-trip of a JSON data structure with JsonBlob.""" with sql.session_for_read() as session: val = session.scalar( sqlalchemy.select( sqlalchemy.literal({"key": "value"}, type_=core.JsonBlob), ) ) self.assertEqual({"key": "value"}, val) def test_json_blob_sql_null(self): """Test that JsonBlob can accommodate a SQL NULL value in a result set. SQL NULL may be handled by JsonBlob in the case where a table is storing NULL in a JsonBlob column, as several models use this type in a column that is nullable. It also comes back when the column is left NULL from being in an OUTER JOIN. In Python, this means the None constant is handled by the datatype. """ with sql.session_for_read() as session: val = session.scalar( sqlalchemy.select( sqlalchemy.cast(sqlalchemy.null(), type_=core.JsonBlob), ) ) self.assertIsNone(val) def test_json_blob_python_none(self): """Test that JsonBlob round-trips a Python None. This is where JSON datatypes get a little nutty, in that JSON has a 'null' keyword, and JsonBlob right now will persist Python None as the json string 'null', not SQL NULL. 
""" with sql.session_for_read() as session: val = session.scalar( sqlalchemy.select( sqlalchemy.literal(None, type_=core.JsonBlob), ) ) self.assertIsNone(val) def test_json_blob_python_none_renders(self): """Test that JsonBlob actually renders JSON 'null' for Python None.""" with sql.session_for_read() as session: val = session.scalar( sqlalchemy.select( sqlalchemy.cast( sqlalchemy.literal(None, type_=core.JsonBlob), sqlalchemy.String, ), ) ) self.assertEqual("null", val) def test_datetimeint_roundtrip(self): """Test round-trip of a Python datetime with DateTimeInt.""" with sql.session_for_read() as session: datetime_value = datetime.datetime(2019, 5, 15, 10, 17, 55) val = session.scalar( sqlalchemy.select( sqlalchemy.literal(datetime_value, type_=core.DateTimeInt), ) ) self.assertEqual(datetime_value, val) def test_datetimeint_persistence(self): """Test integer persistence with DateTimeInt.""" with sql.session_for_read() as session: datetime_value = datetime.datetime(2019, 5, 15, 10, 17, 55) val = session.scalar( sqlalchemy.select( sqlalchemy.cast( sqlalchemy.literal( datetime_value, type_=core.DateTimeInt ), sqlalchemy.Integer, ), ) ) self.assertEqual(1557915475000000, val) def test_datetimeint_python_none(self): """Test round-trip of a Python None with DateTimeInt.""" with sql.session_for_read() as session: val = session.scalar( sqlalchemy.select( sqlalchemy.literal(None, type_=core.DateTimeInt), ) ) self.assertIsNone(val) class SqlModels(SqlTests): def load_table(self, name): table = sqlalchemy.Table( name, sql.ModelBase.metadata, autoload_with=self.database_fixture.engine, ) return table def assertExpectedSchema(self, table, expected_schema): """Assert that a table's schema is what we expect. 
:param string table: the name of the table to inspect :param tuple expected_schema: a tuple of tuples containing the expected schema :raises AssertionError: when the database schema doesn't match the expected schema The expected_schema format is simply:: ( ('column name', sql type, qualifying detail), ... ) The qualifying detail varies based on the type of the column:: - sql.Boolean columns must indicate the column's default value or None if there is no default - Columns with a length, like sql.String, must indicate the column's length - All other column types should use None Example:: cols = (('id', sql.String, 64), ('enabled', sql.Boolean, True), ('extra', sql.JsonBlob, None)) self.assertExpectedSchema('table_name', cols) """ table = self.load_table(table) actual_schema = [] for column in table.c: if isinstance(column.type, sql.Boolean): default = None if column.default: default = column.default.arg actual_schema.append((column.name, type(column.type), default)) elif hasattr(column.type, 'length') and not isinstance( column.type, sql.Enum ): # NOTE(dstanek): Even though sql.Enum columns have a length # set we don't want to catch them here. Maybe in the future # we'll check to see that they contain a list of the correct # possible values. 
actual_schema.append( (column.name, type(column.type), column.type.length) ) else: actual_schema.append((column.name, type(column.type), None)) self.assertCountEqual(expected_schema, actual_schema) def test_user_model(self): cols = ( ('id', sql.String, 64), ('domain_id', sql.String, 64), ('default_project_id', sql.String, 64), ('enabled', sql.Boolean, None), ('extra', sql.JsonBlob, None), ('created_at', sql.DateTime, None), ('last_active_at', sqlalchemy.Date, None), ) self.assertExpectedSchema('user', cols) def test_local_user_model(self): cols = ( ('id', sql.Integer, None), ('user_id', sql.String, 64), ('name', sql.String, 255), ('domain_id', sql.String, 64), ('failed_auth_count', sql.Integer, None), ('failed_auth_at', sql.DateTime, None), ) self.assertExpectedSchema('local_user', cols) def test_password_model(self): cols = ( ('id', sql.Integer, None), ('local_user_id', sql.Integer, None), ('password_hash', sql.String, 255), ('created_at', sql.DateTime, None), ('expires_at', sql.DateTime, None), ('created_at_int', sql.DateTimeInt, None), ('expires_at_int', sql.DateTimeInt, None), ('self_service', sql.Boolean, False), ) self.assertExpectedSchema('password', cols) def test_federated_user_model(self): cols = ( ('id', sql.Integer, None), ('user_id', sql.String, 64), ('idp_id', sql.String, 64), ('protocol_id', sql.String, 64), ('unique_id', sql.String, 255), ('display_name', sql.String, 255), ) self.assertExpectedSchema('federated_user', cols) def test_nonlocal_user_model(self): cols = ( ('domain_id', sql.String, 64), ('name', sql.String, 255), ('user_id', sql.String, 64), ) self.assertExpectedSchema('nonlocal_user', cols) def test_group_model(self): cols = ( ('id', sql.String, 64), ('name', sql.String, 64), ('description', sql.Text, None), ('domain_id', sql.String, 64), ('extra', sql.JsonBlob, None), ) self.assertExpectedSchema('group', cols) def test_project_model(self): cols = ( ('id', sql.String, 64), ('name', sql.String, 64), ('description', sql.Text, None), 
('domain_id', sql.String, 64), ('enabled', sql.Boolean, None), ('extra', sql.JsonBlob, None), ('parent_id', sql.String, 64), ('is_domain', sql.Boolean, False), ) self.assertExpectedSchema('project', cols) def test_role_assignment_model(self): cols = ( ('type', sql.Enum, None), ('actor_id', sql.String, 64), ('target_id', sql.String, 64), ('role_id', sql.String, 64), ('inherited', sql.Boolean, False), ) self.assertExpectedSchema('assignment', cols) def test_user_group_membership(self): cols = (('group_id', sql.String, 64), ('user_id', sql.String, 64)) self.assertExpectedSchema('user_group_membership', cols) def test_revocation_event_model(self): cols = ( ('id', sql.Integer, None), ('domain_id', sql.String, 64), ('project_id', sql.String, 64), ('user_id', sql.String, 64), ('role_id', sql.String, 64), ('trust_id', sql.String, 64), ('consumer_id', sql.String, 64), ('access_token_id', sql.String, 64), ('issued_before', sql.DateTime, None), ('expires_at', sql.DateTime, None), ('revoked_at', sql.DateTime, None), ('audit_id', sql.String, 32), ('audit_chain_id', sql.String, 32), ) self.assertExpectedSchema('revocation_event', cols) def test_project_tags_model(self): cols = (('project_id', sql.String, 64), ('name', sql.Unicode, 255)) self.assertExpectedSchema('project_tag', cols) class SqlIdentity( SqlTests, identity_tests.IdentityTests, assignment_tests.AssignmentTests, assignment_tests.SystemAssignmentTests, resource_tests.ResourceTests, ): def test_password_hashed(self): with sql.session_for_read() as session: user_ref = PROVIDERS.identity_api._get_user( session, self.user_foo['id'] ) self.assertNotEqual( self.user_foo['password'], user_ref['password'] ) def test_create_user_with_null_password(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) user_dict["password"] = None new_user_dict = PROVIDERS.identity_api.create_user(user_dict) with sql.session_for_read() as session: new_user_ref = PROVIDERS.identity_api._get_user( session, 
new_user_dict['id'] ) self.assertIsNone(new_user_ref.password) def test_update_user_with_null_password(self): user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) self.assertTrue(user_dict['password']) new_user_dict = PROVIDERS.identity_api.create_user(user_dict) new_user_dict["password"] = None new_user_dict = PROVIDERS.identity_api.update_user( new_user_dict['id'], new_user_dict ) with sql.session_for_read() as session: new_user_ref = PROVIDERS.identity_api._get_user( session, new_user_dict['id'] ) self.assertIsNone(new_user_ref.password) def test_delete_user_with_project_association(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], self.project_bar['id'], role_member['id'] ) PROVIDERS.identity_api.delete_user(user['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.assignment_api.list_projects_for_user, user['id'], ) def test_create_user_case_sensitivity(self): # user name case sensitivity is down to the fact that it is marked as # an SQL UNIQUE column, which may not be valid for other backends, like # LDAP. # create a ref with a lowercase name ref = unit.new_user_ref( name=uuid.uuid4().hex.lower(), domain_id=CONF.identity.default_domain_id, ) ref = PROVIDERS.identity_api.create_user(ref) # assign a new ID with the same name, but this time in uppercase ref['name'] = ref['name'].upper() PROVIDERS.identity_api.create_user(ref) def test_create_project_case_sensitivity(self): # project name case sensitivity is down to the fact that it is marked # as an SQL UNIQUE column, which may not be valid for other backends, # like LDAP. 
# create a ref with a lowercase name ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.resource_api.create_project(ref['id'], ref) # assign a new ID with the same name, but this time in uppercase ref['id'] = uuid.uuid4().hex ref['name'] = ref['name'].upper() PROVIDERS.resource_api.create_project(ref['id'], ref) def test_delete_project_with_user_association(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], self.project_bar['id'], role_member['id'] ) PROVIDERS.resource_api.delete_project(self.project_bar['id']) projects = PROVIDERS.assignment_api.list_projects_for_user(user['id']) self.assertEqual([], projects) def test_update_project_returns_extra(self): """Test for backward compatibility with an essex/folsom bug. Non-indexed attributes were returned in an 'extra' attribute, instead of on the entity itself; for consistency and backwards compatibility, those attributes should be included twice. This behavior is specific to the SQL driver. """ arbitrary_key = uuid.uuid4().hex arbitrary_value = uuid.uuid4().hex project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) project[arbitrary_key] = arbitrary_value ref = PROVIDERS.resource_api.create_project(project['id'], project) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertNotIn('extra', ref) ref['name'] = uuid.uuid4().hex ref = PROVIDERS.resource_api.update_project(ref['id'], ref) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key]) def test_update_user_returns_extra(self): """Test for backwards-compatibility with an essex/folsom bug. 
Non-indexed attributes were returned in an 'extra' attribute, instead of on the entity itself; for consistency and backwards compatibility, those attributes should be included twice. This behavior is specific to the SQL driver. """ arbitrary_key = uuid.uuid4().hex arbitrary_value = uuid.uuid4().hex user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user[arbitrary_key] = arbitrary_value del user["id"] ref = PROVIDERS.identity_api.create_user(user) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertNotIn('password', ref) self.assertNotIn('extra', ref) user['name'] = uuid.uuid4().hex user['password'] = uuid.uuid4().hex ref = PROVIDERS.identity_api.update_user(ref['id'], user) self.assertNotIn('password', ref) self.assertNotIn('password', ref['extra']) self.assertEqual(arbitrary_value, ref[arbitrary_key]) self.assertEqual(arbitrary_value, ref['extra'][arbitrary_key]) def test_sql_user_to_dict_null_default_project_id(self): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) user = PROVIDERS.identity_api.create_user(user) with sql.session_for_read() as session: query = session.query(identity_sql.User) query = query.filter_by(id=user['id']) raw_user_ref = query.one() self.assertIsNone(raw_user_ref.default_project_id) user_ref = raw_user_ref.to_dict() self.assertNotIn('default_project_id', user_ref) session.close() def test_list_domains_for_user(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.new_user_ref(domain_id=domain['id']) test_domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(test_domain1['id'], test_domain1) test_domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(test_domain2['id'], test_domain2) user = PROVIDERS.identity_api.create_user(user) user_domains = PROVIDERS.assignment_api.list_domains_for_user( user['id'] ) self.assertEqual(0, len(user_domains)) PROVIDERS.assignment_api.create_grant( user_id=user['id'], 
domain_id=test_domain1['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user['id'], domain_id=test_domain2['id'], role_id=self.role_member['id'], ) user_domains = PROVIDERS.assignment_api.list_domains_for_user( user['id'] ) self.assertThat(user_domains, matchers.HasLength(2)) def test_list_domains_for_user_with_grants(self): # Create two groups each with a role on a different domain, and # make user1 a member of both groups. Both these new domains # should now be included, along with any direct user grants. domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.new_user_ref(domain_id=domain['id']) user = PROVIDERS.identity_api.create_user(user) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = PROVIDERS.identity_api.create_group(group2) test_domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(test_domain1['id'], test_domain1) test_domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(test_domain2['id'], test_domain2) test_domain3 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(test_domain3['id'], test_domain3) PROVIDERS.identity_api.add_user_to_group(user['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user['id'], group2['id']) # Create 3 grants, one user grant, the other two as group grants PROVIDERS.assignment_api.create_grant( user_id=user['id'], domain_id=test_domain1['id'], role_id=self.role_member['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group1['id'], domain_id=test_domain2['id'], role_id=self.role_admin['id'], ) PROVIDERS.assignment_api.create_grant( group_id=group2['id'], domain_id=test_domain3['id'], role_id=self.role_admin['id'], ) user_domains = PROVIDERS.assignment_api.list_domains_for_user( user['id'] ) self.assertThat(user_domains, matchers.HasLength(3)) def 
test_list_domains_for_user_with_inherited_grants(self): """Test that inherited roles on the domain are excluded. Test Plan: - Create two domains, one user, group and role - Domain1 is given an inherited user role, Domain2 an inherited group role (for a group of which the user is a member) - When listing domains for user, neither domain should be returned """ domain1 = unit.new_domain_ref() domain1 = PROVIDERS.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() domain2 = PROVIDERS.resource_api.create_domain(domain2['id'], domain2) user = unit.new_user_ref(domain_id=domain1['id']) user = PROVIDERS.identity_api.create_user(user) group = unit.new_group_ref(domain_id=domain1['id']) group = PROVIDERS.identity_api.create_group(group) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # Create a grant on each domain, one user grant, one group grant, # both inherited. PROVIDERS.assignment_api.create_grant( user_id=user['id'], domain_id=domain1['id'], role_id=role['id'], inherited_to_projects=True, ) PROVIDERS.assignment_api.create_grant( group_id=group['id'], domain_id=domain2['id'], role_id=role['id'], inherited_to_projects=True, ) user_domains = PROVIDERS.assignment_api.list_domains_for_user( user['id'] ) # No domains should be returned since both domains have only inherited # roles assignments. 
self.assertThat(user_domains, matchers.HasLength(0)) def test_list_groups_for_user(self): domain = self._get_domain_fixture() test_groups = [] test_users = [] GROUP_COUNT = 3 USER_COUNT = 2 for x in range(0, USER_COUNT): new_user = unit.new_user_ref(domain_id=domain['id']) new_user = PROVIDERS.identity_api.create_user(new_user) test_users.append(new_user) positive_user = test_users[0] negative_user = test_users[1] for x in range(0, USER_COUNT): group_refs = PROVIDERS.identity_api.list_groups_for_user( test_users[x]['id'] ) self.assertEqual(0, len(group_refs)) for x in range(0, GROUP_COUNT): before_count = x after_count = x + 1 new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) test_groups.append(new_group) # add the user to the group and ensure that the # group count increases by one for each group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(before_count, len(group_refs)) PROVIDERS.identity_api.add_user_to_group( positive_user['id'], new_group['id'] ) group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(after_count, len(group_refs)) # Make sure the group count for the unrelated user did not change group_refs = PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) # remove the user from each group and ensure that # the group count reduces by one for each for x in range(0, 3): before_count = GROUP_COUNT - x after_count = GROUP_COUNT - x - 1 group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(before_count, len(group_refs)) PROVIDERS.identity_api.remove_user_from_group( positive_user['id'], test_groups[x]['id'] ) group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(after_count, len(group_refs)) # Make sure the group count for the unrelated user # did not change group_refs = 
PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) def test_add_user_to_group_expiring_mapped(self): self._build_fed_resource() domain = self._get_domain_fixture() self.config_fixture.config( group='federation', default_authorization_ttl=5 ) time = timeutils.utcnow() tick = datetime.timedelta(minutes=5) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) fed_dict = unit.new_federated_user_ref() fed_dict['id'] = fed_dict['unique_id'] fed_dict['name'] = fed_dict['display_name'] fed_dict['domain'] = {'id': uuid.uuid4().hex} fed_dict['idp_id'] = 'myidp' fed_dict['protocol_id'] = 'mapped' with freezegun.freeze_time(time - tick) as frozen_time: user = PROVIDERS.identity_api.shadow_federated_user( fed_dict['idp_id'], fed_dict['protocol_id'], fed_dict, group_ids=[new_group['id']], ) PROVIDERS.identity_api.check_user_in_group( user['id'], new_group['id'] ) # Expiration frozen_time.tick(tick) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, user['id'], new_group['id'], ) # Renewal PROVIDERS.identity_api.shadow_federated_user( fed_dict['idp_id'], fed_dict['protocol_id'], fed_dict, group_ids=[new_group['id']], ) PROVIDERS.identity_api.check_user_in_group( user['id'], new_group['id'] ) def test_add_user_to_group_expiring(self): self._build_fed_resource() domain = self._get_domain_fixture() time = timeutils.utcnow() tick = datetime.timedelta(minutes=5) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) fed_dict = unit.new_federated_user_ref() fed_dict['idp_id'] = 'myidp' fed_dict['protocol_id'] = 'mapped' new_user = PROVIDERS.shadow_users_api.create_federated_user( domain['id'], fed_dict ) with freezegun.freeze_time(time - tick) as frozen_time: PROVIDERS.shadow_users_api.add_user_to_group_expires( new_user['id'], new_group['id'] ) self.config_fixture.config( 
group='federation', default_authorization_ttl=0 ) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, new_user['id'], new_group['id'], ) self.config_fixture.config( group='federation', default_authorization_ttl=5 ) PROVIDERS.identity_api.check_user_in_group( new_user['id'], new_group['id'] ) # Expiration frozen_time.tick(tick) self.assertRaises( exception.NotFound, PROVIDERS.identity_api.check_user_in_group, new_user['id'], new_group['id'], ) # Renewal PROVIDERS.shadow_users_api.add_user_to_group_expires( new_user['id'], new_group['id'] ) PROVIDERS.identity_api.check_user_in_group( new_user['id'], new_group['id'] ) def test_add_user_to_group_expiring_list(self): self._build_fed_resource() domain = self._get_domain_fixture() self.config_fixture.config( group='federation', default_authorization_ttl=5 ) time = timeutils.utcnow() tick = datetime.timedelta(minutes=5) new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) exp_new_group = unit.new_group_ref(domain_id=domain['id']) exp_new_group = PROVIDERS.identity_api.create_group(exp_new_group) fed_dict = unit.new_federated_user_ref() fed_dict['idp_id'] = 'myidp' fed_dict['protocol_id'] = 'mapped' new_user = PROVIDERS.shadow_users_api.create_federated_user( domain['id'], fed_dict ) PROVIDERS.identity_api.add_user_to_group( new_user['id'], new_group['id'] ) PROVIDERS.identity_api.check_user_in_group( new_user['id'], new_group['id'] ) with freezegun.freeze_time(time - tick) as frozen_time: PROVIDERS.shadow_users_api.add_user_to_group_expires( new_user['id'], exp_new_group['id'] ) PROVIDERS.identity_api.check_user_in_group( new_user['id'], new_group['id'] ) groups = PROVIDERS.identity_api.list_groups_for_user( new_user['id'] ) self.assertEqual(len(groups), 2) for group in groups: if group.get('membership_expires_at'): self.assertEqual(group['membership_expires_at'], time) frozen_time.tick(tick) groups = 
PROVIDERS.identity_api.list_groups_for_user( new_user['id'] ) self.assertEqual(len(groups), 1) def test_storing_null_domain_id_in_project_ref(self): """Test the special storage of domain_id=None in sql resource driver. The resource driver uses a special value in place of None for domain_id in the project record. This shouldn't escape the driver. Hence we test the interface to ensure that you can store a domain_id of None, and that any special value used inside the driver does not escape through the interface. """ spoiler_project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project( spoiler_project['id'], spoiler_project ) # First let's create a project with a None domain_id and make sure we # can read it back. project = unit.new_project_ref(domain_id=None, is_domain=True) project = PROVIDERS.resource_api.create_project(project['id'], project) ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(project, ref) # Can we get it by name? ref = PROVIDERS.resource_api.get_project_by_name(project['name'], None) self.assertDictEqual(project, ref) # Can we filter for them - create a second domain to ensure we are # testing the receipt of more than one. project2 = unit.new_project_ref(domain_id=None, is_domain=True) project2 = PROVIDERS.resource_api.create_project( project2['id'], project2 ) hints = driver_hints.Hints() hints.add_filter('domain_id', None) refs = PROVIDERS.resource_api.list_projects(hints) self.assertThat(refs, matchers.HasLength(2 + self.domain_count)) self.assertIn(project, refs) self.assertIn(project2, refs) # Can we update it? 
project['name'] = uuid.uuid4().hex PROVIDERS.resource_api.update_project(project['id'], project) ref = PROVIDERS.resource_api.get_project(project['id']) self.assertDictEqual(project, ref) # Finally, make sure we can delete it project['enabled'] = False PROVIDERS.resource_api.update_project(project['id'], project) PROVIDERS.resource_api.delete_project(project['id']) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project['id'], ) def test_hidden_project_domain_root_is_really_hidden(self): """Ensure we cannot access the hidden root of all project domains. Calling any of the driver methods should result in the same as would be returned if we passed a project that does not exist. We don't test create_project, since we do not allow a caller of our API to specify their own ID for a new entity. """ def _exercise_project_api(ref_id): driver = PROVIDERS.resource_api.driver self.assertRaises( exception.ProjectNotFound, driver.get_project, ref_id ) self.assertRaises( exception.ProjectNotFound, driver.get_project_by_name, resource.NULL_DOMAIN_ID, ref_id, ) project_ids = [ x['id'] for x in driver.list_projects(driver_hints.Hints()) ] self.assertNotIn(ref_id, project_ids) projects = driver.list_projects_from_ids([ref_id]) self.assertThat(projects, matchers.HasLength(0)) project_ids = [ x for x in driver.list_project_ids_from_domain_ids([ref_id]) ] self.assertNotIn(ref_id, project_ids) self.assertRaises( exception.DomainNotFound, driver.list_projects_in_domain, ref_id, ) project_ids = [ x['id'] for x in driver.list_projects_acting_as_domain( driver_hints.Hints() ) ] self.assertNotIn(ref_id, project_ids) projects = driver.list_projects_in_subtree(ref_id) self.assertThat(projects, matchers.HasLength(0)) self.assertRaises( exception.ProjectNotFound, driver.list_project_parents, ref_id ) # A non-existing project just returns True from the driver self.assertTrue(driver.is_leaf_project(ref_id)) self.assertRaises( exception.ProjectNotFound, 
driver.update_project, ref_id, {} ) self.assertRaises( exception.ProjectNotFound, driver.delete_project, ref_id ) # Deleting list of projects that includes a non-existing project # should be silent. The root domain <> can't # be deleted. if ref_id != resource.NULL_DOMAIN_ID: driver.delete_projects_from_ids([ref_id]) _exercise_project_api(uuid.uuid4().hex) _exercise_project_api(resource.NULL_DOMAIN_ID) def test_list_users_call_count(self): """There should not be O(N) queries.""" # create 10 users. 10 is just a random number for i in range(10): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user) # sqlalchemy emits various events and allows to listen to them. Here # bound method `query_counter` will be called each time when a query # is compiled class CallCounter: def __init__(self): self.calls = 0 def reset(self): self.calls = 0 def query_counter(self, query): self.calls += 1 counter = CallCounter() sqlalchemy.event.listen( sqlalchemy.orm.query.Query, 'before_compile', counter.query_counter ) first_call_users = PROVIDERS.identity_api.list_users() first_call_counter = counter.calls # add 10 more users for i in range(10): user = unit.new_user_ref(domain_id=CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(user) counter.reset() second_call_users = PROVIDERS.identity_api.list_users() # ensure that the number of calls does not depend on the number of # users fetched. 
self.assertNotEqual(len(first_call_users), len(second_call_users)) self.assertEqual(first_call_counter, counter.calls) self.assertEqual(3, counter.calls) def test_check_project_depth(self): # Create a 3 level project tree: # # default_domain # | # project_1 # | # project_2 project_1 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project_1['id'], project_1) project_2 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id, parent_id=project_1['id'], ) PROVIDERS.resource_api.create_project(project_2['id'], project_2) # if max_depth is None or >= current project depth, return nothing. resp = PROVIDERS.resource_api.check_project_depth(max_depth=None) self.assertIsNone(resp) resp = PROVIDERS.resource_api.check_project_depth(max_depth=3) self.assertIsNone(resp) resp = PROVIDERS.resource_api.check_project_depth(max_depth=4) self.assertIsNone(resp) # if max_depth < current project depth, raise LimitTreeExceedError self.assertRaises( exception.LimitTreeExceedError, PROVIDERS.resource_api.check_project_depth, 2, ) def test_update_user_with_stale_data_forces_retry(self): # Capture log output so we know oslo.db attempted a retry log_fixture = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) # Create a new user user_dict = unit.new_user_ref( domain_id=CONF.identity.default_domain_id ) new_user_dict = PROVIDERS.identity_api.create_user(user_dict) side_effects = [ # Raise a StaleDataError simulating that another client has # updated the user's password while this client's request was # being processed sqlalchemy.orm.exc.StaleDataError, # The oslo.db library will retry the request, so the second # time this method is called let's return a valid session # object sql.session_for_write(), ] with mock.patch('keystone.common.sql.session_for_write') as m: m.side_effect = side_effects # Update a user's attribute, the first attempt will fail but # oslo.db will handle the exception and retry, the second attempt # 
will succeed new_user_dict['email'] = uuid.uuid4().hex PROVIDERS.identity_api.update_user( new_user_dict['id'], new_user_dict ) # Make sure oslo.db retried the update by checking the log output expected_log_message = ( 'Performing DB retry for function keystone.identity.backends.' 'sql.Identity.update_user' ) self.assertIn(expected_log_message, log_fixture.output) class SqlTrust(SqlTests, trust_tests.TrustTests): def test_trust_expires_at_int_matches_expires_at(self): with sql.session_for_write() as session: new_id = uuid.uuid4().hex self.create_sample_trust(new_id) trust_ref = session.get(trust_sql.TrustModel, new_id) self.assertIsNotNone(trust_ref._expires_at) self.assertEqual(trust_ref._expires_at, trust_ref.expires_at_int) self.assertEqual(trust_ref.expires_at, trust_ref.expires_at_int) class SqlCatalog(SqlTests, catalog_tests.CatalogTests): _legacy_endpoint_id_in_endpoint = True _enabled_default_to_true_when_creating_endpoint = True def test_get_v3_catalog_project_non_exist(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) malformed_url = "http://192.168.1.104:8774/v2/$(project)s" endpoint = unit.new_endpoint_ref( service_id=service['id'], url=malformed_url, region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) self.assertRaises( exception.ProjectNotFound, PROVIDERS.catalog_api.get_v3_catalog, 'fake-user', 'fake-project', ) def test_get_v3_catalog_with_empty_public_url(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref( url='', service_id=service['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint.copy()) catalog = PROVIDERS.catalog_api.get_v3_catalog( self.user_foo['id'], self.project_bar['id'] ) catalog_endpoint = catalog[0] self.assertEqual(service['name'], catalog_endpoint['name']) self.assertEqual(service['id'], catalog_endpoint['id']) self.assertEqual([], 
catalog_endpoint['endpoints']) def test_create_endpoint_region_returns_not_found(self): service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) endpoint = unit.new_endpoint_ref( region_id=uuid.uuid4().hex, service_id=service['id'] ) self.assertRaises( exception.ValidationError, PROVIDERS.catalog_api.create_endpoint, endpoint['id'], endpoint.copy(), ) def test_create_region_invalid_id(self): region = unit.new_region_ref(id='0' * 256) self.assertRaises( exception.StringLengthExceeded, PROVIDERS.catalog_api.create_region, region, ) def test_create_region_invalid_parent_id(self): region = unit.new_region_ref(parent_region_id='0' * 256) self.assertRaises( exception.RegionNotFound, PROVIDERS.catalog_api.create_region, region, ) def test_delete_region_with_endpoint(self): # create a region region = unit.new_region_ref() PROVIDERS.catalog_api.create_region(region) # create a child region child_region = unit.new_region_ref(parent_region_id=region['id']) PROVIDERS.catalog_api.create_region(child_region) # create a service service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(service['id'], service) # create an endpoint attached to the service and child region child_endpoint = unit.new_endpoint_ref( region_id=child_region['id'], service_id=service['id'] ) PROVIDERS.catalog_api.create_endpoint( child_endpoint['id'], child_endpoint ) self.assertRaises( exception.RegionDeletionError, PROVIDERS.catalog_api.delete_region, child_region['id'], ) # create an endpoint attached to the service and parent region endpoint = unit.new_endpoint_ref( region_id=region['id'], service_id=service['id'] ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) self.assertRaises( exception.RegionDeletionError, PROVIDERS.catalog_api.delete_region, region['id'], ) def test_v3_catalog_domain_scoped_token(self): # test the case that project_id is None. 
srv_1 = unit.new_service_ref() PROVIDERS.catalog_api.create_service(srv_1['id'], srv_1) endpoint_1 = unit.new_endpoint_ref( service_id=srv_1['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint_1['id'], endpoint_1) srv_2 = unit.new_service_ref() PROVIDERS.catalog_api.create_service(srv_2['id'], srv_2) endpoint_2 = unit.new_endpoint_ref( service_id=srv_2['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint_2['id'], endpoint_2) self.config_fixture.config( group='endpoint_filter', return_all_endpoints_if_no_filter=True ) catalog_ref = PROVIDERS.catalog_api.get_v3_catalog( uuid.uuid4().hex, None ) self.assertThat(catalog_ref, matchers.HasLength(2)) self.config_fixture.config( group='endpoint_filter', return_all_endpoints_if_no_filter=False ) catalog_ref = PROVIDERS.catalog_api.get_v3_catalog( uuid.uuid4().hex, None ) self.assertThat(catalog_ref, matchers.HasLength(0)) def test_v3_catalog_endpoint_filter_enabled(self): srv_1 = unit.new_service_ref() PROVIDERS.catalog_api.create_service(srv_1['id'], srv_1) endpoint_1 = unit.new_endpoint_ref( service_id=srv_1['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint_1['id'], endpoint_1) endpoint_2 = unit.new_endpoint_ref( service_id=srv_1['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint_2['id'], endpoint_2) # create endpoint-project association. PROVIDERS.catalog_api.add_endpoint_to_project( endpoint_1['id'], self.project_bar['id'] ) catalog_ref = PROVIDERS.catalog_api.get_v3_catalog( uuid.uuid4().hex, self.project_bar['id'] ) self.assertThat(catalog_ref, matchers.HasLength(1)) self.assertThat(catalog_ref[0]['endpoints'], matchers.HasLength(1)) # the endpoint is that defined in the endpoint-project association. self.assertEqual( endpoint_1['id'], catalog_ref[0]['endpoints'][0]['id'] ) def test_v3_catalog_endpoint_filter_disabled(self): # there is no endpoint-project association defined. 
self.config_fixture.config( group='endpoint_filter', return_all_endpoints_if_no_filter=True ) srv_1 = unit.new_service_ref() PROVIDERS.catalog_api.create_service(srv_1['id'], srv_1) endpoint_1 = unit.new_endpoint_ref( service_id=srv_1['id'], region_id=None ) PROVIDERS.catalog_api.create_endpoint(endpoint_1['id'], endpoint_1) srv_2 = unit.new_service_ref() PROVIDERS.catalog_api.create_service(srv_2['id'], srv_2) catalog_ref = PROVIDERS.catalog_api.get_v3_catalog( uuid.uuid4().hex, self.project_bar['id'] ) self.assertThat(catalog_ref, matchers.HasLength(2)) srv_id_list = [catalog_ref[0]['id'], catalog_ref[1]['id']] self.assertCountEqual([srv_1['id'], srv_2['id']], srv_id_list) class SqlPolicy(SqlTests, policy_tests.PolicyTests): pass class SqlInheritance(SqlTests, assignment_tests.InheritanceTests): pass class SqlImpliedRoles(SqlTests, assignment_tests.ImpliedRoleTests): pass class SqlFilterTests(SqlTests, identity_tests.FilterTests): def clean_up_entities(self): """Clean up entity test data from Filter Test Cases.""" for entity in ['user', 'group', 'project']: self._delete_test_data(entity, self.entity_list[entity]) self._delete_test_data(entity, self.domain1_entity_list[entity]) del self.entity_list del self.domain1_entity_list self.domain1['enabled'] = False PROVIDERS.resource_api.update_domain(self.domain1['id'], self.domain1) PROVIDERS.resource_api.delete_domain(self.domain1['id']) del self.domain1 def test_list_entities_filtered_by_domain(self): # NOTE(henry-nash): This method is here rather than in # unit.identity.test_backends since any domain filtering with LDAP is # handled by the manager layer (and is already tested elsewhere) not at # the driver level. 
self.addCleanup(self.clean_up_entities) self.domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domain1['id'], self.domain1) self.entity_list = {} self.domain1_entity_list = {} for entity in ['user', 'group', 'project']: # Create 5 entities, 3 of which are in domain1 DOMAIN1_ENTITIES = 3 self.entity_list[entity] = self._create_test_data(entity, 2) self.domain1_entity_list[entity] = self._create_test_data( entity, DOMAIN1_ENTITIES, self.domain1['id'] ) # Should get back the DOMAIN1_ENTITIES in domain1 hints = driver_hints.Hints() hints.add_filter('domain_id', self.domain1['id']) entities = self._list_entities(entity)(hints=hints) self.assertEqual(DOMAIN1_ENTITIES, len(entities)) self._match_with_list(entities, self.domain1_entity_list[entity]) # Check the driver has removed the filter from the list hints self.assertFalse(hints.get_exact_filter_by_name('domain_id')) def test_filter_sql_injection_attack(self): """Test against sql injection attack on filters. Test Plan: - Attempt to get all entities back by passing a two-term attribute - Attempt to piggyback filter to damage DB (e.g. drop table) """ # Check we have some users users = PROVIDERS.identity_api.list_users() self.assertGreater(len(users), 0) hints = driver_hints.Hints() hints.add_filter('name', "anything' or 'x'='x") users = PROVIDERS.identity_api.list_users(hints=hints) self.assertEqual(0, len(users)) # See if we can add a SQL command...use the group table instead of the # user table since 'user' is reserved word for SQLAlchemy. 
group = unit.new_group_ref(domain_id=CONF.identity.default_domain_id) group = PROVIDERS.identity_api.create_group(group) hints = driver_hints.Hints() hints.add_filter('name', "x'; drop table group") groups = PROVIDERS.identity_api.list_groups(hints=hints) self.assertEqual(0, len(groups)) groups = PROVIDERS.identity_api.list_groups() self.assertGreater(len(groups), 0) class SqlLimitTests(SqlTests, identity_tests.LimitTests): def setUp(self): super().setUp() identity_tests.LimitTests.setUp(self) class FakeTable(sql.ModelBase): __tablename__ = 'test_table' col = sql.Column(sql.String(32), primary_key=True) @sql.handle_conflicts('keystone') def insert(self): raise db_exception.DBDuplicateEntry @sql.handle_conflicts('keystone') def update(self): raise db_exception.DBError( inner_exception=exc.IntegrityError('a', 'a', 'a') ) @sql.handle_conflicts('keystone') def lookup(self): raise KeyError class SqlDecorators(unit.TestCase): def test_initialization_fail(self): self.assertRaises( exception.StringLengthExceeded, FakeTable, col='a' * 64 ) def test_initialization(self): tt = FakeTable(col='a') self.assertEqual('a', tt.col) def test_conflict_happend(self): self.assertRaises(exception.Conflict, FakeTable().insert) self.assertRaises(exception.UnexpectedError, FakeTable().update) def test_not_conflict_error(self): self.assertRaises(KeyError, FakeTable().lookup) class SqlModuleInitialization(unit.TestCase): @mock.patch.object(sql.core, 'CONF') @mock.patch.object(options, 'set_defaults') def test_initialize_module(self, set_defaults, CONF): sql.initialize() set_defaults.assert_called_with( CONF, connection='sqlite:///keystone.db' ) class SqlCredential(SqlTests): def _create_credential_with_user_id(self, user_id=uuid.uuid4().hex): credential = unit.new_credential_ref( user_id=user_id, extra=uuid.uuid4().hex, type=uuid.uuid4().hex ) PROVIDERS.credential_api.create_credential( credential['id'], credential ) return credential def _validateCredentialList( self, retrieved_credentials, 
expected_credentials ): self.assertEqual(len(expected_credentials), len(retrieved_credentials)) retrived_ids = [c['id'] for c in retrieved_credentials] for cred in expected_credentials: self.assertIn(cred['id'], retrived_ids) def setUp(self): self.useFixture(database.Database()) super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_provider.MAX_ACTIVE_KEYS, ) ) self.credentials = [] for _ in range(3): self.credentials.append(self._create_credential_with_user_id()) self.user_credentials = [] for _ in range(3): cred = self._create_credential_with_user_id(self.user_foo['id']) self.user_credentials.append(cred) self.credentials.append(cred) def test_list_credentials(self): credentials = PROVIDERS.credential_api.list_credentials() self._validateCredentialList(credentials, self.credentials) # test filtering using hints hints = driver_hints.Hints() hints.add_filter('user_id', self.user_foo['id']) credentials = PROVIDERS.credential_api.list_credentials(hints) self._validateCredentialList(credentials, self.user_credentials) def test_list_credentials_for_user(self): credentials = PROVIDERS.credential_api.list_credentials_for_user( self.user_foo['id'] ) self._validateCredentialList(credentials, self.user_credentials) def test_list_credentials_for_user_and_type(self): cred = self.user_credentials[0] credentials = PROVIDERS.credential_api.list_credentials_for_user( self.user_foo['id'], type=cred['type'] ) self._validateCredentialList(credentials, [cred]) def test_create_credential_is_encrypted_when_stored(self): credential = unit.new_credential_ref(user_id=uuid.uuid4().hex) credential_id = credential['id'] returned_credential = PROVIDERS.credential_api.create_credential( credential_id, credential ) # Make sure the `blob` is *not* encrypted when returned from the # credential API. 
self.assertEqual(returned_credential['blob'], credential['blob']) credential_from_backend = ( PROVIDERS.credential_api.driver.get_credential(credential_id) ) # Pull the credential directly from the backend, the `blob` should be # encrypted. self.assertNotEqual( credential_from_backend['encrypted_blob'], credential['blob'] ) def test_list_credentials_is_decrypted(self): credential = unit.new_credential_ref(user_id=uuid.uuid4().hex) credential_id = credential['id'] created_credential = PROVIDERS.credential_api.create_credential( credential_id, credential ) # Pull the credential directly from the backend, the `blob` should be # encrypted. credential_from_backend = ( PROVIDERS.credential_api.driver.get_credential(credential_id) ) self.assertNotEqual( credential_from_backend['encrypted_blob'], credential['blob'] ) # Make sure the `blob` values listed from the API are not encrypted. listed_credentials = PROVIDERS.credential_api.list_credentials() self.assertIn(created_credential, listed_credentials) class SqlRegisteredLimit(SqlTests, limit_tests.RegisteredLimitTests): def setUp(self): super().setUp() fixtures_to_cleanup = [] for service in default_fixtures.SERVICES: service_id = service['id'] rv = PROVIDERS.catalog_api.create_service(service_id, service) attrname = service['extra']['name'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for region in default_fixtures.REGIONS: rv = PROVIDERS.catalog_api.create_region(region) attrname = region['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup)) class SqlLimit(SqlTests, limit_tests.LimitTests): def setUp(self): super().setUp() fixtures_to_cleanup = [] for service in default_fixtures.SERVICES: service_id = service['id'] rv = PROVIDERS.catalog_api.create_service(service_id, service) attrname = service['extra']['name'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) for region in default_fixtures.REGIONS: rv = 
PROVIDERS.catalog_api.create_region(region) attrname = region['id'] setattr(self, attrname, rv) fixtures_to_cleanup.append(attrname) self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup)) registered_limit_1 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_one['id'], resource_name='volume', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_2 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='snapshot', default_limit=10, id=uuid.uuid4().hex, ) registered_limit_3 = unit.new_registered_limit_ref( service_id=self.service_one['id'], region_id=self.region_two['id'], resource_name='backup', default_limit=10, id=uuid.uuid4().hex, ) PROVIDERS.unified_limit_api.create_registered_limits( [registered_limit_1, registered_limit_2, registered_limit_3] ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_backend_templated.py0000664000175000017500000003655500000000000024522 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from unittest import mock
import uuid

from keystone.catalog.backends import base as catalog_base
from keystone.common import provider_api
from keystone.tests import unit
from keystone.tests.unit.catalog import test_backends as catalog_tests
from keystone.tests.unit import default_fixtures
from keystone.tests.unit.ksfixtures import database

# Registry of loaded backend managers (catalog_api, identity_api, ...).
PROVIDERS = provider_api.ProviderAPIs

# Shared skip message for write-path tests: the templated catalog backend
# is read from a template file, so create/update/delete operations are not
# properly supported and the inherited write tests are skipped.
BROKEN_WRITE_FUNCTIONALITY_MSG = (
    "Templated backend doesn't correctly implement write operations"
)


class TestTemplatedCatalog(unit.TestCase, catalog_tests.CatalogTests):
    """Run the shared catalog test suite against the 'templated' driver.

    Inherits the generic catalog tests from
    ``keystone.tests.unit.catalog.test_backends.CatalogTests`` and overrides
    or skips the ones that assume a writable backend.
    """

    # Expected parsed contents of the 'default_catalog.templates' test file
    # configured in config_overrides(); compared verbatim by
    # test_get_catalog() below.
    DEFAULT_FIXTURE = {
        'RegionOne': {
            'compute': {
                'adminURL': 'http://localhost:8774/v1.1/bar',
                'publicURL': 'http://localhost:8774/v1.1/bar',
                'internalURL': 'http://localhost:8774/v1.1/bar',
                'name': "'Compute Service'",
                'id': '2',
            },
            'identity': {
                'adminURL': 'http://localhost:35357/v3',
                'publicURL': 'http://localhost:5000/v3',
                'internalURL': 'http://localhost:35357/v3',
                'name': "'Identity Service'",
                'id': '1',
            },
        }
    }

    def setUp(self):
        """Set up a test database, load backends and default fixtures."""
        super().setUp()
        self.useFixture(database.Database())
        self.load_backends()
        self.load_fixtures(default_fixtures)

    def config_overrides(self):
        """Point the catalog driver at the templated test catalog file."""
        super().config_overrides()
        self.config_fixture.config(
            group='catalog',
            driver='templated',
            template_file=unit.dirs.tests('default_catalog.templates'),
        )

    def test_get_catalog(self):
        # The templated driver returns the template contents for these
        # (arbitrary) user/project identifiers; the result should match the
        # fixture above exactly.
        catalog_ref = PROVIDERS.catalog_api.get_catalog('foo', 'bar')
        self.assertDictEqual(self.DEFAULT_FIXTURE, catalog_ref)

    # NOTE(lbragstad): This test is skipped because the catalog is being
    # modified within the test and not through the API.
    @unit.skip_if_cache_is_enabled('catalog')
    def test_catalog_ignored_malformed_urls(self):
        """Endpoints with malformed URL templates are left out of the catalog."""
        # both endpoints are in the catalog
        catalog_ref = PROVIDERS.catalog_api.get_catalog('foo', 'bar')
        self.assertEqual(2, len(catalog_ref['RegionOne']))

        # Corrupt the compute adminURL directly in the driver's in-memory
        # templates.  NOTE(review): '$(tenant)s' appears intended as an
        # unsubstitutable key so URL formatting fails — confirm against the
        # templated driver's substitution logic.
        region = PROVIDERS.catalog_api.driver.templates['RegionOne']
        region['compute']['adminURL'] = 'http://localhost:8774/v1.1/$(tenant)s'

        # the malformed one has been removed
        catalog_ref = PROVIDERS.catalog_api.get_catalog('foo', 'bar')
        self.assertEqual(1, len(catalog_ref['RegionOne']))

    def test_get_v3_catalog_endpoint_disabled(self):
        self.skip_test_overrides(
            "Templated backend doesn't have disabled endpoints"
        )

    def assert_catalogs_equal(self, expected, observed):
        """Assert two v3 catalogs match, ignoring service/endpoint order.

        Services are paired by 'id' after sorting; each service's
        'endpoints' list is compared as an unordered collection.
        NOTE(review): pop() mutates the caller's dicts, and zip() silently
        ignores surplus services if the two lists differ in length.
        """

        def sort_key(d):
            return d['id']

        for e, o in zip(
            sorted(expected, key=sort_key), sorted(observed, key=sort_key)
        ):
            expected_endpoints = e.pop('endpoints')
            observed_endpoints = o.pop('endpoints')
            self.assertDictEqual(e, o)
            self.assertCountEqual(expected_endpoints, observed_endpoints)

    def test_get_v3_catalog(self):
        """The v3 catalog substitutes the project id into compute URLs."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        catalog_ref = PROVIDERS.catalog_api.get_v3_catalog(user_id, project_id)
        exp_catalog = [
            {
                'endpoints': [
                    {
                        'interface': 'admin',
                        'region': 'RegionOne',
                        'url': 'http://localhost:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionOne',
                        'url': 'http://localhost:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionOne',
                        'url': 'http://localhost:8774/v1.1/%s' % project_id,
                    },
                ],
                'type': 'compute',
                'name': "'Compute Service'",
                'id': '2',
            },
            {
                'endpoints': [
                    {
                        'interface': 'admin',
                        'region': 'RegionOne',
                        'url': 'http://localhost:35357/v3',
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionOne',
                        'url': 'http://localhost:5000/v3',
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionOne',
                        'url': 'http://localhost:35357/v3',
                    },
                ],
                'type': 'identity',
                'name': "'Identity Service'",
                'id': '1',
            },
        ]
        self.assert_catalogs_equal(exp_catalog, catalog_ref)

    def test_get_multi_region_v3_catalog(self):
        """Catalog built from a multi-region template exposes both regions."""
        user_id = uuid.uuid4().hex
        project_id = uuid.uuid4().hex
        catalog_api = PROVIDERS.catalog_api

        # Load the multi-region catalog.
        catalog_api._load_templates(
            unit.dirs.tests('default_catalog_multi_region.templates')
        )
        catalog_ref = catalog_api.get_v3_catalog(user_id, project_id)
        exp_catalog = [
            {
                'endpoints': [
                    {
                        'interface': 'admin',
                        'region': 'RegionOne',
                        'url': 'http://region-one:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionOne',
                        'url': 'http://region-one:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionOne',
                        'url': 'http://region-one:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'admin',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:8774/v1.1/%s' % project_id,
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:8774/v1.1/%s' % project_id,
                    },
                ],
                'type': 'compute',
                'name': "'Compute Service'",
                'id': '2',
            },
            {
                'endpoints': [
                    {
                        'interface': 'admin',
                        'region': 'RegionOne',
                        'url': 'http://region-one:35357/v3',
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionOne',
                        'url': 'http://region-one:5000/v3',
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionOne',
                        'url': 'http://region-one:35357/v3',
                    },
                    {
                        'interface': 'admin',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:35357/v3',
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:5000/v3',
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionTwo',
                        'url': 'http://region-two:35357/v3',
                    },
                ],
                'type': 'identity',
                'name': "'Identity Service'",
                'id': '1',
            },
        ]
        self.assert_catalogs_equal(exp_catalog, catalog_ref)

    def test_get_catalog_ignores_endpoints_with_invalid_urls(self):
        """With no project id, endpoints needing substitution are skipped."""
        user_id = uuid.uuid4().hex
        project_id = None
        # If the URL has no 'project_id' to substitute, we will skip the
        # endpoint which contains this kind of URL.
        catalog_ref = PROVIDERS.catalog_api.get_v3_catalog(user_id, project_id)
        exp_catalog = [
            {
                # Compute URLs require a project id, so all of its endpoints
                # drop out of the catalog.
                'endpoints': [],
                'type': 'compute',
                'name': "'Compute Service'",
                'id': '2',
            },
            {
                'endpoints': [
                    {
                        'interface': 'admin',
                        'region': 'RegionOne',
                        'url': 'http://localhost:35357/v3',
                    },
                    {
                        'interface': 'public',
                        'region': 'RegionOne',
                        'url': 'http://localhost:5000/v3',
                    },
                    {
                        'interface': 'internal',
                        'region': 'RegionOne',
                        'url': 'http://localhost:35357/v3',
                    },
                ],
                'type': 'identity',
                'name': "'Identity Service'",
                'id': '1',
            },
        ]
        self.assert_catalogs_equal(exp_catalog, catalog_ref)

    def test_list_regions_filtered_by_parent_region_id(self):
        self.skip_test_overrides('Templated backend does not support hints')

    def test_service_filtering(self):
        self.skip_test_overrides("Templated backend doesn't support filtering")

    def test_list_services_with_hints(self):
        """list_services accepts hints but the templated backend ignores them."""
        hints = {}
        services = PROVIDERS.catalog_api.list_services(hints=hints)
        exp_services = [
            {
                'type': 'compute',
                'description': '',
                'enabled': True,
                'name': "'Compute Service'",
                'id': 'compute',
            },
            {
                'type': 'identity',
                'description': '',
                'enabled': True,
                'name': "'Identity Service'",
                'id': 'identity',
            },
        ]
        self.assertCountEqual(exp_services, services)

    # NOTE(dstanek): the following methods have been overridden
    # from unit.catalog.test_backends.CatalogTests.
def test_region_crud(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_region_crud(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_region(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_region_extras(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_region_with_duplicate_id(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_region_returns_not_found(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_region_invalid_parent_region_returns_not_found(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_avoid_creating_circular_references_in_regions_update(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) @mock.patch.object( catalog_base.CatalogDriverBase, "_ensure_no_circle_in_hierarchical_regions", ) def test_circular_regions_can_be_deleted(self, mock_ensure_on_circle): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_service_crud(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_cache_layer_service_crud(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_service(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_service_with_endpoint(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_cache_layer_delete_service_with_endpoint(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_service_returns_not_found(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint_nonexistent_service(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def 
test_create_endpoint_nonexistent_region(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint_nonexistent_region(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_get_endpoint_returns_not_found(self): self.skip_test_overrides( "Templated backend doesn't use IDs for endpoints." ) def test_delete_endpoint_returns_not_found(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_create_endpoint(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_update_endpoint(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_list_endpoints(self): expected_urls = { 'http://localhost:5000/v3', 'http://localhost:35357/v3', 'http://localhost:8774/v1.1/$(tenant_id)s', } endpoints = PROVIDERS.catalog_api.list_endpoints() self.assertEqual(expected_urls, {e['url'] for e in endpoints}) @unit.skip_if_cache_disabled('catalog') def test_invalidate_cache_when_updating_endpoint(self): self.skip_test_overrides(BROKEN_WRITE_FUNCTIONALITY_MSG) def test_delete_endpoint_group_association_by_project(self): # Deleting endpoint group association is not supported by the templated # driver, but it should be silent about it and not raise an error. PROVIDERS.catalog_api.delete_endpoint_group_association_by_project( uuid.uuid4().hex ) def test_delete_association_by_endpoint(self): # Deleting endpoint association is not supported by the templated # driver, but it should be silent about it and not raise an error. PROVIDERS.catalog_api.delete_association_by_endpoint(uuid.uuid4().hex) def test_delete_association_by_project(self): # Deleting endpoint association is not supported by the templated # driver, but it should be silent about it and not raise an error. 
PROVIDERS.catalog_api.delete_association_by_project(uuid.uuid4().hex) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_cli.py0000664000175000017500000026142000000000000021632 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import argparse import configparser import copy import datetime import http.client import logging import os from unittest import mock import uuid import fixtures import freezegun import oslo_config.fixture from oslo_log import log from oslo_serialization import jsonutils from oslo_upgradecheck import upgradecheck from oslo_utils import timeutils from testtools import matchers from keystone.cmd import cli from keystone.cmd.doctor import caching from keystone.cmd.doctor import credential from keystone.cmd.doctor import database as doc_database from keystone.cmd.doctor import debug from keystone.cmd.doctor import federation from keystone.cmd.doctor import ldap from keystone.cmd.doctor import security_compliance from keystone.cmd.doctor import tokens from keystone.cmd.doctor import tokens_fernet from keystone.cmd import status from keystone.common import provider_api from keystone.common.sql import upgrades import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.identity.mapping_backends import mapping as identity_mapping from keystone.tests import unit from 
keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit.ksfixtures import ldapdb from keystone.tests.unit.ksfixtures import policy from keystone.tests.unit.ksfixtures import temporaryfile from keystone.tests.unit import mapping_fixtures CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class CliLoggingTestCase(unit.BaseTestCase): def setUp(self): self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) self.useFixture( fixtures.MockPatch( 'oslo_config.cfg.find_config_files', return_value=[] ) ) fd = self.useFixture(temporaryfile.SecureTempFile()) self.fake_config_file = fd.file_name super().setUp() # NOTE(crinkle): the command call doesn't have to actually work, # that's what the other unit tests are for. So just mock it out. class FakeConfCommand: def __init__(self): self.cmd_class = mock.Mock() self.useFixture( fixtures.MockPatchObject(CONF, 'command', FakeConfCommand()) ) self.logging = self.useFixture(fixtures.FakeLogger(level=log.WARN)) def test_absent_config_logs_warning(self): expected_msg = 'Config file not found, using default configs.' cli.main(argv=['keystone-manage', 'db_sync']) self.assertThat(self.logging.output, matchers.Contains(expected_msg)) def test_present_config_does_not_log_warning(self): fake_argv = [ 'keystone-manage', '--config-file', self.fake_config_file, 'doctor', ] cli.main(argv=fake_argv) expected_msg = 'Config file not found, using default configs.' 
self.assertNotIn(expected_msg, self.logging.output) class CliBootStrapTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super().setUp() self.bootstrap = cli.BootStrap() def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def config(self, config_files): CONF( args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex], project='keystone', default_config_files=config_files, ) def test_bootstrap(self): self._do_test_bootstrap(self.bootstrap) def _do_test_bootstrap(self, bootstrap): try: PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN, ) except exception.Conflict: pass bootstrap.do_bootstrap() project = PROVIDERS.resource_api.get_project_by_name( bootstrap.project_name, 'default' ) user = PROVIDERS.identity_api.get_user_by_name( bootstrap.username, 'default' ) admin_role = PROVIDERS.role_api.get_role(bootstrap.role_id) manager_role = PROVIDERS.role_api.get_role(bootstrap.manager_role_id) member_role = PROVIDERS.role_api.get_role(bootstrap.member_role_id) reader_role = PROVIDERS.role_api.get_role(bootstrap.reader_role_id) service_role = PROVIDERS.role_api.get_role(bootstrap.service_role_id) role_list = PROVIDERS.assignment_api.get_roles_for_user_and_project( user['id'], project['id'] ) role_list_len = 5 if bootstrap.bootstrapper.project_name: role_list_len = 4 self.assertIs(role_list_len, len(role_list)) self.assertIn(admin_role['id'], role_list) self.assertIn(manager_role['id'], role_list) self.assertIn(member_role['id'], role_list) self.assertIn(reader_role['id'], role_list) if not bootstrap.bootstrapper.project_name: self.assertIn(service_role['id'], role_list) system_roles = PROVIDERS.assignment_api.list_system_grants_for_user( user['id'] ) self.assertIs(1, len(system_roles)) self.assertEqual(system_roles[0]['id'], 
admin_role['id']) # NOTE(morganfainberg): Pass an empty context, it isn't used by # `authenticate` method. with self.make_request(): PROVIDERS.identity_api.authenticate(user['id'], bootstrap.password) if bootstrap.region_id: region = PROVIDERS.catalog_api.get_region(bootstrap.region_id) self.assertEqual(self.region_id, region['id']) if bootstrap.service_id: svc = PROVIDERS.catalog_api.get_service(bootstrap.service_id) self.assertEqual(self.service_name, svc['name']) self.assertEqual( {'admin', 'public', 'internal'}, set(bootstrap.endpoints) ) urls = { 'public': self.public_url, 'internal': self.internal_url, 'admin': self.admin_url, } for interface, url in urls.items(): endpoint_id = bootstrap.endpoints[interface] endpoint = PROVIDERS.catalog_api.get_endpoint(endpoint_id) self.assertEqual(self.region_id, endpoint['region_id']) self.assertEqual(url, endpoint['url']) self.assertEqual(svc['id'], endpoint['service_id']) self.assertEqual(interface, endpoint['interface']) def test_bootstrap_is_idempotent_when_password_does_not_change(self): # NOTE(morganfainberg): Ensure we can run bootstrap with the same # configuration multiple times without erroring. self._do_test_bootstrap(self.bootstrap) app = self.loadapp() v3_password_data = { 'auth': { 'identity': { "methods": ["password"], "password": { "user": { "name": self.bootstrap.username, "password": self.bootstrap.password, "domain": {"id": CONF.identity.default_domain_id}, } }, } } } with app.test_client() as c: auth_response = c.post('/v3/auth/tokens', json=v3_password_data) token = auth_response.headers['X-Subject-Token'] self._do_test_bootstrap(self.bootstrap) # build validation request with app.test_client() as c: # Get a new X-Auth-Token r = c.post('/v3/auth/tokens', json=v3_password_data) # Validate the old token with our new X-Auth-Token. 
c.get( '/v3/auth/tokens', headers={ 'X-Auth-Token': r.headers['X-Subject-Token'], 'X-Subject-Token': token, }, ) admin_role = PROVIDERS.role_api.get_role(self.bootstrap.role_id) reader_role = PROVIDERS.role_api.get_role( self.bootstrap.reader_role_id ) member_role = PROVIDERS.role_api.get_role( self.bootstrap.member_role_id ) self.assertEqual(admin_role['options'], {'immutable': True}) self.assertEqual(member_role['options'], {'immutable': True}) self.assertEqual(reader_role['options'], {'immutable': True}) def test_bootstrap_is_not_idempotent_when_password_does_change(self): # NOTE(lbragstad): Ensure bootstrap isn't idempotent when run with # different arguments or configuration values. self._do_test_bootstrap(self.bootstrap) app = self.loadapp() v3_password_data = { 'auth': { 'identity': { "methods": ["password"], "password": { "user": { "name": self.bootstrap.username, "password": self.bootstrap.password, "domain": {"id": CONF.identity.default_domain_id}, } }, } } } time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_time: with app.test_client() as c: auth_response = c.post( '/v3/auth/tokens', json=v3_password_data ) token = auth_response.headers['X-Subject-Token'] new_passwd = uuid.uuid4().hex os.environ['OS_BOOTSTRAP_PASSWORD'] = new_passwd self._do_test_bootstrap(self.bootstrap) v3_password_data['auth']['identity']['password']['user'][ 'password' ] = new_passwd # Move time forward a second to avoid rev. event capturing the new # auth-token since we're within a single second (possibly) for the # test case. frozen_time.tick(delta=datetime.timedelta(seconds=1)) # Validate the old token with app.test_client() as c: # Get a new X-Auth-Token r = c.post('/v3/auth/tokens', json=v3_password_data) # Since the user account was recovered with a different # password, we shouldn't be able to validate this token. # Bootstrap should have persisted a revocation event because # the user's password was updated. 
Since this token was # obtained using the original password, it should now be # invalid. c.get( '/v3/auth/tokens', headers={ 'X-Auth-Token': r.headers['X-Subject-Token'], 'X-Subject-Token': token, }, expected_status_code=http.client.NOT_FOUND, ) def test_bootstrap_recovers_user(self): self._do_test_bootstrap(self.bootstrap) # Completely lock the user out. user_id = PROVIDERS.identity_api.get_user_by_name( self.bootstrap.username, 'default' )['id'] PROVIDERS.identity_api.update_user( user_id, {'enabled': False, 'password': uuid.uuid4().hex} ) # The second bootstrap run will recover the account. self._do_test_bootstrap(self.bootstrap) # Sanity check that the original password works again. with self.make_request(): PROVIDERS.identity_api.authenticate( user_id, self.bootstrap.password ) def test_bootstrap_with_explicit_immutable_roles(self): CONF( args=[ 'bootstrap', '--bootstrap-password', uuid.uuid4().hex, '--immutable-roles', ], project='keystone', ) self._do_test_bootstrap(self.bootstrap) admin_role = PROVIDERS.role_api.get_role(self.bootstrap.role_id) reader_role = PROVIDERS.role_api.get_role( self.bootstrap.reader_role_id ) member_role = PROVIDERS.role_api.get_role( self.bootstrap.member_role_id ) self.assertTrue(admin_role['options']['immutable']) self.assertTrue(member_role['options']['immutable']) self.assertTrue(reader_role['options']['immutable']) def test_bootstrap_with_default_immutable_roles(self): CONF( args=['bootstrap', '--bootstrap-password', uuid.uuid4().hex], project='keystone', ) self._do_test_bootstrap(self.bootstrap) admin_role = PROVIDERS.role_api.get_role(self.bootstrap.role_id) reader_role = PROVIDERS.role_api.get_role( self.bootstrap.reader_role_id ) member_role = PROVIDERS.role_api.get_role( self.bootstrap.member_role_id ) self.assertTrue(admin_role['options']['immutable']) self.assertTrue(member_role['options']['immutable']) self.assertTrue(reader_role['options']['immutable']) def test_bootstrap_with_no_immutable_roles(self): CONF( args=[ 
'bootstrap', '--bootstrap-password', uuid.uuid4().hex, '--no-immutable-roles', ], project='keystone', ) self._do_test_bootstrap(self.bootstrap) admin_role = PROVIDERS.role_api.get_role(self.bootstrap.role_id) reader_role = PROVIDERS.role_api.get_role( self.bootstrap.reader_role_id ) member_role = PROVIDERS.role_api.get_role( self.bootstrap.member_role_id ) self.assertNotIn('immutable', admin_role['options']) self.assertNotIn('immutable', member_role['options']) self.assertNotIn('immutable', reader_role['options']) def test_bootstrap_with_ambiguous_role_names(self): # bootstrap system to create the default admin role self._do_test_bootstrap(self.bootstrap) # create a domain-specific roles that share the same names as the # default roles created by keystone-manage bootstrap domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} domain = PROVIDERS.resource_api.create_domain(domain['id'], domain) domain_roles = {} for name in ['admin', 'member', 'reader', 'service']: domain_role = { 'domain_id': domain['id'], 'id': uuid.uuid4().hex, 'name': name, } domain_roles[name] = PROVIDERS.role_api.create_role( domain_role['id'], domain_role ) # ensure subsequent bootstrap attempts don't fail because of # ambiguity self._do_test_bootstrap(self.bootstrap) class CliBootStrapTestCaseWithEnvironment(CliBootStrapTestCase): def config(self, config_files): CONF( args=['bootstrap'], project='keystone', default_config_files=config_files, ) def setUp(self): super().setUp() self.password = uuid.uuid4().hex self.username = uuid.uuid4().hex self.project_name = uuid.uuid4().hex self.role_name = uuid.uuid4().hex self.service_name = uuid.uuid4().hex self.public_url = uuid.uuid4().hex self.internal_url = uuid.uuid4().hex self.admin_url = uuid.uuid4().hex self.region_id = uuid.uuid4().hex self.default_domain = { 'id': CONF.identity.default_domain_id, 'name': 'Default', } self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_PASSWORD', newvalue=self.password ) ) self.useFixture( 
fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_USERNAME', newvalue=self.username ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_PROJECT_NAME', newvalue=self.project_name ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_ROLE_NAME', newvalue=self.role_name ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_SERVICE_NAME', newvalue=self.service_name ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_PUBLIC_URL', newvalue=self.public_url ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_INTERNAL_URL', newvalue=self.internal_url ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_ADMIN_URL', newvalue=self.admin_url ) ) self.useFixture( fixtures.EnvironmentVariable( 'OS_BOOTSTRAP_REGION_ID', newvalue=self.region_id ) ) PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def test_assignment_created_with_user_exists(self): # test assignment can be created if user already exists. PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) user_ref = unit.new_user_ref( self.default_domain['id'], name=self.username, password=self.password, ) PROVIDERS.identity_api.create_user(user_ref) self._do_test_bootstrap(self.bootstrap) def test_assignment_created_with_project_exists(self): # test assignment can be created if project already exists. PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) project_ref = unit.new_project_ref( self.default_domain['id'], name=self.project_name ) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) self._do_test_bootstrap(self.bootstrap) def test_assignment_created_with_role_exists(self): # test assignment can be created if role already exists. 
PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) role = unit.new_role_ref(name=self.role_name) PROVIDERS.role_api.create_role(role['id'], role) self._do_test_bootstrap(self.bootstrap) def test_assignment_created_with_region_exists(self): # test assignment can be created if region already exists. PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) region = unit.new_region_ref(id=self.region_id) PROVIDERS.catalog_api.create_region(region) self._do_test_bootstrap(self.bootstrap) def test_endpoints_created_with_service_exists(self): # test assignment can be created if service already exists. PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) service = unit.new_service_ref(name=self.service_name) PROVIDERS.catalog_api.create_service(service['id'], service) self._do_test_bootstrap(self.bootstrap) def test_endpoints_created_with_endpoint_exists(self): # test assignment can be created if endpoint already exists. 
PROVIDERS.resource_api.create_domain( self.default_domain['id'], self.default_domain ) service = unit.new_service_ref(name=self.service_name) PROVIDERS.catalog_api.create_service(service['id'], service) region = unit.new_region_ref(id=self.region_id) PROVIDERS.catalog_api.create_region(region) endpoint = unit.new_endpoint_ref( interface='public', service_id=service['id'], url=self.public_url, region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) self._do_test_bootstrap(self.bootstrap) def test_endpoints_created_with_new_endpoints(self): service = unit.new_service_ref(name=self.service_name, type='identity') PROVIDERS.catalog_api.create_service(service['id'], service) region = unit.new_region_ref(id=self.region_id) PROVIDERS.catalog_api.create_region(region) endpoint = unit.new_endpoint_ref( interface='public', service_id=service['id'], url=uuid.uuid4().hex, region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) self._do_test_bootstrap(self.bootstrap) updated_endpoint = PROVIDERS.catalog_api.get_endpoint(endpoint['id']) self.assertEqual(updated_endpoint['url'], self.bootstrap.public_url) class CliDomainConfigAllTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super().setUp() self.load_backends() self.config_fixture.config( group='identity', domain_config_dir=unit.TESTCONF + '/domain_configs_multi_ldap', ) self.domain_count = 3 self.setup_initial_domains() self.logging = self.useFixture(fixtures.FakeLogger(level=logging.INFO)) def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def cleanup_domains(self): for domain in self.domains: if domain == 'domain_default': # Not allowed to delete the default domain, but should at least # delete any domain-specific config for it. 
PROVIDERS.domain_config_api.delete_config( CONF.identity.default_domain_id ) continue this_domain = self.domains[domain] this_domain['enabled'] = False PROVIDERS.resource_api.update_domain( this_domain['id'], this_domain ) PROVIDERS.resource_api.delete_domain(this_domain['id']) self.domains = {} def config(self, config_files): CONF( args=['domain_config_upload', '--all'], project='keystone', default_config_files=config_files, ) def setup_initial_domains(self): def create_domain(domain): return PROVIDERS.resource_api.create_domain(domain['id'], domain) PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) self.domains = {} self.addCleanup(self.cleanup_domains) for x in range(1, self.domain_count): domain = 'domain%s' % x self.domains[domain] = create_domain( {'id': uuid.uuid4().hex, 'name': domain} ) self.default_domain = unit.new_domain_ref( description='The default domain', id=CONF.identity.default_domain_id, name='Default', ) self.domains['domain_default'] = create_domain(self.default_domain) def test_config_upload(self): # The values below are the same as in the domain_configs_multi_ldap # directory of test config_files. 
default_config = { 'ldap': { 'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com', }, 'identity': {'driver': 'ldap'}, } domain1_config = { 'ldap': { 'url': 'fake://memory1', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com', }, 'identity': {'driver': 'ldap', 'list_limit': '101'}, } domain2_config = { 'ldap': { 'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=myroot,cn=com', 'group_tree_dn': 'ou=UserGroups,dc=myroot,dc=org', 'user_tree_dn': 'ou=Users,dc=myroot,dc=org', }, 'identity': {'driver': 'ldap'}, } # Clear backend dependencies, since cli loads these manually provider_api.ProviderAPIs._clear_registry_instances() cli.DomainConfigUpload.main() res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( CONF.identity.default_domain_id ) self.assertEqual(default_config, res) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domains['domain1']['id'] ) self.assertEqual(domain1_config, res) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domains['domain2']['id'] ) self.assertEqual(domain2_config, res) class CliDomainConfigSingleDomainTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF( args=['domain_config_upload', '--domain-name', 'Default'], project='keystone', default_config_files=config_files, ) def test_config_upload(self): # The values below are the same as in the domain_configs_multi_ldap # directory of test config_files. 
default_config = { 'ldap': { 'url': 'fake://memory', 'user': 'cn=Admin', 'password': 'password', 'suffix': 'cn=example,cn=com', }, 'identity': {'driver': 'ldap'}, } # Clear backend dependencies, since cli loads these manually provider_api.ProviderAPIs._clear_registry_instances() cli.DomainConfigUpload.main() res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( CONF.identity.default_domain_id ) self.assertEqual(default_config, res) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domains['domain1']['id'] ) self.assertEqual({}, res) res = PROVIDERS.domain_config_api.get_config_with_sensitive_info( self.domains['domain2']['id'] ) self.assertEqual({}, res) def test_no_overwrite_config(self): # Create a config for the default domain default_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': 'ldap'}, } PROVIDERS.domain_config_api.create_config( CONF.identity.default_domain_id, default_config ) # Now try and upload the settings in the configuration file for the # default domain provider_api.ProviderAPIs._clear_registry_instances() with mock.patch('builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) file_name = 'keystone.%s.conf' % self.default_domain['name'] error_msg = _( 'Domain: %(domain)s already has a configuration defined - ' 'ignoring file: %(file)s.' 
) % { 'domain': self.default_domain['name'], 'file': os.path.join( CONF.identity.domain_config_dir, file_name ), } mock_print.assert_has_calls([mock.call(error_msg)]) res = PROVIDERS.domain_config_api.get_config( CONF.identity.default_domain_id ) # The initial config should not have been overwritten self.assertEqual(default_config, res) class CliDomainConfigNoOptionsTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF( args=['domain_config_upload'], project='keystone', default_config_files=config_files, ) def test_config_upload(self): provider_api.ProviderAPIs._clear_registry_instances() with mock.patch('builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) mock_print.assert_has_calls( [ mock.call( _( 'At least one option must be provided, use either ' '--all or --domain-name' ) ) ] ) class CliDomainConfigTooManyOptionsTestCase(CliDomainConfigAllTestCase): def config(self, config_files): CONF( args=['domain_config_upload', '--all', '--domain-name', 'Default'], project='keystone', default_config_files=config_files, ) def test_config_upload(self): provider_api.ProviderAPIs._clear_registry_instances() with mock.patch('builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) mock_print.assert_has_calls( [ mock.call( _( 'The --all option cannot be used with ' 'the --domain-name option' ) ) ] ) class CliDomainConfigInvalidDomainTestCase(CliDomainConfigAllTestCase): def config(self, config_files): self.invalid_domain_name = uuid.uuid4().hex CONF( args=[ 'domain_config_upload', '--domain-name', self.invalid_domain_name, ], project='keystone', default_config_files=config_files, ) def test_config_upload(self): provider_api.ProviderAPIs._clear_registry_instances() with mock.patch('builtins.print') as mock_print: self.assertRaises(unit.UnexpectedExit, cli.DomainConfigUpload.main) file_name = 'keystone.%s.conf' % self.invalid_domain_name error_msg = _( 'Invalid domain 
name: %(domain)s found in config file name: ' '%(file)s - ignoring this file.' ) % { 'domain': self.invalid_domain_name, 'file': os.path.join( CONF.identity.domain_config_dir, file_name ), } mock_print.assert_has_calls([mock.call(error_msg)]) class TestDomainConfigFinder(unit.BaseTestCase): def setUp(self): super().setUp() self.logging = self.useFixture(fixtures.LoggerFixture()) @mock.patch('os.walk') def test_finder_ignores_files(self, mock_walk): mock_walk.return_value = [ ['.', [], ['file.txt', 'keystone.conf', 'keystone.domain0.conf']], ] domain_configs = list(cli._domain_config_finder('.')) expected_domain_configs = [('./keystone.domain0.conf', 'domain0')] self.assertThat( domain_configs, matchers.Equals(expected_domain_configs) ) expected_msg_template = ( 'Ignoring file (%s) while scanning domain config directory' ) self.assertThat( self.logging.output, matchers.Contains(expected_msg_template % 'file.txt'), ) self.assertThat( self.logging.output, matchers.Contains(expected_msg_template % 'keystone.conf'), ) class CliDBSyncTestCase(unit.BaseTestCase): class FakeConfCommand: def __init__(self, parent): self.extension = False self.check = parent.command_check self.expand = parent.command_expand self.migrate = parent.command_migrate self.contract = parent.command_contract self.version = None def setUp(self): super().setUp() self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) self.patchers = patchers = [ mock.patch.object(upgrades, "offline_sync_database_to_version"), mock.patch.object(upgrades, "expand_schema"), mock.patch.object(upgrades, "migrate_data"), mock.patch.object(upgrades, "contract_schema"), ] for p in patchers: p.start() self.command_check = False self.command_expand = False self.command_migrate = False self.command_contract = False def tearDown(self): for p in self.patchers: p.stop() super().tearDown() def _assert_correct_call(self, mocked_function): for func in [ 
upgrades.offline_sync_database_to_version, upgrades.expand_schema, upgrades.migrate_data, upgrades.contract_schema, ]: if func == mocked_function: self.assertTrue(func.called) else: self.assertFalse(func.called) def test_db_sync(self): self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) cli.DbSync.main() self._assert_correct_call(upgrades.offline_sync_database_to_version) def test_db_sync_expand(self): self.command_expand = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) cli.DbSync.main() self._assert_correct_call(upgrades.expand_schema) def test_db_sync_migrate(self): self.command_migrate = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) cli.DbSync.main() self._assert_correct_call(upgrades.migrate_data) def test_db_sync_contract(self): self.command_contract = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) cli.DbSync.main() self._assert_correct_call(upgrades.contract_schema) class TestMappingPopulate(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): sqldb = self.useFixture(database.Database()) super().setUp() self.ldapdb = self.useFixture(ldapdb.LDAPDatabase()) self.ldapdb.clear() self.load_backends() sqldb.recreate() self.load_fixtures(default_fixtures) def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def config(self, config_files): CONF( args=['mapping_populate', '--domain-name', 'Default'], project='keystone', default_config_files=config_files, ) def test_mapping_populate(self): # mapping_populate should create id mappings. 
Test plan: # 0. Purge mappings # 1. Fetch user list directly via backend. It will not create any # mappings because it bypasses identity manager # 2. Verify that users have no public_id yet # 3. Execute mapping_populate. It should create id mappings # 4. For the same users verify that they have public_id now purge_filter = {} PROVIDERS.id_mapping_api.purge_mappings(purge_filter) hints = None users = PROVIDERS.identity_api.driver.list_users(hints) for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) # backends are loaded again in the command handler provider_api.ProviderAPIs._clear_registry_instances() cli.MappingPopulate.main() for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) def test_bad_domain_name(self): CONF( args=['mapping_populate', '--domain-name', uuid.uuid4().hex], project='keystone', ) # backends are loaded again in the command handler provider_api.ProviderAPIs._clear_registry_instances() # NOTE: assertEqual is used on purpose. assertFalse passes with None. self.assertEqual(False, cli.MappingPopulate.main()) class CliDomainConfigUploadNothing(unit.BaseTestCase): def setUp(self): super().setUp() config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) config_fixture.register_cli_opt(cli.command_opt) # NOTE(dstanek): since this is not testing any database # functionality there is no need to go through the motions and # setup a test database. 
        def fake_load_backends(self):
            # Stand-in for DomainConfigUploadFiles.load_backends: avoids real
            # backend setup and exposes only a mocked resource manager.
            self.resource_manager = mock.Mock()

        self.useFixture(
            fixtures.MockPatchObject(
                cli.DomainConfigUploadFiles,
                'load_backends',
                fake_load_backends,
            )
        )
        # Point the domain config dir at a fresh temp dir so there is nothing
        # to upload, and capture log output for assertions.
        tempdir = self.useFixture(fixtures.TempDir())
        config_fixture.config(group='identity', domain_config_dir=tempdir.path)
        self.logging = self.useFixture(
            fixtures.FakeLogger(level=logging.DEBUG)
        )

    def test_uploading_all_from_an_empty_directory(self):
        # Uploading with --all from an empty directory should log that no
        # domain configs were uploaded.
        CONF(
            args=['domain_config_upload', '--all'],
            project='keystone',
            default_config_files=[],
        )
        cli.DomainConfigUpload.main()

        expected_msg = (
            'No domain configs uploaded from %r'
            % CONF.identity.domain_config_dir
        )
        self.assertThat(self.logging.output, matchers.Contains(expected_msg))


class CachingDoctorTests(unit.TestCase):
    def test_symptom_caching_disabled(self):
        # Symptom Detected: Caching disabled
        self.config_fixture.config(group='cache', enabled=False)
        self.assertTrue(caching.symptom_caching_disabled())

        # No Symptom Detected: Caching is enabled
        self.config_fixture.config(group='cache', enabled=True)
        self.assertFalse(caching.symptom_caching_disabled())

    def test_caching_symptom_caching_enabled_without_a_backend(self):
        # Success Case: Caching enabled and backend configured
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(group='cache', backend='dogpile.cache.null')
        self.assertTrue(caching.symptom_caching_enabled_without_a_backend())

        # Failure Case 1: Caching disabled and backend not configured
        self.config_fixture.config(group='cache', enabled=False)
        self.config_fixture.config(group='cache', backend='dogpile.cache.null')
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())

        # Failure Case 2: Caching disabled and backend configured
        self.config_fixture.config(group='cache', enabled=False)
        self.config_fixture.config(
            group='cache', backend='dogpile.cache.memory'
        )
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())

        # Failure Case 3: Caching enabled and backend configured
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(
            group='cache', backend='dogpile.cache.memory'
        )
        self.assertFalse(caching.symptom_caching_enabled_without_a_backend())

    @mock.patch('keystone.cmd.doctor.caching.cache.CACHE_REGION')
    def test_symptom_connection_to_memcached(self, cache_mock):
        # Exercised once per memcached backend (dogpile.cache.memcached and
        # oslo_cache.memcache_pool). get_stats() is mocked to return one tuple
        # per reachable server; a shorter-than-configured list means at least
        # one server is unreachable.
        self.config_fixture.config(group='cache', enabled=True)
        self.config_fixture.config(
            group='cache',
            memcache_servers=['alpha.com:11211', 'beta.com:11211'],
        )
        self.config_fixture.config(
            group='cache', backend='dogpile.cache.memcached'
        )

        # No symptom detected: Caching driver can connect to both memcached
        # servers
        cache_mock.actual_backend.client.get_stats.return_value = [
            ('alpha.com', {}),
            ('beta.com', {}),
        ]
        self.assertFalse(caching.symptom_connection_to_memcached())

        # Symptom detected: Caching driver can't connect to either memcached
        # server
        cache_mock.actual_backend.client.get_stats.return_value = []
        self.assertTrue(caching.symptom_connection_to_memcached())

        # Symptom detected: Caching driver can't connect to one memcached
        # server
        cache_mock.actual_backend.client.get_stats.return_value = [
            ('alpha.com', {})
        ]
        self.assertTrue(caching.symptom_connection_to_memcached())

        self.config_fixture.config(
            group='cache',
            memcache_servers=['alpha.com:11211', 'beta.com:11211'],
        )
        self.config_fixture.config(
            group='cache', backend='oslo_cache.memcache_pool'
        )

        # No symptom detected: Caching driver can connect to both memcached
        # servers
        cache_mock.actual_backend.client.get_stats.return_value = [
            ('alpha.com', {}),
            ('beta.com', {}),
        ]
        self.assertFalse(caching.symptom_connection_to_memcached())

        # Symptom detected: Caching driver can't connect to either memcached
        # server
        cache_mock.actual_backend.client.get_stats.return_value = []
        self.assertTrue(caching.symptom_connection_to_memcached())

        # Symptom detected: Caching driver can't connect to one memcached
        # server
        cache_mock.actual_backend.client.get_stats.return_value = [
            ('alpha.com', {})
        ]
        self.assertTrue(caching.symptom_connection_to_memcached())


class CredentialDoctorTests(unit.TestCase):
    """Doctor checks for the credential fernet key repository configuration."""

    def test_credential_and_fernet_key_repositories_match(self):
        # Symptom Detected: Key repository paths are not unique
        directory = self.useFixture(fixtures.TempDir()).path
        self.config_fixture.config(
            group='credential', key_repository=directory
        )
        self.config_fixture.config(
            group='fernet_tokens', key_repository=directory
        )
        self.assertTrue(credential.symptom_unique_key_repositories())

    def test_credential_and_fernet_key_repositories_are_unique(self):
        # No Symptom Detected: Key repository paths are unique
        self.config_fixture.config(
            group='credential', key_repository='/etc/keystone/cred-repo'
        )
        self.config_fixture.config(
            group='fernet_tokens', key_repository='/etc/keystone/fernet-repo'
        )
        self.assertFalse(credential.symptom_unique_key_repositories())

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_usability_of_cred_fernet_key_repo_raised(self, mock_utils):
        # Symptom Detected: credential fernet key repository is world readable
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = False
        self.assertTrue(
            credential.symptom_usability_of_credential_fernet_key_repository()
        )

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_usability_of_cred_fernet_key_repo_not_raised(self, mock_utils):
        # No Symptom Detected: Custom driver is used
        self.config_fixture.config(group='credential', provider='my-driver')
        mock_utils.FernetUtils().validate_key_repository.return_value = True
        self.assertFalse(
            credential.symptom_usability_of_credential_fernet_key_repository()
        )

        # No Symptom Detected: key repository is not world readable
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().validate_key_repository.return_value = True
        self.assertFalse(
            credential.symptom_usability_of_credential_fernet_key_repository()
        )
    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_keys_in_credential_fernet_key_repository_raised(self, mock_utils):
        # Symptom Detected: Key repo is empty
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = False
        self.assertTrue(
            credential.symptom_keys_in_credential_fernet_key_repository()
        )

    @mock.patch('keystone.cmd.doctor.credential.utils')
    def test_keys_in_credential_fernet_key_repository_not_raised(
        self, mock_utils
    ):
        # No Symptom Detected: Custom driver is used
        self.config_fixture.config(group='credential', provider='my-driver')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            credential.symptom_keys_in_credential_fernet_key_repository()
        )

        # No Symptom Detected: Key repo is not empty, fernet is current driver
        self.config_fixture.config(group='credential', provider='fernet')
        mock_utils.FernetUtils().load_keys.return_value = True
        self.assertFalse(
            credential.symptom_keys_in_credential_fernet_key_repository()
        )


class DatabaseDoctorTests(unit.TestCase):
    """Doctor checks for the [database] connection setting."""

    def test_symptom_is_raised_if_database_connection_is_SQLite(self):
        # Symptom Detected: Database connection is sqlite
        self.config_fixture.config(
            group='database', connection='sqlite:///mydb'
        )
        self.assertTrue(
            doc_database.symptom_database_connection_is_not_SQLite()
        )

        # No Symptom Detected: Database connection is MySQL
        self.config_fixture.config(
            group='database',
            connection='mysql+mysqlconnector://admin:secret@localhost/mydb',
        )
        self.assertFalse(
            doc_database.symptom_database_connection_is_not_SQLite()
        )


class DebugDoctorTests(unit.TestCase):
    """Doctor check for the global debug flag."""

    def test_symptom_debug_mode_is_enabled(self):
        # Symptom Detected: Debug mode is enabled
        self.config_fixture.config(debug=True)
        self.assertTrue(debug.symptom_debug_mode_is_enabled())

        # No Symptom Detected: Debug mode is disabled
        self.config_fixture.config(debug=False)
        self.assertFalse(debug.symptom_debug_mode_is_enabled())


class FederationDoctorTests(unit.TestCase):
    """Doctor checks for [saml] certificate and key file paths."""

    def test_symptom_comma_in_SAML_public_certificate_path(self):
        # Symptom Detected: There is a comma in path to public cert file
        self.config_fixture.config(group='saml', certfile='file,cert.pem')
        self.assertTrue(
            federation.symptom_comma_in_SAML_public_certificate_path()
        )

        # No Symptom Detected: There is no comma in the path
        self.config_fixture.config(group='saml', certfile='signing_cert.pem')
        self.assertFalse(
            federation.symptom_comma_in_SAML_public_certificate_path()
        )

    def test_symptom_comma_in_SAML_private_key_file_path(self):
        # Symptom Detected: There is a comma in path to private key file
        self.config_fixture.config(group='saml', keyfile='file,key.pem')
        self.assertTrue(
            federation.symptom_comma_in_SAML_private_key_file_path()
        )

        # No Symptom Detected: There is no comma in the path
        self.config_fixture.config(group='saml', keyfile='signing_key.pem')
        self.assertFalse(
            federation.symptom_comma_in_SAML_private_key_file_path()
        )


class LdapDoctorTests(unit.TestCase):
    """Doctor checks for [ldap] user/group emulation settings."""

    def test_user_enabled_emulation_dn_ignored_raised(self):
        # Symptom when user_enabled_emulation_dn is being ignored because the
        # user did not enable the user_enabled_emulation
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com',
        )
        self.assertTrue(ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())

    def test_user_enabled_emulation_dn_ignored_not_raised(self):
        # No symptom when configuration set properly
        self.config_fixture.config(group='ldap', user_enabled_emulation=True)
        self.config_fixture.config(
            group='ldap',
            user_enabled_emulation_dn='cn=enabled_users,dc=example,dc=com',
        )
        self.assertFalse(ldap.symptom_LDAP_user_enabled_emulation_dn_ignored())

        # No symptom when both configurations disabled
        self.config_fixture.config(group='ldap', user_enabled_emulation=False)
        self.config_fixture.config(
            group='ldap', user_enabled_emulation_dn=None
        )
self.assertFalse(ldap.symptom_LDAP_user_enabled_emulation_dn_ignored()) def test_user_enabled_emulation_use_group_config_ignored_raised(self): # Symptom when user enabled emulation isn't enabled but group_config is # enabled self.config_fixture.config(group='ldap', user_enabled_emulation=False) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True ) self.assertTrue( ldap.symptom_LDAP_user_enabled_emulation_use_group_config_ignored() ) def test_user_enabled_emulation_use_group_config_ignored_not_raised(self): # No symptom when configuration deactivated self.config_fixture.config(group='ldap', user_enabled_emulation=False) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=False ) self.assertFalse( ldap.symptom_LDAP_user_enabled_emulation_use_group_config_ignored() ) # No symptom when configurations set properly self.config_fixture.config(group='ldap', user_enabled_emulation=True) self.config_fixture.config( group='ldap', user_enabled_emulation_use_group_config=True ) self.assertFalse( ldap.symptom_LDAP_user_enabled_emulation_use_group_config_ignored() ) def test_group_members_are_ids_disabled_raised(self): # Symptom when objectclass is set to posixGroup but members_are_ids are # not enabled self.config_fixture.config( group='ldap', group_objectclass='posixGroup' ) self.config_fixture.config(group='ldap', group_members_are_ids=False) self.assertTrue(ldap.symptom_LDAP_group_members_are_ids_disabled()) def test_group_members_are_ids_disabled_not_raised(self): # No symptom when the configurations are set properly self.config_fixture.config( group='ldap', group_objectclass='posixGroup' ) self.config_fixture.config(group='ldap', group_members_are_ids=True) self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled()) # No symptom when configuration deactivated self.config_fixture.config( group='ldap', group_objectclass='groupOfNames' ) self.config_fixture.config(group='ldap', 
group_members_are_ids=False) self.assertFalse(ldap.symptom_LDAP_group_members_are_ids_disabled()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_raised( self, mocked_isdir, mocked_listdir ): self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) self.config_fixture.config( group='identity', domain_configurations_from_database=False ) # Symptom if there is no existing directory mocked_isdir.return_value = False self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs()) # Symptom if there is an invalid filename inside the domain directory mocked_isdir.return_value = True mocked_listdir.return_value = ['openstack.domains.conf'] self.assertTrue(ldap.symptom_LDAP_file_based_domain_specific_configs()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_not_raised( self, mocked_isdir, mocked_listdir ): # No symptom if both configurations deactivated self.config_fixture.config( group='identity', domain_specific_drivers_enabled=False ) self.config_fixture.config( group='identity', domain_configurations_from_database=False ) self.assertFalse( ldap.symptom_LDAP_file_based_domain_specific_configs() ) # No symptom if directory exists with no invalid filenames self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) self.config_fixture.config( group='identity', domain_configurations_from_database=False ) mocked_isdir.return_value = True mocked_listdir.return_value = ['keystone.domains.conf'] self.assertFalse( ldap.symptom_LDAP_file_based_domain_specific_configs() ) @mock.patch('os.listdir') @mock.patch('os.path.isdir') @mock.patch('keystone.cmd.doctor.ldap.configparser.ConfigParser') def test_file_based_domain_specific_configs_formatted_correctly_raised( self, mocked_parser, mocked_isdir, mocked_listdir ): symptom = ( 'symptom_LDAP_file_based_domain_specific_configs' '_formatted_correctly' ) # Symptom Detected: 
Ldap domain specific configuration files are not # formatted correctly self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) self.config_fixture.config( group='identity', domain_configurations_from_database=False ) mocked_isdir.return_value = True mocked_listdir.return_value = ['keystone.domains.conf'] mock_instance = mock.MagicMock() mock_instance.read.side_effect = configparser.Error('No Section') mocked_parser.return_value = mock_instance self.assertTrue(getattr(ldap, symptom)()) @mock.patch('os.listdir') @mock.patch('os.path.isdir') def test_file_based_domain_specific_configs_formatted_correctly_not_raised( self, mocked_isdir, mocked_listdir ): symptom = ( 'symptom_LDAP_file_based_domain_specific_configs' '_formatted_correctly' ) # No Symptom Detected: Domain_specific drivers is not enabled self.config_fixture.config( group='identity', domain_specific_drivers_enabled=False ) self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: Domain configuration from database is enabled self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) self.assertFalse(getattr(ldap, symptom)()) self.config_fixture.config( group='identity', domain_configurations_from_database=True ) self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: The directory in domain_config_dir doesn't exist mocked_isdir.return_value = False self.assertFalse(getattr(ldap, symptom)()) # No Symptom Detected: domain specific drivers are enabled, domain # configurations from database are disabled, directory exists, and no # exceptions found. 
        self.config_fixture.config(
            group='identity', domain_configurations_from_database=False
        )
        mocked_isdir.return_value = True
        # An empty directory should not raise this symptom
        self.assertFalse(getattr(ldap, symptom)())

        # Test again with a file inside the directory
        mocked_listdir.return_value = ['keystone.domains.conf']
        self.assertFalse(getattr(ldap, symptom)())


class SecurityComplianceDoctorTests(unit.TestCase):
    """Doctor checks for [security_compliance] password policy settings."""

    def test_minimum_password_age_greater_than_password_expires_days(self):
        # Symptom Detected: Minimum password age is greater than the password
        # expires days. Both values are positive integers greater than zero.
        self.config_fixture.config(
            group='security_compliance', minimum_password_age=2
        )
        self.config_fixture.config(
            group='security_compliance', password_expires_days=1
        )
        self.assertTrue(
            security_compliance.symptom_minimum_password_age_greater_than_expires_days()
        )

    def test_minimum_password_age_equal_to_password_expires_days(self):
        # Symptom Detected: Minimum password age is equal to the password
        # expires days. Both values are positive integers greater than zero.
        self.config_fixture.config(
            group='security_compliance', minimum_password_age=1
        )
        self.config_fixture.config(
            group='security_compliance', password_expires_days=1
        )
        self.assertTrue(
            security_compliance.symptom_minimum_password_age_greater_than_expires_days()
        )

    def test_minimum_password_age_less_than_password_expires_days(self):
        # No Symptom Detected: Minimum password age is less than password
        # expires days. Both values are positive integers greater than zero.
self.config_fixture.config( group='security_compliance', minimum_password_age=1 ) self.config_fixture.config( group='security_compliance', password_expires_days=2 ) self.assertFalse( security_compliance.symptom_minimum_password_age_greater_than_expires_days() ) def test_minimum_password_age_and_password_expires_days_deactivated(self): # No Symptom Detected: when minimum_password_age's default value is 0 # and password_expires_days' default value is None self.assertFalse( security_compliance.symptom_minimum_password_age_greater_than_expires_days() ) def test_invalid_password_regular_expression(self): # Symptom Detected: Regular expression is invalid self.config_fixture.config( group='security_compliance', password_regex=r'^^(??=.*\d)$' ) self.assertTrue( security_compliance.symptom_invalid_password_regular_expression() ) def test_valid_password_regular_expression(self): # No Symptom Detected: Regular expression is valid self.config_fixture.config( group='security_compliance', password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$', ) self.assertFalse( security_compliance.symptom_invalid_password_regular_expression() ) def test_password_regular_expression_deactivated(self): # No Symptom Detected: Regular expression deactivated to None self.config_fixture.config( group='security_compliance', password_regex=None ) self.assertFalse( security_compliance.symptom_invalid_password_regular_expression() ) def test_password_regular_expression_description_not_set(self): # Symptom Detected: Regular expression is set but description is not self.config_fixture.config( group='security_compliance', password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$', ) self.config_fixture.config( group='security_compliance', password_regex_description=None ) self.assertTrue( security_compliance.symptom_password_regular_expression_description_not_set() ) def test_password_regular_expression_description_set(self): # No Symptom Detected: Regular expression and description are set desc = '1 letter, 1 digit, and a 
minimum length of 7 is required' self.config_fixture.config( group='security_compliance', password_regex=r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$', ) self.config_fixture.config( group='security_compliance', password_regex_description=desc ) self.assertFalse( security_compliance.symptom_password_regular_expression_description_not_set() ) def test_password_regular_expression_description_deactivated(self): # No Symptom Detected: Regular expression and description are # deactivated to None self.config_fixture.config( group='security_compliance', password_regex=None ) self.config_fixture.config( group='security_compliance', password_regex_description=None ) self.assertFalse( security_compliance.symptom_password_regular_expression_description_not_set() ) class TokensDoctorTests(unit.TestCase): def test_unreasonable_max_token_size_raised(self): # Symptom Detected: the max_token_size for fernet is greater than 255 self.config_fixture.config(group='token', provider='fernet') self.config_fixture.config(max_token_size=256) self.assertTrue(tokens.symptom_unreasonable_max_token_size()) def test_unreasonable_max_token_size_not_raised(self): # No Symptom Detected: the max_token_size for uuid is 32 self.config_fixture.config(group='token', provider='uuid') self.config_fixture.config(max_token_size=32) self.assertFalse(tokens.symptom_unreasonable_max_token_size()) # No Symptom Detected: the max_token_size for fernet is 255 or less self.config_fixture.config(group='token', provider='fernet') self.config_fixture.config(max_token_size=255) self.assertFalse(tokens.symptom_unreasonable_max_token_size()) class TokenFernetDoctorTests(unit.TestCase): @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_usability_of_Fernet_key_repository_raised(self, mock_utils): # Symptom Detected: Fernet key repo is world readable self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = False self.assertTrue( 
tokens_fernet.symptom_usability_of_Fernet_key_repository() ) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_usability_of_Fernet_key_repository_not_raised(self, mock_utils): # No Symptom Detected: UUID is used instead of fernet self.config_fixture.config(group='token', provider='uuid') mock_utils.FernetUtils().validate_key_repository.return_value = False self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository() ) # No Symptom Detected: configs set properly, key repo is not world # readable but is user readable self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().validate_key_repository.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository() ) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_keys_in_Fernet_key_repository_raised(self, mock_utils): # Symptom Detected: Fernet key repository is empty self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = False self.assertTrue(tokens_fernet.symptom_keys_in_Fernet_key_repository()) @mock.patch('keystone.cmd.doctor.tokens_fernet.utils') def test_keys_in_Fernet_key_repository_not_raised(self, mock_utils): # No Symptom Detected: UUID is used instead of fernet self.config_fixture.config(group='token', provider='uuid') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository() ) # No Symptom Detected: configs set properly, key repo has been # populated with keys self.config_fixture.config(group='token', provider='fernet') mock_utils.FernetUtils().load_keys.return_value = True self.assertFalse( tokens_fernet.symptom_usability_of_Fernet_key_repository() ) class TestMappingPurge(unit.SQLDriverOverrides, unit.BaseTestCase): class FakeConfCommand: def __init__(self, parent): self.extension = False self.all = parent.command_all self.type = parent.command_type self.domain_name = 
parent.command_domain_name self.local_id = parent.command_local_id self.public_id = parent.command_public_id def setUp(self): # Set up preset cli options and a parser super().setUp() self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) # For unit tests that should not throw any erorrs, # Use the argument parser to test that the combinations work parser_test = argparse.ArgumentParser() subparsers = parser_test.add_subparsers() self.parser = cli.MappingPurge.add_argument_parser(subparsers) def test_mapping_purge_with_no_arguments_fails(self): # Make sure the logic in main() actually catches no argument error self.command_type = None self.command_all = False self.command_domain_name = None self.command_local_id = None self.command_public_id = None self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) self.assertRaises(ValueError, cli.MappingPurge.main) def test_mapping_purge_with_all_and_other_argument_fails(self): # Make sure the logic in main() actually catches invalid combinations self.command_type = 'user' self.command_all = True self.command_domain_name = None self.command_local_id = None self.command_public_id = None self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) self.assertRaises(ValueError, cli.MappingPurge.main) def test_mapping_purge_with_only_all_passes(self): args = ['--all'] res = self.parser.parse_args(args) self.assertTrue(vars(res)['all']) def test_mapping_purge_with_domain_name_argument_succeeds(self): args = ['--domain-name', uuid.uuid4().hex] self.parser.parse_args(args) def test_mapping_purge_with_public_id_argument_succeeds(self): args = ['--public-id', uuid.uuid4().hex] self.parser.parse_args(args) def test_mapping_purge_with_local_id_argument_succeeds(self): args = ['--local-id', uuid.uuid4().hex] self.parser.parse_args(args) def test_mapping_purge_with_type_argument_succeeds(self): args = 
['--type', 'user'] self.parser.parse_args(args) args = ['--type', 'group'] self.parser.parse_args(args) def test_mapping_purge_with_invalid_argument_fails(self): args = ['--invalid-option', 'some value'] self.assertRaises(unit.UnexpectedExit, self.parser.parse_args, args) def test_mapping_purge_with_all_other_combinations_passes(self): args = ['--type', 'user', '--local-id', uuid.uuid4().hex] self.parser.parse_args(args) args.append('--domain-name') args.append('test') self.parser.parse_args(args) args.append('--public-id') args.append(uuid.uuid4().hex) self.parser.parse_args(args) @mock.patch.object(keystone.identity.MappingManager, 'purge_mappings') def test_mapping_purge_type_user(self, purge_mock): # Make sure the logic in main() actually catches no argument error self.command_type = 'user' self.command_all = False self.command_domain_name = None self.command_local_id = uuid.uuid4().hex self.command_public_id = uuid.uuid4().hex self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) def fake_load_backends(): return dict( id_mapping_api=keystone.identity.core.MappingManager, resource_api=None, ) self.useFixture( fixtures.MockPatch( 'keystone.server.backends.load_backends', side_effect=fake_load_backends, ) ) cli.MappingPurge.main() purge_mock.assert_called_with( { 'entity_type': 'user', 'local_id': self.command_local_id, 'public_id': self.command_public_id, } ) class TestUserMappingPurgeFunctional(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): sqldb = self.useFixture(database.Database()) super().setUp() self.ldapdb = self.useFixture(ldapdb.LDAPDatabase()) self.ldapdb.clear() self.load_backends() sqldb.recreate() self.load_fixtures(default_fixtures) def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def config_overrides(self): super().config_overrides() 
self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def config(self, config_files): CONF( args=['mapping_purge', '--type', 'user'], project='keystone', default_config_files=config_files, ) def test_purge_by_user_type(self): # Grab the list of the users from the backend directly to avoid # populating the public_ids for each user. We do this so we can grab # the local_id of a user before it's overwritten by the public_id. hints = None users = PROVIDERS.identity_api.driver.list_users(hints) # Create a new group in the backend directly. We do this so that we # have control over the local_id, which is `id` here. After creating # the group, let's list them so the id_mapping_api creates the public # id appropriately. group_ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, } PROVIDERS.identity_api.driver.create_group(group_ref['id'], group_ref) PROVIDERS.identity_api.list_groups() # Make sure all users and groups have public ids by querying the # id_mapping_api. 
for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) group_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': group_ref['id'], 'entity_type': identity_mapping.EntityType.GROUP, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(group_entity) ) # Purge all users mappings provider_api.ProviderAPIs._clear_registry_instances() cli.MappingPurge.main() # Check that all the user mappings were purged for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) # Make sure the group mapping still exists self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(group_entity) ) class TestGroupMappingPurgeFunctional(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): sqldb = self.useFixture(database.Database()) super().setUp() self.ldapdb = self.useFixture(ldapdb.LDAPDatabase()) self.ldapdb.clear() self.load_backends() sqldb.recreate() self.load_fixtures(default_fixtures) def config_files(self): self.config_fixture.register_cli_opt(cli.command_opt) config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_ldap_sql.conf')) return config_files def config_overrides(self): super().config_overrides() self.config_fixture.config(group='identity', driver='ldap') self.config_fixture.config( group='identity_mapping', backward_compatible_ids=False ) def config(self, config_files): CONF( args=['mapping_purge', '--type', 'group'], project='keystone', default_config_files=config_files, ) def test_purge_by_group_type(self): # Grab the list of the users from the backend directly to avoid # populating the public_ids for each user. 
We do this so we can grab # the local_id of a user before it's overwritten by the public_id. hints = None users = PROVIDERS.identity_api.driver.list_users(hints) # Create a new group in the backend directly. We do this so that we # have control over the local_id, which is `id` here. After creating # the group, let's list them so the id_mapping_api creates the public # id appropriately. group_ref = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'domain_id': CONF.identity.default_domain_id, } PROVIDERS.identity_api.driver.create_group(group_ref['id'], group_ref) PROVIDERS.identity_api.list_groups() # Make sure all users and groups have public ids by querying the # id_mapping_api. for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) group_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': group_ref['id'], 'entity_type': identity_mapping.EntityType.GROUP, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(group_entity) ) # Purge group mappings provider_api.ProviderAPIs._clear_registry_instances() cli.MappingPurge.main() # Make sure the group mapping was purged self.assertIsNone(PROVIDERS.id_mapping_api.get_public_id(group_entity)) # Check that all the user mappings still exist for user in users: local_entity = { 'domain_id': CONF.identity.default_domain_id, 'local_id': user['id'], 'entity_type': identity_mapping.EntityType.USER, } self.assertIsNotNone( PROVIDERS.id_mapping_api.get_public_id(local_entity) ) class TestTrustFlush(unit.SQLDriverOverrides, unit.BaseTestCase): class FakeConfCommand: def __init__(self, parent): self.extension = False self.project_id = parent.command_project_id self.trustor_user_id = parent.command_trustor_user_id self.trustee_user_id = parent.command_trustee_user_id self.date = parent.command_date def setUp(self): # Set up preset 
cli options and a parser super().setUp() self.useFixture(database.Database()) self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) # For unit tests that should not throw any errors, # Use the argument parser to test that the combinations work parser_test = argparse.ArgumentParser() subparsers = parser_test.add_subparsers() self.parser = cli.TrustFlush.add_argument_parser(subparsers) def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def test_trust_flush(self): self.command_project_id = None self.command_trustor_user_id = None self.command_trustee_user_id = None self.command_date = timeutils.utcnow() self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) def fake_load_backends(): return dict(trust_api=keystone.trust.core.Manager()) self.useFixture( fixtures.MockPatch( 'keystone.server.backends.load_backends', side_effect=fake_load_backends, ) ) trust = cli.TrustFlush() trust.main() def test_trust_flush_with_invalid_date(self): self.command_project_id = None self.command_trustor_user_id = None self.command_trustee_user_id = None self.command_date = '4/10/92' self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) def fake_load_backends(): return dict(trust_api=keystone.trust.core.Manager()) self.useFixture( fixtures.MockPatch( 'keystone.server.backends.load_backends', side_effect=fake_load_backends, ) ) # Clear backend dependencies, since cli loads these manually provider_api.ProviderAPIs._clear_registry_instances() trust = cli.TrustFlush() self.assertRaises(ValueError, trust.main) class TestMappingEngineTester(unit.BaseTestCase): class FakeConfCommand: def __init__(self, parent): self.extension = False self.rules = parent.command_rules self.input = parent.command_input self.prefix = parent.command_prefix self.engine_debug = 
parent.command_engine_debug self.mapping_schema_version = parent.mapping_schema_version def setUp(self): # Set up preset cli options and a parser super().setUp() self.mapping_id = uuid.uuid4().hex self.rules_pathname = None self.rules = None self.assertion_pathname = None self.assertion = None self.logging = self.useFixture(fixtures.LoggerFixture()) self.useFixture(database.Database()) self.config_fixture = self.useFixture(oslo_config.fixture.Config(CONF)) self.config_fixture.register_cli_opt(cli.command_opt) # For unit tests that should not throw any erorrs, # Use the argument parser to test that the combinations work parser_test = argparse.ArgumentParser() subparsers = parser_test.add_subparsers() self.parser = cli.MappingEngineTester.add_argument_parser(subparsers) self.mapping_schema_version = '1.0' def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def test_mapping_engine_tester_with_invalid_rules_file(self): tempfilejson = self.useFixture(temporaryfile.SecureTempFile()) tmpinvalidfile = tempfilejson.file_name # Here the data required for rules should be in JSON format # whereas the file contains text. 
with open(tmpinvalidfile, 'w') as f: f.write("This is an invalid data") self.command_rules = tmpinvalidfile self.command_input = tmpinvalidfile self.command_prefix = None self.command_engine_debug = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) mapping_engine = cli.MappingEngineTester() self.assertRaises(SystemExit, mapping_engine.main) def test_mapping_engine_tester_with_invalid_input_file(self): tempfilejson = self.useFixture(temporaryfile.SecureTempFile()) tmpfilejsonname = tempfilejson.file_name updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_SMALL) with open(tmpfilejsonname, 'w') as f: f.write(jsonutils.dumps(updated_mapping)) self.command_rules = tmpfilejsonname # Here invalid.csv does not exist self.command_input = "invalid.csv" self.command_prefix = None self.command_engine_debug = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) mapping_engine = cli.MappingEngineTester() self.assertRaises(SystemExit, mapping_engine.main) def test_mapping_engine_tester(self): tempfilejson = self.useFixture(temporaryfile.SecureTempFile()) tmpfilejsonname = tempfilejson.file_name updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_SMALL) with open(tmpfilejsonname, 'w') as f: f.write(jsonutils.dumps(updated_mapping)) self.command_rules = tmpfilejsonname tempfile = self.useFixture(temporaryfile.SecureTempFile()) tmpfilename = tempfile.file_name with open(tmpfilename, 'w') as f: f.write("\n") f.write("UserName:me\n") f.write("orgPersonType:NoContractor\n") f.write("LastName:Bo\n") f.write("FirstName:Jill\n") self.command_input = tmpfilename self.command_prefix = None self.command_engine_debug = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) mapping_engine = cli.MappingEngineTester() with mock.patch('builtins.print') as mock_print: mapping_engine.main() self.assertEqual(mock_print.call_count, 3) call = 
mock_print.call_args_list[0] args, kwargs = call self.assertTrue(args[0].startswith('Using Rules:')) call = mock_print.call_args_list[1] args, kwargs = call self.assertTrue(args[0].startswith('Using Assertion:')) call = mock_print.call_args_list[2] args, kwargs = call expected = { "group_names": [], "user": {"type": "ephemeral", "name": "me"}, "projects": [], "group_ids": ["0cd5e9"], } self.assertEqual(jsonutils.loads(args[0]), expected) def test_mapping_engine_tester_with_invalid_data(self): tempfilejson = self.useFixture(temporaryfile.SecureTempFile()) tmpfilejsonname = tempfilejson.file_name updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_SMALL) with open(tmpfilejsonname, 'w') as f: f.write(jsonutils.dumps(updated_mapping)) self.command_rules = tmpfilejsonname tempfile = self.useFixture(temporaryfile.SecureTempFile()) tmpfilename = tempfile.file_name # Here we do not have any value matching to type 'Email' # and condition in mapping_engine_test_rules.json with open(tmpfilename, 'w') as f: f.write("\n") f.write("UserName: me\n") f.write("Email: No@example.com\n") self.command_input = tmpfilename self.command_prefix = None self.command_engine_debug = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) mapping_engine = cli.MappingEngineTester() self.assertRaises(exception.ValidationError, mapping_engine.main) def test_mapping_engine_tester_logs_direct_maps(self): tempfilejson = self.useFixture(temporaryfile.SecureTempFile()) tmpfilejsonname = tempfilejson.file_name updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_SMALL) with open(tmpfilejsonname, 'w') as f: f.write(jsonutils.dumps(updated_mapping)) self.command_rules = tmpfilejsonname tempfile = self.useFixture(temporaryfile.SecureTempFile()) tmpfilename = tempfile.file_name with open(tmpfilename, 'w') as f: f.write("\n") f.write("UserName:me\n") f.write("orgPersonType:NoContractor\n") f.write("LastName:Bo\n") f.write("FirstName:Jill\n") 
self.command_input = tmpfilename self.command_prefix = None self.command_engine_debug = True self.useFixture( fixtures.MockPatchObject( CONF, 'command', self.FakeConfCommand(self) ) ) mapping_engine = cli.MappingEngineTester() logging = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) mapping_engine.main() expected_msg = "direct_maps: [['me']]" self.assertThat(logging.output, matchers.Contains(expected_msg)) class CliStatusTestCase(unit.SQLDriverOverrides, unit.TestCase): def setUp(self): self.useFixture(database.Database()) super().setUp() self.load_backends() self.policy_file = self.useFixture(temporaryfile.SecureTempFile()) self.policy_file_name = self.policy_file.file_name self.useFixture( policy.Policy( self.config_fixture, policy_file=self.policy_file_name ) ) self.checks = status.Checks() def test_check_safe_trust_policies(self): with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_trusts': '', 'identity:delete_trust': '', 'identity:get_trust': '', 'identity:list_roles_for_trust': '', 'identity:get_role_for_trust': '', } f.write(jsonutils.dumps(overridden_policies)) result = self.checks.check_trust_policies_are_not_empty() self.assertEqual(upgradecheck.Code.FAILURE, result.code) with open(self.policy_file_name, 'w') as f: overridden_policies = { 'identity:list_trusts': 'rule:admin_required', 'identity:delete_trust': 'rule:admin_required', 'identity:get_trust': 'rule:admin_required', 'identity:list_roles_for_trust': 'rule:admin_required', 'identity:get_role_for_trust': 'rule:admin_required', } f.write(jsonutils.dumps(overridden_policies)) result = self.checks.check_trust_policies_are_not_empty() self.assertEqual(upgradecheck.Code.SUCCESS, result.code) with open(self.policy_file_name, 'w') as f: overridden_policies = {} f.write(jsonutils.dumps(overridden_policies)) result = self.checks.check_trust_policies_are_not_empty() self.assertEqual(upgradecheck.Code.SUCCESS, result.code) def test_check_immutable_roles(self): role_ref = 
unit.new_role_ref(name='admin') PROVIDERS.role_api.create_role(role_ref['id'], role_ref) result = self.checks.check_default_roles_are_immutable() self.assertEqual(upgradecheck.Code.FAILURE, result.code) role_ref['options'] = {'immutable': True} PROVIDERS.role_api.update_role(role_ref['id'], role_ref) result = self.checks.check_default_roles_are_immutable() self.assertEqual(upgradecheck.Code.SUCCESS, result.code) # Check domain-specific roles are not reported PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) domain_ref = unit.new_domain_ref() domain = PROVIDERS.resource_api.create_domain( domain_ref['id'], domain_ref ) role_ref = unit.new_role_ref(name='admin', domain_id=domain['id']) PROVIDERS.role_api.create_role(role_ref['id'], role_ref) result = self.checks.check_default_roles_are_immutable() self.assertEqual(upgradecheck.Code.SUCCESS, result.code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_config.py0000664000175000017500000000271300000000000022326 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from oslo_config import generator import keystone.conf from keystone.tests import unit CONF = keystone.conf.CONF class ConfigTestCase(unit.TestCase): def config_files(self): config_files = super().config_files() sample_file = 'keystone.conf.sample' args = [ '--namespace', 'keystone', '--output-file', unit.dirs.etc(sample_file), ] generator.main(args=args) config_files.insert(0, unit.dirs.etc(sample_file)) self.addCleanup(os.remove, unit.dirs.etc(sample_file)) return config_files def test_config_default(self): self.assertIsNone(CONF.auth.password) self.assertIsNone(CONF.auth.token) # Check config.set_config_defaults() has set [profiler]enabled. self.assertEqual(False, CONF.profiler.enabled) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_contrib_ec2_core.py0000664000175000017500000001746400000000000024273 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import hashlib import http.client from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_utils import timeutils from keystone.common import provider_api from keystone.common import utils from keystone.tests import unit from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class EC2ContribCoreV3(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.cred_blob, self.credential = unit.new_ec2_credential( self.user['id'], self.project_id ) PROVIDERS.credential_api.create_credential( self.credential['id'], self.credential ) def test_http_get_method_not_allowed(self): resp = self.get( '/ec2tokens', expected_status=http.client.METHOD_NOT_ALLOWED, convert=False, ) self.assertEqual(http.client.METHOD_NOT_ALLOWED, resp.status_code) def _test_valid_authentication_response_with_proper_secret(self, **kwargs): signer = ec2_utils.Ec2Signer(self.cred_blob['secret']) timestamp = utils.isotime(timeutils.utcnow()) credentials = { 'access': self.cred_blob['access'], 'secret': self.cred_blob['secret'], 'host': 'localhost', 'verb': 'GET', 'path': '/', 'params': { 'SignatureVersion': '2', 'Action': 'Test', 'Timestamp': timestamp, }, } credentials['signature'] = signer.generate(credentials) resp = self.post( '/ec2tokens', body={'credentials': credentials}, expected_status=http.client.OK, **kwargs ) self.assertValidProjectScopedTokenResponse(resp, self.user) def test_valid_authentication_response_with_proper_secret(self): self._test_valid_authentication_response_with_proper_secret() def test_valid_authentication_response_with_proper_secret_noauth(self): self._test_valid_authentication_response_with_proper_secret( noauth=True ) def test_valid_authentication_response_with_signature_v4(self): signer = ec2_utils.Ec2Signer(self.cred_blob['secret']) timestamp = utils.isotime(timeutils.utcnow()) hashed_payload = ( 'GET\n' '/\n' 'Action=Test\n' 'host:localhost\n' 'x-amz-date:' + timestamp + '\n' '\n' 'host;x-amz-date\n' 
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ) body_hash = hashlib.sha256(hashed_payload.encode()).hexdigest() amz_credential = ( 'AKIAIOSFODNN7EXAMPLE/%s/us-east-1/iam/aws4_request,' % timestamp[:8] ) credentials = { 'access': self.cred_blob['access'], 'secret': self.cred_blob['secret'], 'host': 'localhost', 'verb': 'GET', 'path': '/', 'params': { 'Action': 'Test', 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256', 'X-Amz-SignedHeaders': 'host,x-amz-date,', 'X-Amz-Credential': amz_credential, }, 'headers': {'X-Amz-Date': timestamp}, 'body_hash': body_hash, } credentials['signature'] = signer.generate(credentials) resp = self.post( '/ec2tokens', body={'credentials': credentials}, expected_status=http.client.OK, ) self.assertValidProjectScopedTokenResponse(resp, self.user) def test_authenticate_with_empty_body_returns_bad_request(self): self.post( '/ec2tokens', body={}, expected_status=http.client.BAD_REQUEST ) def test_authenticate_without_json_request_returns_bad_request(self): self.post( '/ec2tokens', body='not json', expected_status=http.client.BAD_REQUEST, ) def test_authenticate_without_request_body_returns_bad_request(self): self.post('/ec2tokens', expected_status=http.client.BAD_REQUEST) def test_authenticate_without_proper_secret_returns_unauthorized(self): signer = ec2_utils.Ec2Signer('totally not the secret') timestamp = utils.isotime(timeutils.utcnow()) credentials = { 'access': self.cred_blob['access'], 'secret': 'totally not the secret', 'host': 'localhost', 'verb': 'GET', 'path': '/', 'params': { 'SignatureVersion': '2', 'Action': 'Test', 'Timestamp': timestamp, }, } credentials['signature'] = signer.generate(credentials) self.post( '/ec2tokens', body={'credentials': credentials}, expected_status=http.client.UNAUTHORIZED, ) def test_authenticate_expired_request(self): self.config_fixture.config(group='credential', auth_ttl=5) signer = ec2_utils.Ec2Signer(self.cred_blob['secret']) past = timeutils.utcnow() - datetime.timedelta(minutes=10) 
timestamp = utils.isotime(past) credentials = { 'access': self.cred_blob['access'], 'secret': self.cred_blob['secret'], 'host': 'localhost', 'verb': 'GET', 'path': '/', 'params': { 'SignatureVersion': '2', 'Action': 'Test', 'Timestamp': timestamp, }, } credentials['signature'] = signer.generate(credentials) self.post( '/ec2tokens', body={'credentials': credentials}, expected_status=http.client.UNAUTHORIZED, ) def test_authenticate_expired_request_v4(self): self.config_fixture.config(group='credential', auth_ttl=5) signer = ec2_utils.Ec2Signer(self.cred_blob['secret']) past = timeutils.utcnow() - datetime.timedelta(minutes=10) timestamp = utils.isotime(past) hashed_payload = ( 'GET\n' '/\n' 'Action=Test\n' 'host:localhost\n' 'x-amz-date:' + timestamp + '\n' '\n' 'host;x-amz-date\n' 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855' ) body_hash = hashlib.sha256(hashed_payload.encode()).hexdigest() amz_credential = ( 'AKIAIOSFODNN7EXAMPLE/%s/us-east-1/iam/aws4_request,' % timestamp[:8] ) credentials = { 'access': self.cred_blob['access'], 'secret': self.cred_blob['secret'], 'host': 'localhost', 'verb': 'GET', 'path': '/', 'params': { 'Action': 'Test', 'X-Amz-Algorithm': 'AWS4-HMAC-SHA256', 'X-Amz-SignedHeaders': 'host,x-amz-date,', 'X-Amz-Credential': amz_credential, }, 'headers': {'X-Amz-Date': timestamp}, 'body_hash': body_hash, } credentials['signature'] = signer.generate(credentials) self.post( '/ec2tokens', body={'credentials': credentials}, expected_status=http.client.UNAUTHORIZED, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_contrib_s3_core.py0000664000175000017500000002316200000000000024137 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import hashlib import hmac import http.client import uuid from keystone.api import s3tokens from keystone.common import provider_api from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class S3ContribCore(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.load_backends() self.cred_blob, self.credential = unit.new_ec2_credential( self.user['id'], self.project_id ) PROVIDERS.credential_api.create_credential( self.credential['id'], self.credential ) def test_http_get_method_not_allowed(self): resp = self.get( '/s3tokens', expected_status=http.client.METHOD_NOT_ALLOWED, convert=False, ) self.assertEqual(http.client.METHOD_NOT_ALLOWED, resp.status_code) def _test_good_response(self, **kwargs): sts = 'string to sign' # opaque string from swift3 sig = hmac.new( self.cred_blob['secret'].encode('ascii'), sts.encode('ascii'), hashlib.sha1, ).digest() resp = self.post( '/s3tokens', body={ 'credentials': { 'access': self.cred_blob['access'], 'signature': base64.b64encode(sig).strip(), 'token': base64.b64encode(sts.encode('ascii')).strip(), } }, expected_status=http.client.OK, **kwargs ) self.assertValidProjectScopedTokenResponse( resp, self.user, forbid_token_id=True ) def test_good_response(self): self._test_good_response() def test_good_response_noauth(self): self._test_good_response(noauth=True) def test_bad_request(self): self.post( '/s3tokens', body={}, expected_status=http.client.BAD_REQUEST ) self.post( '/s3tokens', body="not json", 
expected_status=http.client.BAD_REQUEST, ) self.post('/s3tokens', expected_status=http.client.BAD_REQUEST) def test_bad_response(self): self.post( '/s3tokens', body={ 'credentials': { 'access': self.cred_blob['access'], 'signature': base64.b64encode( b'totally not the sig' ).strip(), 'token': base64.b64encode(b'string to sign').strip(), } }, expected_status=http.client.UNAUTHORIZED, ) def test_good_signature_v1(self): creds_ref = {'secret': 'b121dd41cdcc42fe9f70e572e84295aa'} credentials = { 'token': 'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB' 'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM' 'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ' 'vbV9zMy50eHQ=', 'signature': 'IL4QLcLVaYgylF9iHj6Wb8BGZsw=', } self.assertIsNone( s3tokens.S3Resource._check_signature(creds_ref, credentials) ) def test_bad_signature_v1(self): creds_ref = {'secret': 'b121dd41cdcc42fe9f70e572e84295aa'} credentials = { 'token': 'UFVUCjFCMk0yWThBc2dUcGdBbVk3UGhDZmc9PQphcHB' 'saWNhdGlvbi9vY3RldC1zdHJlYW0KVHVlLCAxMSBEZWMgMjAxM' 'iAyMTo0MTo0MSBHTVQKL2NvbnRfczMvdXBsb2FkZWRfZnJ' 'vbV9zMy50eHQ=', 'signature': uuid.uuid4().hex, } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) def test_good_signature_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy' 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1' 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==', 'signature': '730ba8f58df6ffeadd78f402e990b2910d60' 'bc5c2aec63619734f096a4dd77be', } self.assertIsNone( s3tokens.S3Resource._check_signature(creds_ref, credentials) ) def test_good_iam_signature_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9pYW0vYXdzNF9yZXF1ZXN0CmYy' 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm' 
'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=', 'signature': 'db4e15b3040f6afaa9d9d16002de2fc3425b' 'eea0c6ea8c1b2bb674f052030b7d', } self.assertIsNone( s3tokens.S3Resource._check_signature(creds_ref, credentials) ) def test_good_sts_signature_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zdHMvYXdzNF9yZXF1ZXN0CmYy' 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm' 'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=', 'signature': '3aa0b6f1414b92b2a32584068f83c6d09b7f' 'daa11d4ea58912bbf1d8616ef56d', } self.assertIsNone( s3tokens.S3Resource._check_signature(creds_ref, credentials) ) def test_bad_signature_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} # the signature is wrong on an otherwise correctly formed token credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy' 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1' 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==', 'signature': uuid.uuid4().hex, } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) def test_bad_service_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} # use 'bad' as the service scope instead of a recognised service credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9iYWQvYXdzNF9yZXF1ZXN0CmYy' 'MjE1NTgwZWViOWExNjczNTFiZDkzZTg2YzNiNmYwNGE5Mjhm' 'NWM1NTIwYTM5MzVhNDUzNTQwYTA5NTY0YjU=', 'signature': '1a2dec50eb1bba97887d1103c2ead6a39911' '98c4be2537cf14d40b64cceb888b', } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) def test_bad_signing_key_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} # signed with aws4_badrequest instead of aws4_request credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 
'MTUwODI0L1JlZ2lvbk9uZS9zMy9hd3M0X3JlcXVlc3QKZjIy' 'MTU1ODBlZWI5YTE2NzM1MWJkOTNlODZjM2I2ZjA0YTkyOGY1' 'YzU1MjBhMzkzNWE0NTM1NDBhMDk1NjRiNQ==', 'signature': '52d02211a3767d00b2104ab28c9859003b0e' '9c8735cd10de7975f3b1212cca41', } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) def test_bad_short_scope_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} # credential scope has too few parts, missing final /aws4_request credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgoyMDE1MDgyNFQxMTIwNDFaCjIw' 'MTUwODI0L1JlZ2lvbk9uZS9zMwpmMjIxNTU4MGVlYjlhMTY3' 'MzUxYmQ5M2U4NmMzYjZmMDRhOTI4ZjVjNTUyMGEzOTM1YTQ1' 'MzU0MGEwOTU2NGI1', 'signature': '28a075f1ee41e96c431153914998443ff0f5' '5fe93d31b37181f13ff4865942a2', } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) def test_bad_token_v4(self): creds_ref = {'secret': 'e7a7a2240136494986991a6598d9fb9f'} # token has invalid format of first part credentials = {'token': 'QVdTNC1BQUEKWApYClg=', 'signature': ''} self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) # token has invalid format of scope credentials = { 'token': 'QVdTNC1ITUFDLVNIQTI1NgpYCi8vczMvYXdzTl9yZXF1ZXN0Clg=', 'signature': '', } self.assertRaises( exception.Unauthorized, s3tokens.S3Resource._check_signature, creds_ref, credentials, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_contrib_simple_cert.py0000664000175000017500000000214000000000000025101 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client from keystone.tests.unit import test_v3 class BaseTestCase(test_v3.RestfulTestCase): CA_PATH = '/v3/OS-SIMPLE-CERT/ca' CERT_PATH = '/v3/OS-SIMPLE-CERT/certificates' class TestSimpleCert(BaseTestCase): def request_cert(self, path): self.request( app=self.public_app, method='GET', path=path, expected_status=http.client.GONE, ) def test_ca_cert(self): self.request_cert(self.CA_PATH) def test_signing_cert(self): self.request_cert(self.CERT_PATH) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_driver_hints.py0000664000175000017500000000435300000000000023563 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.common import driver_hints from keystone.tests.unit import core as test class ListHintsTests(test.TestCase): def test_create_iterate_satisfy(self): hints = driver_hints.Hints() hints.add_filter('t1', 'data1') hints.add_filter('t2', 'data2') self.assertEqual(2, len(hints.filters)) filter = hints.get_exact_filter_by_name('t1') self.assertEqual('t1', filter['name']) self.assertEqual('data1', filter['value']) self.assertEqual('equals', filter['comparator']) self.assertFalse(filter['case_sensitive']) hints.filters.remove(filter) filter_count = 0 for filter in hints.filters: filter_count += 1 self.assertEqual('t2', filter['name']) self.assertEqual(1, filter_count) def test_multiple_creates(self): hints = driver_hints.Hints() hints.add_filter('t1', 'data1') hints.add_filter('t2', 'data2') self.assertEqual(2, len(hints.filters)) hints2 = driver_hints.Hints() hints2.add_filter('t4', 'data1') hints2.add_filter('t5', 'data2') self.assertEqual(2, len(hints.filters)) def test_limits(self): hints = driver_hints.Hints() self.assertIsNone(hints.limit) hints.set_limit(10) self.assertEqual(10, hints.limit['limit']) self.assertFalse(hints.limit['truncated']) hints.set_limit(11) self.assertEqual(11, hints.limit['limit']) self.assertFalse(hints.limit['truncated']) hints.set_limit(10, truncated=True) self.assertEqual(10, hints.limit['limit']) self.assertTrue(hints.limit['truncated']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_entry_points.py0000664000175000017500000000215300000000000023614 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import stevedore from testtools import matchers from keystone.tests.unit import core as test class TestEntryPoints(test.TestCase): def test_entry_point_middleware(self): """Assert that our list of expected middleware is present.""" expected_names = [ 'cors', 'debug', 'request_id', 'sizelimit', ] em = stevedore.ExtensionManager('keystone.server_middleware') actual_names = [extension.name for extension in em] self.assertThat(actual_names, matchers.ContainsAll(expected_names)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_exception.py0000664000175000017500000002624200000000000023062 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(morgan): These test cases are used for AuthContextMiddleware exception # rendering. 
import uuid import fixtures from oslo_config import fixture as config_fixture from oslo_log import log from oslo_serialization import jsonutils import keystone.conf from keystone import exception from keystone.server.flask.request_processing.middleware import auth_context from keystone.tests import unit CONF = keystone.conf.CONF class ExceptionTestCase(unit.BaseTestCase): def assertValidJsonRendering(self, e): resp = auth_context.render_exception(e) self.assertEqual(e.code, resp.status_int) self.assertEqual(f'{e.code} {e.title}', resp.status) j = jsonutils.loads(resp.body) self.assertIsNotNone(j.get('error')) self.assertIsNotNone(j['error'].get('code')) self.assertIsNotNone(j['error'].get('title')) self.assertIsNotNone(j['error'].get('message')) self.assertNotIn('\n', j['error']['message']) self.assertNotIn(' ', j['error']['message']) self.assertIs(type(j['error']['code']), int) def test_all_json_renderings(self): """Everything callable in the exception module should be renderable. ... except for the base error class (exception.Error), which is not user-facing. This test provides a custom message to bypass docstring parsing, which should be tested separately. 
""" for cls in [x for x in exception.__dict__.values() if callable(x)]: if cls is not exception.Error and isinstance(cls, exception.Error): self.assertValidJsonRendering(cls(message='Overridden.')) def test_validation_error(self): target = uuid.uuid4().hex attribute = uuid.uuid4().hex e = exception.ValidationError(target=target, attribute=attribute) self.assertValidJsonRendering(e) self.assertIn(target, str(e)) self.assertIn(attribute, str(e)) def test_not_found(self): target = uuid.uuid4().hex e = exception.NotFound(target=target) self.assertValidJsonRendering(e) self.assertIn(target, str(e)) def test_forbidden_title(self): e = exception.Forbidden() resp = auth_context.render_exception(e) j = jsonutils.loads(resp.body) self.assertEqual('Forbidden', e.title) self.assertEqual('Forbidden', j['error'].get('title')) def test_unicode_message(self): message = 'Comment \xe7a va' e = exception.Error(message) try: self.assertEqual(message, str(e)) except UnicodeEncodeError: self.fail("unicode error message not supported") def test_unicode_string(self): e = exception.ValidationError( attribute='xx', target='Long \xe2\x80\x93 Dash' ) self.assertIn('Long \xe2\x80\x93 Dash', str(e)) def test_invalid_unicode_string(self): # NOTE(jamielennox): This is a complete failure case so what is # returned in the exception message is not that important so long # as there is an error with a message e = exception.ValidationError(attribute='xx', target='\xe7a va') self.assertIn('\xe7a va', str(e)) class UnexpectedExceptionTestCase(ExceptionTestCase): """Test if internal info is exposed to the API user on UnexpectedError.""" class SubClassExc(exception.UnexpectedError): debug_message_format = 'Debug Message: %(debug_info)s' def setUp(self): super().setUp() self.exc_str = uuid.uuid4().hex self.config_fixture = self.useFixture(config_fixture.Config(CONF)) def test_unexpected_error_no_debug(self): self.config_fixture.config(debug=False) e = exception.UnexpectedError(exception=self.exc_str) 
self.assertNotIn(self.exc_str, str(e)) def test_unexpected_error_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) e = exception.UnexpectedError(exception=self.exc_str) self.assertIn(self.exc_str, str(e)) def test_unexpected_error_subclass_no_debug(self): self.config_fixture.config(debug=False) e = UnexpectedExceptionTestCase.SubClassExc(debug_info=self.exc_str) self.assertEqual(exception.UnexpectedError.message_format, str(e)) def test_unexpected_error_subclass_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) subclass = self.SubClassExc e = subclass(debug_info=self.exc_str) expected = subclass.debug_message_format % {'debug_info': self.exc_str} self.assertEqual( f'{expected} {exception.SecurityError.amendment}', str(e) ) def test_unexpected_error_custom_message_no_debug(self): self.config_fixture.config(debug=False) e = exception.UnexpectedError(self.exc_str) self.assertEqual(exception.UnexpectedError.message_format, str(e)) def test_unexpected_error_custom_message_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) e = exception.UnexpectedError(self.exc_str) self.assertEqual( f'{self.exc_str} {exception.SecurityError.amendment}', str(e), ) def test_unexpected_error_custom_message_exception_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) orig_e = exception.NotFound(target=uuid.uuid4().hex) e = exception.UnexpectedError(orig_e) self.assertEqual( f'{str(orig_e)} {exception.SecurityError.amendment}', str(e), ) def test_unexpected_error_custom_message_binary_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) binary_msg = b'something' e = exception.UnexpectedError(binary_msg) self.assertEqual( f'{str(binary_msg)} {exception.SecurityError.amendment}', str(e), ) class SecurityErrorTestCase(ExceptionTestCase): """Test whether security-related info is exposed to the API user.""" def setUp(self): super().setUp() self.config_fixture = 
self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config(public_endpoint='http://localhost:5050') def test_unauthorized_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex e = exception.Unauthorized(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, str(e)) def test_unauthorized_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex e = exception.Unauthorized(message=risky_info) self.assertValidJsonRendering(e) self.assertIn(risky_info, str(e)) def test_forbidden_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, str(e)) def test_forbidden_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertIn(risky_info, str(e)) def test_forbidden_action_exposure(self): self.config_fixture.config(debug=False) risky_info = uuid.uuid4().hex action = uuid.uuid4().hex e = exception.ForbiddenAction(message=risky_info, action=action) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, str(e)) self.assertIn(action, str(e)) self.assertNotIn(exception.SecurityError.amendment, str(e)) e = exception.ForbiddenAction(action=action) self.assertValidJsonRendering(e) self.assertIn(action, str(e)) self.assertNotIn(exception.SecurityError.amendment, str(e)) def test_forbidden_action_exposure_in_debug(self): self.config_fixture.config(debug=True, insecure_debug=True) risky_info = uuid.uuid4().hex action = uuid.uuid4().hex e = exception.ForbiddenAction(message=risky_info, action=action) self.assertValidJsonRendering(e) self.assertIn(risky_info, str(e)) self.assertIn(exception.SecurityError.amendment, str(e)) e = exception.ForbiddenAction(action=action) 
self.assertValidJsonRendering(e) self.assertIn(action, str(e)) self.assertNotIn(exception.SecurityError.amendment, str(e)) def test_forbidden_action_no_message(self): # When no custom message is given when the ForbiddenAction (or other # SecurityError subclass) is created the exposed message is the same # whether debug is enabled or not. action = uuid.uuid4().hex self.config_fixture.config(debug=False) e = exception.ForbiddenAction(action=action) exposed_message = str(e) self.assertIn(action, exposed_message) self.assertNotIn(exception.SecurityError.amendment, str(e)) self.config_fixture.config(debug=True) e = exception.ForbiddenAction(action=action) self.assertEqual(exposed_message, str(e)) def test_unicode_argument_message(self): self.config_fixture.config(debug=False) risky_info = '\u7ee7\u7eed\u884c\u7f29\u8fdb\u6216' e = exception.Forbidden(message=risky_info) self.assertValidJsonRendering(e) self.assertNotIn(risky_info, str(e)) class TestSecurityErrorTranslation(unit.BaseTestCase): """Test i18n for SecurityError exceptions.""" def setUp(self): super().setUp() self.config_fixture = self.useFixture(config_fixture.Config(CONF)) self.config_fixture.config(insecure_debug=False) self.warning_log = self.useFixture(fixtures.FakeLogger(level=log.WARN)) exception._FATAL_EXCEPTION_FORMAT_ERRORS = False self.addCleanup( setattr, exception, '_FATAL_EXCEPTION_FORMAT_ERRORS', True ) class CustomSecurityError(exception.SecurityError): message_format = 'We had a failure in the %(place)r' class CustomError(exception.Error): message_format = 'We had a failure in the %(place)r' def test_nested_translation_of_SecurityErrors(self): e = self.CustomSecurityError(place='code') ('Admiral found this in the log: %s') % e self.assertNotIn('programmer error', self.warning_log.output) def test_that_regular_Errors_can_be_deep_copied(self): e = self.CustomError(place='code') ('Admiral found this in the log: %s') % e self.assertNotIn('programmer error', self.warning_log.output) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_hacking_checks.py0000664000175000017500000000753100000000000024010 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap import pycodestyle from keystone.tests.hacking import checks from keystone.tests import unit from keystone.tests.unit.ksfixtures import hacking as hacking_fixtures class BaseStyleCheck(unit.BaseTestCase): def setUp(self): super().setUp() self.code_ex = self.useFixture(self.get_fixture()) self.addCleanup(delattr, self, 'code_ex') def get_checker(self): """Return the checker to be used for tests in this class.""" raise NotImplementedError( 'subclasses must provide a real implementation' ) def get_fixture(self): return hacking_fixtures.HackingCode() def run_check(self, code): pycodestyle.register_check(self.get_checker()) lines = textwrap.dedent(code).strip().splitlines(True) # Load all keystone hacking checks, they are of the form Kddd, # where ddd can from range from 000-999 guide = pycodestyle.StyleGuide(select='K') checker = pycodestyle.Checker(lines=lines, options=guide.options) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def assert_has_errors(self, code, expected_errors=None): actual_errors = [e[:3] for e in self.run_check(code)] self.assertCountEqual(expected_errors or [], actual_errors) class 
TestCheckForMutableDefaultArgs(BaseStyleCheck): def get_checker(self): return checks.CheckForMutableDefaultArgs def test(self): code = self.code_ex.mutable_default_args['code'] errors = self.code_ex.mutable_default_args['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestBlockCommentsBeginWithASpace(BaseStyleCheck): def get_checker(self): return checks.block_comments_begin_with_a_space def test(self): code = self.code_ex.comments_begin_with_space['code'] errors = self.code_ex.comments_begin_with_space['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestTranslationChecks(BaseStyleCheck): def get_checker(self): return checks.CheckForTranslationIssues def get_fixture(self): return hacking_fixtures.HackingTranslations() def assert_has_errors(self, code, expected_errors=None): # pull out the parts of the error that we'll match against actual_errors = (e[:3] for e in self.run_check(code)) # adjust line numbers to make the fixture data more readable. 
import_lines = len(self.code_ex.shared_imports.split('\n')) - 1 actual_errors = [ (e[0] - import_lines, e[1], e[2]) for e in actual_errors ] self.assertEqual(expected_errors or [], actual_errors) def test_for_translations(self): for example in self.code_ex.examples: code = self.code_ex.shared_imports + example['code'] errors = example['expected_errors'] self.assert_has_errors(code, expected_errors=errors) class TestDictConstructorWithSequenceCopy(BaseStyleCheck): def get_checker(self): return checks.dict_constructor_with_sequence_copy def test(self): code = self.code_ex.dict_constructor['code'] errors = self.code_ex.dict_constructor['expected_errors'] self.assert_has_errors(code, expected_errors=errors) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_healthcheck.py0000664000175000017500000000163200000000000023323 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import http.client from keystone.tests.unit import rest class HealthCheckTestCase(rest.RestfulTestCase): def test_get_healthcheck(self): with self.test_client() as c: resp = c.get('/healthcheck', expected_status_code=http.client.OK) # healthcheck is not supposed to return any data self.assertEqual(0, resp.content_length) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_ldap_livetest.py0000664000175000017500000002026100000000000023716 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import subprocess import ldap.modlist from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.identity.backends import ldap as identity_ldap from keystone.tests import unit from keystone.tests.unit import test_backend_ldap CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def create_object(dn, attrs): conn = ldap.initialize(CONF.ldap.url) conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password) ldif = ldap.modlist.addModlist(attrs) conn.add_s(dn, ldif) conn.unbind_s() class LiveLDAPIdentity(test_backend_ldap.LDAPIdentity): def setUp(self): self._ldap_skip_live() super().setUp() def _ldap_skip_live(self): self.skip_if_env_not_set('ENABLE_LDAP_LIVE_TEST') def clear_database(self): devnull = open('/dev/null', 'w') subprocess.call( [ 'ldapdelete', '-x', '-D', CONF.ldap.user, '-H', CONF.ldap.url, '-w', CONF.ldap.password, '-r', CONF.ldap.suffix, ], stderr=devnull, ) if CONF.ldap.suffix.startswith('ou='): tree_dn_attrs = { 'objectclass': 'organizationalUnit', 'ou': 'openstack', } else: tree_dn_attrs = { 'objectclass': ['dcObject', 'organizationalUnit'], 'dc': 'openstack', 'ou': 'openstack', } create_object(CONF.ldap.suffix, tree_dn_attrs) create_object( CONF.ldap.user_tree_dn, {'objectclass': 'organizationalUnit', 'ou': 'Users'}, ) create_object( CONF.ldap.group_tree_dn, {'objectclass': 'organizationalUnit', 'ou': 'UserGroups'}, ) def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_liveldap.conf')) return config_files def test_build_tree(self): """Regression test for building the tree names.""" # logic is different from the fake backend. 
user_api = identity_ldap.UserApi(CONF) self.assertTrue(user_api) self.assertEqual(CONF.ldap.user_tree_dn, user_api.tree_dn) def test_ldap_dereferencing(self): alt_users_ldif = { 'objectclass': ['top', 'organizationalUnit'], 'ou': 'alt_users', } alt_fake_user_ldif = { 'objectclass': ['person', 'inetOrgPerson'], 'cn': 'alt_fake1', 'sn': 'alt_fake1', } aliased_users_ldif = { 'objectclass': ['alias', 'extensibleObject'], 'aliasedobjectname': "ou=alt_users,%s" % CONF.ldap.suffix, } create_object("ou=alt_users,%s" % CONF.ldap.suffix, alt_users_ldif) create_object( "%s=alt_fake1,ou=alt_users,%s" % (CONF.ldap.user_id_attribute, CONF.ldap.suffix), alt_fake_user_ldif, ) create_object( "ou=alt_users,%s" % CONF.ldap.user_tree_dn, aliased_users_ldif ) self.config_fixture.config( group='ldap', query_scope='sub', alias_dereferencing='never' ) PROVIDERS.identity_api = identity_ldap.Identity() self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, 'alt_fake1', ) self.config_fixture.config( group='ldap', alias_dereferencing='searching' ) PROVIDERS.identity_api = identity_ldap.Identity() user_ref = PROVIDERS.identity_api.get_user('alt_fake1') self.assertEqual('alt_fake1', user_ref['id']) self.config_fixture.config(group='ldap', alias_dereferencing='always') PROVIDERS.identity_api = identity_ldap.Identity() user_ref = PROVIDERS.identity_api.get_user('alt_fake1') self.assertEqual('alt_fake1', user_ref['id']) # FakeLDAP does not correctly process filters, so this test can only be # run against a live LDAP server def test_list_groups_for_user_filtered(self): domain = self._get_domain_fixture() test_groups = [] test_users = [] GROUP_COUNT = 3 USER_COUNT = 2 positive_user = unit.create_user(PROVIDERS.identity_api, domain['id']) negative_user = unit.create_user(PROVIDERS.identity_api, domain['id']) for x in range(0, USER_COUNT): group_refs = PROVIDERS.identity_api.list_groups_for_user( test_users[x]['id'] ) self.assertEqual(0, len(group_refs)) for x in range(0, 
GROUP_COUNT): new_group = unit.new_group_ref(domain_id=domain['id']) new_group = PROVIDERS.identity_api.create_group(new_group) test_groups.append(new_group) group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(x, len(group_refs)) PROVIDERS.identity_api.add_user_to_group( positive_user['id'], new_group['id'] ) group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(x + 1, len(group_refs)) group_refs = PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) driver = PROVIDERS.identity_api._select_identity_driver( CONF.identity.default_domain_id ) driver.group.ldap_filter = '(dn=xx)' group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(0, len(group_refs)) group_refs = PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) driver.group.ldap_filter = '(objectclass=*)' group_refs = PROVIDERS.identity_api.list_groups_for_user( positive_user['id'] ) self.assertEqual(GROUP_COUNT, len(group_refs)) group_refs = PROVIDERS.identity_api.list_groups_for_user( negative_user['id'] ) self.assertEqual(0, len(group_refs)) def test_user_enable_attribute_mask(self): self.config_fixture.config( group='ldap', user_enabled_emulation=False, user_enabled_attribute='employeeType', ) super().test_user_enable_attribute_mask() def test_create_project_case_sensitivity(self): # The attribute used for the live LDAP tests is case insensitive. def call_super(): ( super( LiveLDAPIdentity, self ).test_create_project_case_sensitivity() ) self.assertRaises(exception.Conflict, call_super) def test_create_user_case_sensitivity(self): # The attribute used for the live LDAP tests is case insensitive. 
def call_super(): super(LiveLDAPIdentity, self).test_create_user_case_sensitivity() self.assertRaises(exception.Conflict, call_super) def test_project_update_missing_attrs_with_a_falsey_value(self): # The description attribute doesn't allow an empty value. def call_super(): ( super( LiveLDAPIdentity, self ).test_project_update_missing_attrs_with_a_falsey_value() ) self.assertRaises(ldap.INVALID_SYNTAX, call_super) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_ldap_pool_livetest.py0000664000175000017500000001776200000000000024763 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ldappool from keystone.common import provider_api import keystone.conf from keystone.identity.backends import ldap from keystone.identity.backends.ldap import common as ldap_common from keystone.tests import unit from keystone.tests.unit import fakeldap from keystone.tests.unit import test_backend_ldap_pool from keystone.tests.unit import test_ldap_livetest CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class LiveLDAPPoolIdentity( test_backend_ldap_pool.LdapPoolCommonTestMixin, test_ldap_livetest.LiveLDAPIdentity, ): """Executes existing LDAP live test with pooled LDAP handler. Also executes common pool specific tests via Mixin class. 
""" def setUp(self): super().setUp() self.addCleanup(self.cleanup_pools) # storing to local variable to avoid long references self.conn_pools = ldap_common.PooledLDAPHandler.connection_pools def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_pool_liveldap.conf')) return config_files def test_assert_connector_used_not_fake_ldap_pool(self): handler = ldap_common._get_connection(CONF.ldap.url, use_pool=True) self.assertNotEqual( type(handler.Connector), type(fakeldap.FakeLdapPool) ) self.assertEqual( type(ldappool.StateConnector), type(handler.Connector) ) def test_async_search_and_result3(self): self.config_fixture.config(group='ldap', page_size=1) self.test_user_enable_attribute_mask() def test_pool_size_expands_correctly(self): who = CONF.ldap.user cred = CONF.ldap.password # get related connection manager instance ldappool_cm = self.conn_pools[CONF.ldap.url] def _get_conn(): return ldappool_cm.connection(who, cred) with _get_conn() as c1: # 1 self.assertEqual(1, len(ldappool_cm)) self.assertTrue(c1.connected) self.assertTrue(c1.active) with _get_conn() as c2: # conn2 self.assertEqual(2, len(ldappool_cm)) self.assertTrue(c2.connected) self.assertTrue(c2.active) self.assertEqual(2, len(ldappool_cm)) # c2 went out of context, its connected but not active self.assertTrue(c2.connected) self.assertFalse(c2.active) with _get_conn() as c3: # conn3 self.assertEqual(2, len(ldappool_cm)) self.assertTrue(c3.connected) self.assertTrue(c3.active) self.assertIs(c3, c2) # same connection is reused self.assertTrue(c2.active) with _get_conn() as c4: # conn4 self.assertEqual(3, len(ldappool_cm)) self.assertTrue(c4.connected) self.assertTrue(c4.active) def test_password_change_with_auth_pool_disabled(self): self.config_fixture.config(group='ldap', use_auth_pool=False) old_password = self.user_sna['password'] self.test_password_change_with_pool() self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, 
context={}, user_id=self.user_sna['id'], password=old_password, ) def _create_user_and_authenticate(self, password): user = unit.create_user( PROVIDERS.identity_api, CONF.identity.default_domain_id, password=password, ) with self.make_request(): PROVIDERS.identity_api.authenticate( user_id=user['id'], password=password ) return PROVIDERS.identity_api.get_user(user['id']) def _get_auth_conn_pool_cm(self): pool_url = ( ldap_common.PooledLDAPHandler.auth_pool_prefix + CONF.ldap.url ) return self.conn_pools[pool_url] def _do_password_change_for_one_user(self, password, new_password): self.config_fixture.config(group='ldap', use_auth_pool=True) self.cleanup_pools() self.load_backends() user1 = self._create_user_and_authenticate(password) auth_cm = self._get_auth_conn_pool_cm() self.assertEqual(1, len(auth_cm)) user2 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user3 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user4 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) user5 = self._create_user_and_authenticate(password) self.assertEqual(1, len(auth_cm)) # connection pool size remains 1 even for different user ldap bind # as there is only one active connection at a time user_api = ldap.UserApi(CONF) u1_dn = user_api._id_to_dn_string(user1['id']) u2_dn = user_api._id_to_dn_string(user2['id']) u3_dn = user_api._id_to_dn_string(user3['id']) u4_dn = user_api._id_to_dn_string(user4['id']) u5_dn = user_api._id_to_dn_string(user5['id']) # now create multiple active connections for end user auth case which # will force to keep them in pool. After that, modify one of user # password. Need to make sure that user connection is in middle # of pool list. 
auth_cm = self._get_auth_conn_pool_cm() with auth_cm.connection(u1_dn, password) as _: with auth_cm.connection(u2_dn, password) as _: with auth_cm.connection(u3_dn, password) as _: with auth_cm.connection(u4_dn, password) as _: with auth_cm.connection(u5_dn, password) as _: self.assertEqual(5, len(auth_cm)) _.unbind_s() user3['password'] = new_password PROVIDERS.identity_api.update_user(user3['id'], user3) return user3 def test_password_change_with_auth_pool_enabled_long_lifetime(self): self.config_fixture.config( group='ldap', auth_pool_connection_lifetime=600 ) old_password = 'my_password' new_password = 'new_password' user = self._do_password_change_for_one_user( old_password, new_password ) user.pop('password') # with long connection lifetime auth_pool can bind to old password # successfully which is not desired if password change is frequent # use case in a deployment. # This can happen in multiple concurrent connections case only. with self.make_request(): user_ref = PROVIDERS.identity_api.authenticate( user_id=user['id'], password=old_password ) self.assertDictEqual(user, user_ref) def test_password_change_with_auth_pool_enabled_no_lifetime(self): self.config_fixture.config( group='ldap', auth_pool_connection_lifetime=0 ) old_password = 'my_password' new_password = 'new_password' user = self._do_password_change_for_one_user( old_password, new_password ) # now as connection lifetime is zero, so authentication # with old password will always fail. self.assertRaises( AssertionError, PROVIDERS.identity_api.authenticate, context={}, user_id=user['id'], password=old_password, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_ldap_tls_livetest.py0000664000175000017500000000773300000000000024611 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import ldap.modlist from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import identity from keystone.tests import unit from keystone.tests.unit import test_ldap_livetest CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def create_object(dn, attrs): conn = ldap.initialize(CONF.ldap.url) conn.simple_bind_s(CONF.ldap.user, CONF.ldap.password) ldif = ldap.modlist.addModlist(attrs) conn.add_s(dn, ldif) conn.unbind_s() class LiveTLSLDAPIdentity(test_ldap_livetest.LiveLDAPIdentity): def _ldap_skip_live(self): self.skip_if_env_not_set('ENABLE_TLS_LDAP_LIVE_TEST') def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_tls_liveldap.conf')) return config_files def test_tls_certfile_demand_option(self): self.config_fixture.config( group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand', ) PROVIDERS.identity_api = identity.backends.ldap.Identity() user = unit.create_user( PROVIDERS.identity_api, 'default', name='fake1', password='fakepass1', ) user_ref = PROVIDERS.identity_api.get_user(user['id']) self.assertEqual(user['id'], user_ref['id']) user['password'] = 'fakepass2' PROVIDERS.identity_api.update_user(user['id'], user) PROVIDERS.identity_api.delete_user(user['id']) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, user['id'] ) def test_tls_certdir_demand_option(self): 
self.config_fixture.config( group='ldap', use_tls=True, tls_cacertdir=None, tls_req_cert='demand', ) PROVIDERS.identity_api = identity.backends.ldap.Identity() user = unit.create_user( PROVIDERS.identity_api, 'default', id='fake1', name='fake1', password='fakepass1', ) user_ref = PROVIDERS.identity_api.get_user('fake1') self.assertEqual('fake1', user_ref['id']) user['password'] = 'fakepass2' PROVIDERS.identity_api.update_user('fake1', user) PROVIDERS.identity_api.delete_user('fake1') self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, 'fake1' ) def test_tls_bad_certfile(self): self.config_fixture.config( group='ldap', use_tls=True, tls_req_cert='demand', tls_cacertfile='/etc/keystone/ssl/certs/mythicalcert.pem', tls_cacertdir=None, ) PROVIDERS.identity_api = identity.backends.ldap.Identity() user = unit.new_user_ref('default') self.assertRaises(IOError, PROVIDERS.identity_api.create_user, user) def test_tls_bad_certdir(self): self.config_fixture.config( group='ldap', use_tls=True, tls_cacertfile=None, tls_req_cert='demand', tls_cacertdir='/etc/keystone/ssl/mythicalcertdir', ) PROVIDERS.identity_api = identity.backends.ldap.Identity() user = unit.new_user_ref('default') self.assertRaises(IOError, PROVIDERS.identity_api.create_user, user) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_limits.py0000664000175000017500000021170500000000000022365 0ustar00zuulzuul00000000000000# Copyright 2018 Huawei # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from keystone.common import provider_api from keystone.common.validation import validators import keystone.conf from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class LimitModelTestCase(test_v3.RestfulTestCase): def test_get_default_limit_model_response_schema(self): schema = { 'type': 'object', 'properties': { 'model': { 'type': 'object', 'properties': { 'name': {'type': 'string'}, 'description': {'type': 'string'}, }, 'required': ['name', 'description'], 'additionalProperties': False, }, }, 'required': ['model'], 'additionalProperties': False, } validator = validators.SchemaValidator(schema) response = self.get('/limits/model') validator.validate(response.json_body) def test_head_limit_model(self): self.head('/limits/model', expected_status=http.client.OK) def test_get_limit_model_returns_default_model(self): response = self.get('/limits/model') model = response.result expected = { 'model': { 'name': 'flat', 'description': ( 'Limit enforcement and validation does not take project ' 'hierarchy into consideration.' ), } } self.assertDictEqual(expected, model) def test_get_limit_model_without_token_fails(self): self.get( '/limits/model', noauth=True, expected_status=http.client.UNAUTHORIZED, ) def test_head_limit_model_without_token_fails(self): self.head( '/limits/model', noauth=True, expected_status=http.client.UNAUTHORIZED, ) class RegisteredLimitsTestCase(test_v3.RestfulTestCase): """Test registered_limits CRUD.""" def setUp(self): super().setUp() # Most of these tests require system-scoped tokens. Let's have one on # hand so that we can use it in tests when we need it. 
PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) self.system_admin_token = self.get_system_scoped_token() # There is already a sample service and region created from # load_sample_data() but we're going to create another service and # region for specific testing purposes. response = self.post('/regions', body={'region': {}}) self.region2 = response.json_body['region'] self.region_id2 = self.region2['id'] service_ref = { 'service': { 'name': uuid.uuid4().hex, 'enabled': True, 'type': 'type2', } } response = self.post('/services', body=service_ref) self.service2 = response.json_body['service'] self.service_id2 = self.service2['id'] def test_create_registered_limit(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] for key in [ 'service_id', 'region_id', 'resource_name', 'default_limit', 'description', ]: self.assertEqual(registered_limits[0][key], ref[key]) def test_create_registered_limit_without_region(self): ref = unit.new_registered_limit_ref(service_id=self.service_id) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] for key in ['service_id', 'resource_name', 'default_limit']: self.assertEqual(registered_limits[0][key], ref[key]) self.assertIsNone(registered_limits[0].get('region_id')) def test_create_registered_without_description(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) ref.pop('description') r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] for key in [ 
'service_id', 'region_id', 'resource_name', 'default_limit', ]: self.assertEqual(registered_limits[0][key], ref[key]) self.assertIsNone(registered_limits[0]['description']) def test_create_multi_registered_limit(self): ref1 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id, resource_name='snapshot' ) r = self.post( '/registered_limits', body={'registered_limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] for key in ['service_id', 'resource_name', 'default_limit']: self.assertEqual(registered_limits[0][key], ref1[key]) self.assertEqual(registered_limits[1][key], ref2[key]) self.assertEqual(registered_limits[0]['region_id'], ref1['region_id']) self.assertIsNone(registered_limits[1].get('region_id')) def test_create_registered_limit_return_count(self): ref1 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) r = self.post( '/registered_limits', body={'registered_limits': [ref1]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] self.assertEqual(1, len(registered_limits)) ref2 = unit.new_registered_limit_ref( service_id=self.service_id2, region_id=self.region_id2 ) ref3 = unit.new_registered_limit_ref(service_id=self.service_id2) r = self.post( '/registered_limits', body={'registered_limits': [ref2, ref3]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) registered_limits = r.result['registered_limits'] self.assertEqual(2, len(registered_limits)) def test_create_registered_limit_with_invalid_input(self): ref1 = unit.new_registered_limit_ref() ref2 = unit.new_registered_limit_ref(default_limit='not_int') ref3 = unit.new_registered_limit_ref(resource_name=123) ref4 = unit.new_registered_limit_ref(region_id='fake_region') for 
input_limit in [ref1, ref2, ref3, ref4]: self.post( '/registered_limits', body={'registered_limits': [input_limit]}, token=self.system_admin_token, expected_status=http.client.BAD_REQUEST, ) def test_create_registered_limit_duplicate(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CONFLICT, ) def test_update_registered_limit(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_ref = { 'service_id': self.service_id2, 'region_id': self.region_id2, 'resource_name': 'snapshot', 'default_limit': 5, 'description': 'test description', } r = self.patch( '/registered_limits/%s' % r.result['registered_limits'][0]['id'], body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) new_registered_limits = r.result['registered_limit'] self.assertEqual(new_registered_limits['service_id'], self.service_id2) self.assertEqual(new_registered_limits['region_id'], self.region_id2) self.assertEqual(new_registered_limits['resource_name'], 'snapshot') self.assertEqual(new_registered_limits['default_limit'], 5) self.assertEqual( new_registered_limits['description'], 'test description' ) def test_update_registered_limit_region_failed(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, resource_name='volume', default_limit=10, description='test description', ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, 
expected_status=http.client.CREATED, ) update_ref = { 'region_id': self.region_id, } registered_limit_id = r.result['registered_limits'][0]['id'] r = self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) new_registered_limits = r.result['registered_limit'] self.assertEqual(self.region_id, new_registered_limits['region_id']) update_ref['region_id'] = '' r = self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.BAD_REQUEST, ) def test_update_registered_limit_description(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_ref = {'description': 'test description'} registered_limit_id = r.result['registered_limits'][0]['id'] r = self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) new_registered_limits = r.result['registered_limit'] self.assertEqual( new_registered_limits['description'], 'test description' ) update_ref['description'] = '' r = self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) new_registered_limits = r.result['registered_limit'] self.assertEqual(new_registered_limits['description'], '') def test_update_registered_limit_region_id_to_none(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, 
expected_status=http.client.CREATED, ) update_ref = {'region_id': None} registered_limit_id = r.result['registered_limits'][0]['id'] r = self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) self.assertIsNone(r.result['registered_limit']['region_id']) def test_update_registered_limit_region_id_to_none_conflict(self): ref1 = unit.new_registered_limit_ref( service_id=self.service_id, resource_name='volume', default_limit=10, ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) self.post( '/registered_limits', body={'registered_limits': [ref1]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) r = self.post( '/registered_limits', body={'registered_limits': [ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_ref = {'region_id': None} registered_limit_id = r.result['registered_limits'][0]['id'] # There is a registered limit with "service_id=self.service_id, # region_id=None" already. So update ref2's region_id to None will # raise 409 Conflict Error. 
self.patch( '/registered_limits/%s' % registered_limit_id, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.CONFLICT, ) def test_update_registered_limit_not_found(self): update_ref = { 'service_id': self.service_id, 'region_id': self.region_id, 'resource_name': 'snapshot', 'default_limit': 5, } self.patch( '/registered_limits/%s' % uuid.uuid4().hex, body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.NOT_FOUND, ) def test_update_registered_limit_with_invalid_input(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) reg_id = r.result['registered_limits'][0]['id'] update_ref1 = unit.new_registered_limit_ref(service_id='fake_id') update_ref2 = unit.new_registered_limit_ref(default_limit='not_int') update_ref3 = unit.new_registered_limit_ref(resource_name=123) update_ref4 = unit.new_registered_limit_ref(region_id='fake_region') update_ref5 = unit.new_registered_limit_ref(description=123) for input_limit in [ update_ref1, update_ref2, update_ref3, update_ref4, update_ref5, ]: self.patch( '/registered_limits/%s' % reg_id, body={'registered_limit': input_limit}, token=self.system_admin_token, expected_status=http.client.BAD_REQUEST, ) def test_update_registered_limit_with_referenced_limit(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) self.post( '/limits', body={'limits': [ref]}, 
token=self.system_admin_token, expected_status=http.client.CREATED, ) update_ref = { 'service_id': self.service_id2, 'region_id': self.region_id2, 'resource_name': 'snapshot', 'default_limit': 5, } self.patch( '/registered_limits/%s' % r.result['registered_limits'][0]['id'], body={'registered_limit': update_ref}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_list_registered_limit(self): r = self.get('/registered_limits', expected_status=http.client.OK) self.assertEqual([], r.result.get('registered_limits')) ref1 = unit.new_registered_limit_ref( service_id=self.service_id, resource_name='test_resource', region_id=self.region_id, ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id2, resource_name='test_resource', region_id=self.region_id2, ) r = self.post( '/registered_limits', body={'registered_limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['registered_limits'][0]['id'] r = self.get('/registered_limits', expected_status=http.client.OK) registered_limits = r.result['registered_limits'] self.assertEqual(len(registered_limits), 2) for key in [ 'service_id', 'region_id', 'resource_name', 'default_limit', ]: if registered_limits[0]['id'] == id1: self.assertEqual(registered_limits[0][key], ref1[key]) self.assertEqual(registered_limits[1][key], ref2[key]) break self.assertEqual(registered_limits[1][key], ref1[key]) self.assertEqual(registered_limits[0][key], ref2[key]) r = self.get( '/registered_limits?service_id=%s' % self.service_id, expected_status=http.client.OK, ) registered_limits = r.result['registered_limits'] self.assertEqual(len(registered_limits), 1) for key in [ 'service_id', 'region_id', 'resource_name', 'default_limit', ]: self.assertEqual(registered_limits[0][key], ref1[key]) r = self.get( '/registered_limits?region_id=%s' % self.region_id2, expected_status=http.client.OK, ) registered_limits = r.result['registered_limits'] 
self.assertEqual(len(registered_limits), 1) for key in [ 'service_id', 'region_id', 'resource_name', 'default_limit', ]: self.assertEqual(registered_limits[0][key], ref2[key]) r = self.get( '/registered_limits?resource_name=test_resource', expected_status=http.client.OK, ) registered_limits = r.result['registered_limits'] self.assertEqual(len(registered_limits), 2) def test_show_registered_limit(self): ref1 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id2, region_id=self.region_id2 ) r = self.post( '/registered_limits', body={'registered_limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['registered_limits'][0]['id'] self.get( '/registered_limits/fake_id', expected_status=http.client.NOT_FOUND ) r = self.get( '/registered_limits/%s' % id1, expected_status=http.client.OK ) registered_limit = r.result['registered_limit'] for key in [ 'service_id', 'region_id', 'resource_name', 'default_limit', 'description', ]: self.assertEqual(registered_limit[key], ref1[key]) def test_delete_registered_limit(self): ref1 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id2, region_id=self.region_id2 ) r = self.post( '/registered_limits', body={'registered_limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['registered_limits'][0]['id'] self.delete( '/registered_limits/%s' % id1, token=self.system_admin_token, expected_status=http.client.NO_CONTENT, ) self.delete( '/registered_limits/fake_id', token=self.system_admin_token, expected_status=http.client.NOT_FOUND, ) r = self.get('/registered_limits', expected_status=http.client.OK) registered_limits = r.result['registered_limits'] self.assertEqual(len(registered_limits), 1) def 
test_delete_registered_limit_with_referenced_limit(self): ref = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', default_limit=10, ) r = self.post( '/registered_limits', body={'registered_limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id = r.result['registered_limits'][0]['id'] self.delete( '/registered_limits/%s' % id, expected_status=http.client.FORBIDDEN ) class LimitsTestCase(test_v3.RestfulTestCase): """Test limits CRUD.""" def setUp(self): super().setUp() # FIXME(lbragstad): Remove all this duplicated logic once we get all # keystone tests using bootstrap consistently. This is something the # bootstrap utility already does for us. reader_role = {'id': uuid.uuid4().hex, 'name': 'reader'} reader_role = PROVIDERS.role_api.create_role( reader_role['id'], reader_role ) member_role = {'id': uuid.uuid4().hex, 'name': 'member'} member_role = PROVIDERS.role_api.create_role( member_role['id'], member_role ) PROVIDERS.role_api.create_implied_role(self.role_id, member_role['id']) PROVIDERS.role_api.create_implied_role( member_role['id'], reader_role['id'] ) # Most of these tests require system-scoped tokens. Let's have one on # hand so that we can use it in tests when we need it. PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) self.system_admin_token = self.get_system_scoped_token() # There is already a sample service and region created from # load_sample_data() but we're going to create another service and # region for specific testing purposes. 
response = self.post('/regions', body={'region': {}}) self.region2 = response.json_body['region'] self.region_id2 = self.region2['id'] service_ref = { 'service': { 'name': uuid.uuid4().hex, 'enabled': True, 'type': 'type2', } } response = self.post('/services', body=service_ref) self.service2 = response.json_body['service'] self.service_id2 = self.service2['id'] ref1 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_registered_limit_ref( service_id=self.service_id2, resource_name='snapshot' ) ref3 = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='backup', ) self.post( '/registered_limits', body={'registered_limits': [ref1, ref2, ref3]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) # Create more assignments, all are: # # self.user -- admin -- self.project # self.user -- non-admin -- self.project_2 # self.user -- admin -- self.domain # self.user -- non-admin -- self.domain_2 # self.user -- admin -- system self.project_2 = unit.new_project_ref(domain_id=self.domain_id) self.project_2_id = self.project_2['id'] PROVIDERS.resource_api.create_project( self.project_2_id, self.project_2 ) self.domain_2 = unit.new_domain_ref() self.domain_2_id = self.domain_2['id'] PROVIDERS.resource_api.create_domain(self.domain_2_id, self.domain_2) self.role_2 = unit.new_role_ref(name='non-admin') self.role_2_id = self.role_2['id'] PROVIDERS.role_api.create_role(self.role_2_id, self.role_2) PROVIDERS.assignment_api.create_grant( self.role_2_id, user_id=self.user_id, project_id=self.project_2_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user_id, domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role_2_id, user_id=self.user_id, domain_id=self.domain_2_id ) PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) def test_create_project_limit(self): ref = 
unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertIsNotNone(limits[0]['id']) self.assertIsNone(limits[0]['domain_id']) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', 'description', 'project_id', ]: self.assertEqual(limits[0][key], ref[key]) def test_create_domain_limit(self): ref = unit.new_limit_ref( domain_id=self.domain_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertIsNotNone(limits[0]['id']) self.assertIsNone(limits[0]['project_id']) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', 'description', 'domain_id', ]: self.assertEqual(limits[0][key], ref[key]) def test_create_limit_without_region(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertIsNotNone(limits[0]['id']) self.assertIsNotNone(limits[0]['project_id']) for key in ['service_id', 'resource_name', 'resource_limit']: self.assertEqual(limits[0][key], ref[key]) self.assertIsNone(limits[0].get('region_id')) def test_create_limit_without_description(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref.pop('description') r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertIsNotNone(limits[0]['id']) 
self.assertIsNotNone(limits[0]['project_id']) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', ]: self.assertEqual(limits[0][key], ref[key]) self.assertIsNone(limits[0]['description']) def test_create_limit_with_domain_as_project(self): ref = unit.new_limit_ref( project_id=self.domain_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token ) limits = r.result['limits'] self.assertIsNone(limits[0]['project_id']) self.assertEqual(self.domain_id, limits[0]['domain_id']) def test_create_multi_limit(self): ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] for key in ['service_id', 'resource_name', 'resource_limit']: self.assertEqual(limits[0][key], ref1[key]) self.assertEqual(limits[1][key], ref2[key]) self.assertEqual(limits[0]['region_id'], ref1['region_id']) self.assertIsNone(limits[1].get('region_id')) def test_create_limit_return_count(self): ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) r = self.post( '/limits', body={'limits': [ref1]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertEqual(1, len(limits)) ref2 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) ref3 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='backup', ) r = self.post( '/limits', body={'limits': [ref2, ref3]}, token=self.system_admin_token, 
expected_status=http.client.CREATED, ) limits = r.result['limits'] self.assertEqual(2, len(limits)) def test_create_limit_with_invalid_input(self): ref1 = unit.new_limit_ref( project_id=self.project_id, resource_limit='not_int' ) ref2 = unit.new_limit_ref( project_id=self.project_id, resource_name=123 ) ref3 = unit.new_limit_ref( project_id=self.project_id, region_id='fake_region' ) for input_limit in [ref1, ref2, ref3]: self.post( '/limits', body={'limits': [input_limit]}, token=self.system_admin_token, expected_status=http.client.BAD_REQUEST, ) def test_create_limit_duplicate(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CONFLICT, ) def test_create_limit_without_reference_registered_limit(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id2, resource_name='volume', ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_update_limit(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_ref = {'resource_limit': 5, 'description': 'test description'} r = self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_ref}, token=self.system_admin_token, expected_status=http.client.OK, ) new_limits = r.result['limit'] self.assertEqual(new_limits['resource_limit'], 5) self.assertEqual(new_limits['description'], 'test description') def test_update_limit_not_found(self): update_ref = 
{'resource_limit': 5} self.patch( '/limits/%s' % uuid.uuid4().hex, body={'limit': update_ref}, token=self.system_admin_token, expected_status=http.client.NOT_FOUND, ) def test_update_limit_with_invalid_input(self): ref = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) r = self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) limit_id = r.result['limits'][0]['id'] invalid_resource_limit_update = {'resource_limit': 'not_int'} invalid_description_update = {'description': 123} for input_limit in [ invalid_resource_limit_update, invalid_description_update, ]: self.patch( '/limits/%s' % limit_id, body={'limit': input_limit}, token=self.system_admin_token, expected_status=http.client.BAD_REQUEST, ) def test_list_limit(self): r = self.get( '/limits', token=self.system_admin_token, expected_status=http.client.OK, ) self.assertEqual([], r.result.get('limits')) ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['limits'][0]['id'] r = self.get('/limits', expected_status=http.client.OK) limits = r.result['limits'] self.assertEqual(len(limits), 2) if limits[0]['id'] == id1: self.assertEqual(limits[0]['region_id'], ref1['region_id']) self.assertIsNone(limits[1].get('region_id')) for key in ['service_id', 'resource_name', 'resource_limit']: self.assertEqual(limits[0][key], ref1[key]) self.assertEqual(limits[1][key], ref2[key]) else: self.assertEqual(limits[1]['region_id'], ref1['region_id']) self.assertIsNone(limits[0].get('region_id')) for key in ['service_id', 'resource_name', 
'resource_limit']: self.assertEqual(limits[1][key], ref1[key]) self.assertEqual(limits[0][key], ref2[key]) r = self.get( '/limits?service_id=%s' % self.service_id2, expected_status=http.client.OK, ) limits = r.result['limits'] self.assertEqual(len(limits), 1) for key in ['service_id', 'resource_name', 'resource_limit']: self.assertEqual(limits[0][key], ref2[key]) r = self.get( '/limits?region_id=%s' % self.region_id, expected_status=http.client.OK, ) limits = r.result['limits'] self.assertEqual(len(limits), 1) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', ]: self.assertEqual(limits[0][key], ref1[key]) r = self.get( '/limits?resource_name=volume', expected_status=http.client.OK ) limits = r.result['limits'] self.assertEqual(len(limits), 1) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', ]: self.assertEqual(limits[0][key], ref1[key]) def test_list_limit_with_project_id_filter(self): # create two limit in different projects for test. self.config_fixture.config(group='oslo_policy', enforce_scope=True) ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( project_id=self.project_2_id, service_id=self.service_id2, resource_name='snapshot', ) self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) # non system scoped request will get the limits in its project. 
r = self.get('/limits', expected_status=http.client.OK) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.project_id, limits[0]['project_id']) r = self.get( '/limits', expected_status=http.client.OK, auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project_2_id, ), ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.project_2_id, limits[0]['project_id']) # any project user can filter by their own project r = self.get( '/limits?project_id=%s' % self.project_id, expected_status=http.client.OK, ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.project_id, limits[0]['project_id']) # a system scoped request can specify the project_id filter r = self.get( '/limits?project_id=%s' % self.project_id, expected_status=http.client.OK, token=self.system_admin_token, ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.project_id, limits[0]['project_id']) def test_list_limit_with_domain_id_filter(self): # create two limit in different domains for test. ref1 = unit.new_limit_ref( domain_id=self.domain_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( domain_id=self.domain_2_id, service_id=self.service_id2, resource_name='snapshot', ) self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) # non system scoped request will get the limits in its domain. 
r = self.get( '/limits', expected_status=http.client.OK, auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id, ), ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.domain_id, limits[0]['domain_id']) r = self.get( '/limits', expected_status=http.client.OK, auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_2_id, ), ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.domain_2_id, limits[0]['domain_id']) # if non system scoped request contain domain_id filter, keystone # will return an empty list. r = self.get( '/limits?domain_id=%s' % self.domain_id, expected_status=http.client.OK, ) limits = r.result['limits'] self.assertEqual(0, len(limits)) # a system scoped request can specify the domain_id filter r = self.get( '/limits?domain_id=%s' % self.domain_id, expected_status=http.client.OK, auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], system=True, ), ) limits = r.result['limits'] self.assertEqual(1, len(limits)) self.assertEqual(self.domain_id, limits[0]['domain_id']) def test_show_project_limit(self): ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) if r.result['limits'][0]['resource_name'] == 'volume': id1 = r.result['limits'][0]['id'] else: id1 = r.result['limits'][1]['id'] self.get( '/limits/fake_id', token=self.system_admin_token, expected_status=http.client.NOT_FOUND, ) r = self.get('/limits/%s' % id1, expected_status=http.client.OK) limit = r.result['limit'] 
self.assertIsNone(limit['domain_id']) for key in [ 'service_id', 'region_id', 'resource_name', 'resource_limit', 'description', 'project_id', ]: self.assertEqual(limit[key], ref1[key]) def test_show_domain_limit(self): ref1 = unit.new_limit_ref( domain_id=self.domain_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref1]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['limits'][0]['id'] r = self.get( '/limits/%s' % id1, expected_status=http.client.OK, auth=self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id, ), ) limit = r.result['limit'] self.assertIsNone(limit['project_id']) self.assertIsNone(limit['region_id']) for key in [ 'service_id', 'resource_name', 'resource_limit', 'description', 'domain_id', ]: self.assertEqual(limit[key], ref1[key]) def test_delete_limit(self): ref1 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) ref2 = unit.new_limit_ref( project_id=self.project_id, service_id=self.service_id2, resource_name='snapshot', ) r = self.post( '/limits', body={'limits': [ref1, ref2]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) id1 = r.result['limits'][0]['id'] self.delete( '/limits/%s' % id1, token=self.system_admin_token, expected_status=http.client.NO_CONTENT, ) self.delete( '/limits/fake_id', token=self.system_admin_token, expected_status=http.client.NOT_FOUND, ) r = self.get( '/limits', token=self.system_admin_token, expected_status=http.client.OK, ) limits = r.result['limits'] self.assertEqual(len(limits), 1) class StrictTwoLevelLimitsTestCase(LimitsTestCase): def setUp(self): super().setUp() # Most of these tests require system-scoped tokens. Let's have one on # hand so that we can use it in tests when we need it. 
PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) self.system_admin_token = self.get_system_scoped_token() # create two hierarchical projects trees for test. The first level is # domain. # A D # / \ / \ # B C E F domain_ref = {'domain': {'name': 'A', 'enabled': True}} response = self.post('/domains', body=domain_ref) self.domain_A = response.json_body['domain'] project_ref = { 'project': { 'name': 'B', 'enabled': True, 'domain_id': self.domain_A['id'], } } response = self.post('/projects', body=project_ref) self.project_B = response.json_body['project'] project_ref = { 'project': { 'name': 'C', 'enabled': True, 'domain_id': self.domain_A['id'], } } response = self.post('/projects', body=project_ref) self.project_C = response.json_body['project'] domain_ref = {'domain': {'name': 'D', 'enabled': True}} response = self.post('/domains', body=domain_ref) self.domain_D = response.json_body['domain'] project_ref = { 'project': { 'name': 'E', 'enabled': True, 'domain_id': self.domain_D['id'], } } response = self.post('/projects', body=project_ref) self.project_E = response.json_body['project'] project_ref = { 'project': { 'name': 'F', 'enabled': True, 'domain_id': self.domain_D['id'], } } response = self.post('/projects', body=project_ref) self.project_F = response.json_body['project'] def config_overrides(self): super().config_overrides() self.config_fixture.config( group='unified_limit', enforcement_model='strict_two_level' ) def test_create_child_limit(self): # when A is 20, success to create B to 15, C to 18. 
# A,20 A,20 # / \ --> / \ # B C B,15 C,18 ref = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=20, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=15, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=18, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) def test_create_child_limit_break_hierarchical_tree(self): # when A is 20, success to create B to 15, but fail to create C to 21. # A,20 A,20 # / \ --> / \ # B C B,15 C # # A,20 A,20 # / \ -/-> / \ # B,15 C B,15 C,21 ref = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=20, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=15, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=21, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_create_child_with_default_parent(self): # If A is not set, the default value is 10 (from 
registered limit). # success to create B to 5, but fail to create C to 11. # A(10) A(10) # / \ --> / \ # B C B,5 C # # A(10) A(10) # / \ -/-> / \ # B,5 C B,5 C,11 ref = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=11, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_create_parent_limit(self): # When B is 9 , success to set A to 12 # A A,12 # / \ --> / \ # B,9 C B,9 C ref = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=12, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) def test_create_parent_limit_break_hierarchical_tree(self): # When B is 9 , fail to set A to 8 # A A,8 # / \ -/-> / \ # B,9 C B,9 C ref = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) self.post( '/limits', body={'limits': [ref]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=8, ) self.post( '/limits', body={'limits': [ref]}, 
token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_create_multi_limits(self): # success to create a tree in one request like: # A,12 D,9 # / \ / \ # B,9 C,5 E,5 F,4 ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=12, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) ref_D = unit.new_limit_ref( domain_id=self.domain_D['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) ref_E = unit.new_limit_ref( project_id=self.project_E['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) ref_F = unit.new_limit_ref( project_id=self.project_F['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=4, ) self.post( '/limits', body={'limits': [ref_A, ref_B, ref_C, ref_D, ref_E, ref_F]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) def test_create_multi_limits_invalid_input(self): # fail to create a tree in one request like: # A,12 D,9 # / \ / \ # B,9 C,5 E,5 F,10 # because F will break the second limit tree. 
ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=12, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) ref_D = unit.new_limit_ref( domain_id=self.domain_D['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) ref_E = unit.new_limit_ref( project_id=self.project_E['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) ref_F = unit.new_limit_ref( project_id=self.project_F['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) self.post( '/limits', body={'limits': [ref_A, ref_B, ref_C, ref_D, ref_E, ref_F]}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_create_multi_limits_break_hierarchical_tree(self): # when there is some hierarchical_trees already like: # A,12 D # / \ / \ # B,9 C E,5 F # fail to set C to 5 and D to 4 in one request like: # A,12 D,4 # / \ / \ # B,9 C,5 E,5 F # because D will break the second limit tree. 
ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=12, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=9, ) ref_E = unit.new_limit_ref( project_id=self.project_E['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) self.post( '/limits', body={'limits': [ref_A, ref_B, ref_E]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=5, ) ref_D = unit.new_limit_ref( domain_id=self.domain_D['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=4, ) self.post( '/limits', body={'limits': [ref_C, ref_D]}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_update_child_limit(self): # Success to update C to 9 # A,10 A,10 # / \ --> / \ # B,6 C,7 B,6 C,9 ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=6, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=7, ) self.post( '/limits', body={'limits': [ref_A, ref_B]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) r = self.post( '/limits', body={'limits': [ref_C]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_dict = {'resource_limit': 9} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': 
update_dict}, token=self.system_admin_token, expected_status=http.client.OK, ) def test_update_child_limit_break_hierarchical_tree(self): # Fail to update C to 11 # A,10 A,10 # / \ -/-> / \ # B,6 C,7 B,6 C,11 ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=6, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=7, ) self.post( '/limits', body={'limits': [ref_A, ref_B]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) r = self.post( '/limits', body={'limits': [ref_C]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_dict = {'resource_limit': 11} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_dict}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_update_child_limit_with_default_parent(self): # If A is not set, the default value is 10 (from registered limit). 
# Success to update C to 9 but fail to update C to 11 # A,(10) A,(10) # / \ --> / \ # B, C,7 B C,9 # # A,(10) A,(10) # / \ -/-> / \ # B, C,7 B C,11 ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=7, ) r = self.post( '/limits', body={'limits': [ref_C]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_dict = {'resource_limit': 9} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_dict}, token=self.system_admin_token, expected_status=http.client.OK, ) update_dict = {'resource_limit': 11} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_dict}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) def test_update_parent_limit(self): # Success to update A to 8 # A,10 A,8 # / \ --> / \ # B,6 C,7 B,6 C,7 ref_A = unit.new_limit_ref( domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=6, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=7, ) r = self.post( '/limits', body={'limits': [ref_A]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) self.post( '/limits', body={'limits': [ref_B, ref_C]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_dict = {'resource_limit': 8} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_dict}, token=self.system_admin_token, expected_status=http.client.OK, ) def test_update_parent_limit_break_hierarchical_tree(self): # Fail to update A to 6 # A,10 A,6 # / \ -/-> / \ # B,6 C,7 B,6 C,7 ref_A = unit.new_limit_ref( 
domain_id=self.domain_A['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=10, ) ref_B = unit.new_limit_ref( project_id=self.project_B['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=6, ) ref_C = unit.new_limit_ref( project_id=self.project_C['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', resource_limit=7, ) r = self.post( '/limits', body={'limits': [ref_A]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) self.post( '/limits', body={'limits': [ref_B, ref_C]}, token=self.system_admin_token, expected_status=http.client.CREATED, ) update_dict = {'resource_limit': 6} self.patch( '/limits/%s' % r.result['limits'][0]['id'], body={'limit': update_dict}, token=self.system_admin_token, expected_status=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_middleware.py0000664000175000017500000007561600000000000023212 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import hashlib import http.client import typing as ty from unittest import mock import uuid import fixtures import webtest from keystone.auth import core as auth_core from keystone.common import authorization from keystone.common import context as keystone_context from keystone.common import provider_api from keystone.common import tokenless_auth import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.server.flask.request_processing.middleware import auth_context from keystone.tests import unit from keystone.tests.unit import mapping_fixtures from keystone.tests.unit import test_backend_sql CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class MiddlewareRequestTestBase(unit.TestCase): MIDDLEWARE_CLASS: ty.Any = None # override this in subclasses def _application(self): """A base wsgi application that returns a simple response.""" def app(environ, start_response): # WSGI requires the body of the response to be bytes body = uuid.uuid4().hex.encode('utf-8') resp_headers = [ ('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body))), ] start_response('200 OK', resp_headers) return [body] return app def _generate_app_response( self, app, headers=None, method='get', path='/', **kwargs ): """Given a wsgi application wrap it in webtest and call it.""" return getattr(webtest.TestApp(app), method)( path, headers=headers or {}, **kwargs ) def _middleware_failure(self, exc, *args, **kwargs): """Assert that an exception is being thrown from process_request.""" # NOTE(jamielennox): This is a little ugly. We need to call the webtest # framework so that the correct RequestClass object is created for when # we call process_request. However because we go via webtest we only # see the response object and not the actual exception that is thrown # by process_request. 
To get around this we subclass process_request # with something that checks for the right type of exception being # thrown so we can test the middle of the request process. # TODO(jamielennox): Change these tests to test the value of the # response rather than the error that is raised. class _Failing(self.MIDDLEWARE_CLASS): _called = False def fill_context(i_self, *i_args, **i_kwargs): # i_ to distinguish it from and not clobber the outer vars e = self.assertRaises( exc, super().fill_context, *i_args, **i_kwargs ) i_self._called = True raise e # by default the returned status when an uncaught exception is raised # for validation or caught errors this will likely be 400 kwargs.setdefault('status', http.client.INTERNAL_SERVER_ERROR) # 500 app = _Failing(self._application()) resp = self._generate_app_response(app, *args, **kwargs) self.assertTrue(app._called) return resp def _do_middleware_response(self, *args, **kwargs): """Wrap a middleware around a sample application and call it.""" app = self.MIDDLEWARE_CLASS(self._application()) return self._generate_app_response(app, *args, **kwargs) def _do_middleware_request(self, *args, **kwargs): """The request object from a successful middleware call.""" return self._do_middleware_response(*args, **kwargs).request class AuthContextMiddlewareTest( test_backend_sql.SqlTests, MiddlewareRequestTestBase ): MIDDLEWARE_CLASS = auth_context.AuthContextMiddleware def setUp(self): super().setUp() self.client_issuer = uuid.uuid4().hex self.untrusted_client_issuer = uuid.uuid4().hex self.trusted_issuer = self.client_issuer self.config_fixture.config( group='tokenless_auth', trusted_issuer=[self.trusted_issuer] ) # client_issuer is encoded because you can't hash # unicode objects with hashlib. 
# This idp_id is calculated based on sha256(self.client_issuer) hashed_idp = hashlib.sha256(self.client_issuer.encode('utf-8')) self.idp_id = hashed_idp.hexdigest() self._load_sample_data() def _load_sample_data(self): self.protocol_id = 'x509' # 1) Create a domain for the user. self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] self.domain_name = self.domain['name'] PROVIDERS.resource_api.create_domain(self.domain_id, self.domain) # 2) Create a project for the user. self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.project_name = self.project['name'] PROVIDERS.resource_api.create_project(self.project_id, self.project) # 3) Create a user in new domain. self.user = unit.new_user_ref( domain_id=self.domain_id, project_id=self.project_id ) self.user = PROVIDERS.identity_api.create_user(self.user) # Add IDP self.idp = self._idp_ref(id=self.idp_id) PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) # Add a role self.role = unit.new_role_ref() self.role_id = self.role['id'] self.role_name = self.role['name'] PROVIDERS.role_api.create_role(self.role_id, self.role) # Add a group self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = PROVIDERS.identity_api.create_group(self.group) # Assign a role to the user on a project PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=self.user['id'], project_id=self.project_id, role_id=self.role_id, ) # Assign a role to the group on a project PROVIDERS.assignment_api.create_grant( role_id=self.role_id, group_id=self.group['id'], project_id=self.project_id, ) def _load_mapping_rules(self, rules): # Add a mapping self.mapping = self._mapping_ref(rules=rules) PROVIDERS.federation_api.create_mapping( self.mapping['id'], self.mapping ) # Add protocols self.proto_x509 = self._proto_ref(mapping_id=self.mapping['id']) self.proto_x509['id'] = self.protocol_id PROVIDERS.federation_api.create_protocol( self.idp['id'], 
self.proto_x509['id'], self.proto_x509 ) def _idp_ref(self, id=None): idp = { 'id': id or uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex, } return idp def _proto_ref(self, mapping_id=None): proto = { 'id': uuid.uuid4().hex, 'mapping_id': mapping_id or uuid.uuid4().hex, } return proto def _mapping_ref(self, rules=None): if rules is None: mapped_rules = {} else: mapped_rules = rules.get('rules', {}) return { 'id': uuid.uuid4().hex, 'rules': mapped_rules, 'schema_version': "1.0", } def _assert_tokenless_auth_context(self, context, ephemeral_user=False): self.assertIsNotNone(context) self.assertEqual(self.project_id, context['project_id']) self.assertIn(self.role_name, context['roles']) if ephemeral_user: self.assertEqual(self.group['id'], context['group_ids'][0]) self.assertEqual( 'ephemeral', context[federation_constants.PROTOCOL] ) self.assertEqual( self.idp_id, context[federation_constants.IDENTITY_PROVIDER] ) else: self.assertEqual(self.user['id'], context['user_id']) def _assert_tokenless_request_context( self, request_context, ephemeral_user=False ): self.assertIsNotNone(request_context) self.assertEqual(self.project_id, request_context.project_id) self.assertIn(self.role_name, request_context.roles) if not ephemeral_user: self.assertEqual(self.user['id'], request_context.user_id) def test_context_already_exists(self): stub_value = uuid.uuid4().hex env = {authorization.AUTH_CONTEXT_ENV: stub_value} req = self._do_middleware_request(extra_environ=env) self.assertEqual( stub_value, req.environ.get(authorization.AUTH_CONTEXT_ENV) ) def test_not_applicable_to_token_request(self): req = self._do_middleware_request(path='/auth/tokens', method='post') context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_no_tokenless_attributes_request(self): req = self._do_middleware_request() context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_no_issuer_attribute_request(self): env = 
{} env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_has_only_issuer_and_project_name_request(self): env = {} # SSL_CLIENT_I_DN is the attribute name that wsgi env # references to issuer of the client certificate. env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_has_only_issuer_and_project_domain_name_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_has_only_issuer_and_project_domain_id_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_DOMAIN_ID'] = uuid.uuid4().hex self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_missing_both_domain_and_project_request(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_empty_trusted_issuer_list(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex self.config_fixture.config(group='tokenless_auth', trusted_issuer=[]) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_client_issuer_not_trusted(self): env = {} env['SSL_CLIENT_I_DN'] = self.untrusted_client_issuer env['HTTP_X_PROJECT_ID'] = uuid.uuid4().hex req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self.assertIsNone(context) def test_proj_scope_with_proj_id_and_proj_dom_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer 
env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id # SSL_CLIENT_USER_NAME and SSL_CLIENT_DOMAIN_NAME are the types # defined in the mapping that will map to the user name and # domain name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_proj_scope_with_proj_id_only_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_proj_scope_with_proj_name_and_proj_dom_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def 
test_proj_scope_with_proj_name_and_proj_dom_name_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_proj_scope_with_proj_name_only_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_mapping_with_userid_and_domainid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_ID'] = self.user['id'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINID ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_mapping_with_userid_and_domainname_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_ID'] = 
self.user['id'] env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERID_AND_DOMAINNAME ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_mapping_with_username_and_domainid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID ) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_only_domain_name_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_DOMAINNAME_ONLY) self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_only_domain_id_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_DOMAINID_ONLY) self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_missing_domain_data_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = 
self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = self.user['name'] self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_USERNAME_ONLY) self._middleware_failure( exception.ValidationError, extra_environ=env, status=400 ) def test_userid_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_ID'] = self.user['id'] self._load_mapping_rules(mapping_fixtures.MAPPING_WITH_USERID_ONLY) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context(request_context) def test_domain_disable_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self.domain['enabled'] = False self.domain = PROVIDERS.resource_api.update_domain( self.domain['id'], self.domain ) self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID ) self._middleware_failure( exception.Unauthorized, extra_environ=env, status=401 ) def test_user_disable_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] env['SSL_CLIENT_DOMAIN_ID'] = self.domain_id self.user['enabled'] = False self.user = PROVIDERS.identity_api.update_user( self.user['id'], self.user ) self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINID ) self._middleware_failure(AssertionError, extra_environ=env) def test_invalid_user_fail(self): env = {} env['SSL_CLIENT_I_DN'] = 
self.client_issuer env['HTTP_X_PROJECT_ID'] = self.project_id env['HTTP_X_PROJECT_DOMAIN_ID'] = self.domain_id env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex env['SSL_CLIENT_DOMAIN_NAME'] = self.domain_name self._load_mapping_rules( mapping_fixtures.MAPPING_WITH_USERNAME_AND_DOMAINNAME ) self._middleware_failure( exception.UserNotFound, extra_environ=env, status=404 ) def test_ephemeral_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context( request_context, ephemeral_user=True ) def test_ephemeral_and_group_domain_name_mapping_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' mapping = copy.deepcopy( mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER_AND_GROUP_DOMAIN_NAME ) mapping['rules'][0]['local'][0]['group']['name'] = self.group['name'] mapping['rules'][0]['local'][0]['group']['domain']['name'] = ( self.domain['name'] ) self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) 
self._assert_tokenless_auth_context(context, ephemeral_user=True) def test_ephemeral_with_default_user_type_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' # this mapping does not have the user type defined # and it should defaults to 'ephemeral' which is # the expected type for the test case. mapping = copy.deepcopy( mapping_fixtures.MAPPING_FOR_DEFAULT_EPHEMERAL_USER ) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context( request_context, ephemeral_user=True ) def test_ephemeral_any_user_success(self): """Verify ephemeral user does not need a specified user. Keystone is not looking to match the user, but a corresponding group. 
""" env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = uuid.uuid4().hex self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) req = self._do_middleware_request(extra_environ=env) context = req.environ.get(authorization.AUTH_CONTEXT_ENV) self._assert_tokenless_auth_context(context, ephemeral_user=True) request_context = req.environ.get(keystone_context.REQUEST_CONTEXT_ENV) self._assert_tokenless_request_context( request_context, ephemeral_user=True ) def test_ephemeral_invalid_scope_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = uuid.uuid4().hex env['HTTP_X_PROJECT_DOMAIN_NAME'] = uuid.uuid4().hex env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = self.group['id'] self._load_mapping_rules(mapping) self._middleware_failure( exception.Unauthorized, extra_environ=env, status=401 ) def test_ephemeral_no_group_found_fail(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] self.config_fixture.config( group='tokenless_auth', protocol='ephemeral' ) self.protocol_id = 'ephemeral' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex self._load_mapping_rules(mapping) self._middleware_failure( exception.MappedGroupNotFound, 
extra_environ=env ) def test_ephemeral_incorrect_mapping_fail(self): """Test ephemeral user picking up the non-ephemeral user mapping. Looking up the mapping with protocol Id 'x509' will load up the non-ephemeral user mapping, results unauthenticated. """ env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer env['HTTP_X_PROJECT_NAME'] = self.project_name env['HTTP_X_PROJECT_DOMAIN_NAME'] = self.domain_name env['SSL_CLIENT_USER_NAME'] = self.user['name'] # This will pick up the incorrect mapping self.config_fixture.config(group='tokenless_auth', protocol='x509') self.protocol_id = 'x509' mapping = copy.deepcopy(mapping_fixtures.MAPPING_FOR_EPHEMERAL_USER) mapping['rules'][0]['local'][0]['group']['id'] = uuid.uuid4().hex self._load_mapping_rules(mapping) self._middleware_failure( exception.MappedGroupNotFound, extra_environ=env ) def test_create_idp_id_success(self): env = {} env['SSL_CLIENT_I_DN'] = self.client_issuer auth = tokenless_auth.TokenlessAuthHelper(env) idp_id = auth._build_idp_id() self.assertEqual(self.idp_id, idp_id) def test_create_idp_id_attri_not_found_fail(self): env = {} env[uuid.uuid4().hex] = self.client_issuer auth = tokenless_auth.TokenlessAuthHelper(env) expected_msg = ( 'Could not determine Identity Provider ID. The ' 'configuration option %s was not found in the ' 'request environment.' 
% CONF.tokenless_auth.issuer_attribute ) # Check the content of the exception message as well self.assertRaisesRegex( exception.TokenlessAuthConfigError, expected_msg, auth._build_idp_id, ) def test_admin_token_context(self): self.config_fixture.config(admin_token='ADMIN') log_fix = self.useFixture(fixtures.FakeLogger()) headers = {authorization.AUTH_TOKEN_HEADER: 'ADMIN'} req = self._do_middleware_request(headers=headers) self.assertTrue(req.environ[auth_context.CONTEXT_ENV]['is_admin']) self.assertNotIn('Invalid user token', log_fix.output) def test_request_non_admin(self): self.config_fixture.config(admin_token='ADMIN') log_fix = self.useFixture(fixtures.FakeLogger()) headers = {authorization.AUTH_TOKEN_HEADER: 'NOT-ADMIN'} self._do_middleware_request(headers=headers) self.assertIn('Invalid user token', log_fix.output) def test_token_is_cached(self): # Make sure we only call PROVIDERS.token_provider_api.validate_token() # once while in middleware so that we're mindful of performance context = auth_core.AuthContext( user_id=self.user['id'], methods=['password'] ) token = PROVIDERS.token_provider_api.issue_token( context['user_id'], context['methods'], project_id=self.project_id, auth_context=context, ) headers = {authorization.AUTH_TOKEN_HEADER: token.id.encode('utf-8')} with mock.patch.object( PROVIDERS.token_provider_api, 'validate_token', return_value=token ) as token_mock: self._do_middleware_request( path='/v3/projects', method='get', headers=headers ) token_mock.assert_called_once() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_policy.py0000664000175000017500000002375000000000000022364 0ustar00zuulzuul00000000000000# Copyright 2011 Piston Cloud Computing, Inc. # All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess from unittest import mock import uuid from oslo_policy import policy as common_policy from keystone.common import policies from keystone.common.rbac_enforcer import policy import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile CONF = keystone.conf.CONF class PolicyFileTestCase(unit.TestCase): def setUp(self): # self.tmpfilename should exist before setUp super is called # this is to ensure it is available for the config_fixture in # the config_overrides call. 
self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name super().setUp() self.target = {} def _policy_fixture(self): return ksfixtures.Policy( self.config_fixture, policy_file=self.tmpfilename ) def test_modified_policy_reloads(self): action = "example:test" empty_credentials = {} with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": []}""") policy.enforce(empty_credentials, action, self.target) with open(self.tmpfilename, "w") as policyfile: policyfile.write("""{"example:test": ["false:false"]}""") policy._ENFORCER._enforcer.clear() self.assertRaises( exception.ForbiddenAction, policy.enforce, empty_credentials, action, self.target, ) class PolicyTestCase(unit.TestCase): def setUp(self): super().setUp() self.rules = { "true": [], "example:allowed": [], "example:denied": [["false:false"]], "example:get_http": [["http:http://www.example.com"]], "example:my_file": [ ["role:compute_admin"], ["project_id:%(project_id)s"], ], "example:early_and_fail": [["false:false", "rule:true"]], "example:early_or_success": [["rule:true"], ["false:false"]], "example:lowercase_admin": [["role:admin"], ["role:sysadmin"]], "example:uppercase_admin": [["role:ADMIN"], ["role:sysadmin"]], } # NOTE(vish): then overload underlying policy engine self._set_rules() self.credentials = {} self.target = {} def _set_rules(self): these_rules = common_policy.Rules.from_dict(self.rules) policy._ENFORCER._enforcer.set_rules(these_rules) def test_enforce_nonexistent_action_throws(self): action = "example:noexist" self.assertRaises( exception.ForbiddenAction, policy.enforce, self.credentials, action, self.target, ) def test_enforce_bad_action_throws(self): action = "example:denied" self.assertRaises( exception.ForbiddenAction, policy.enforce, self.credentials, action, self.target, ) def test_enforce_good_action(self): action = "example:allowed" policy.enforce(self.credentials, action, self.target) def 
test_templatized_enforcement(self): target_mine = {'project_id': 'fake'} target_not_mine = {'project_id': 'another'} credentials = {'project_id': 'fake', 'roles': []} action = "example:my_file" policy.enforce(credentials, action, target_mine) self.assertRaises( exception.ForbiddenAction, policy.enforce, credentials, action, target_not_mine, ) def test_early_AND_enforcement(self): action = "example:early_and_fail" self.assertRaises( exception.ForbiddenAction, policy.enforce, self.credentials, action, self.target, ) def test_early_OR_enforcement(self): action = "example:early_or_success" policy.enforce(self.credentials, action, self.target) def test_ignore_case_role_check(self): lowercase_action = "example:lowercase_admin" uppercase_action = "example:uppercase_admin" # NOTE(dprince): We mix case in the Admin role here to ensure # case is ignored admin_credentials = {'roles': ['AdMiN']} policy.enforce(admin_credentials, lowercase_action, self.target) policy.enforce(admin_credentials, uppercase_action, self.target) class PolicyScopeTypesEnforcementTestCase(unit.TestCase): def setUp(self): super().setUp() rule = common_policy.RuleDefault( name='foo', check_str='', scope_types=['system'] ) policy._ENFORCER._enforcer.register_default(rule) self.credentials = {} self.action = 'foo' self.target = {} def test_forbidden_is_raised_if_enforce_scope_is_true(self): self.config_fixture.config(group='oslo_policy', enforce_scope=True) self.assertRaises( exception.ForbiddenAction, policy.enforce, self.credentials, self.action, self.target, ) def test_warning_message_is_logged_if_enforce_scope_is_false(self): self.config_fixture.config(group='oslo_policy', enforce_scope=False) expected_msg = ( 'Policy "foo": "" failed scope check. The token used to make the ' 'request was project scoped but the policy requires [\'system\'] ' 'scope. 
This behavior may change in the future where using the ' 'intended scope is required' ) with mock.patch('warnings.warn') as mock_warn: policy.enforce(self.credentials, self.action, self.target) mock_warn.assert_called_with(expected_msg) class PolicyJsonTestCase(unit.TestCase): def _get_default_policy_rules(self): """Return a dictionary of all in-code policies. All policies have a default value that is maintained in code. This method returns a dictionary containing all default policies. """ rules = dict() for rule in policies.list_rules(): rules[rule.name] = rule.check_str return rules def test_policies_loads(self): action = 'identity:list_projects' target = { 'user_id': uuid.uuid4().hex, 'user.domain_id': uuid.uuid4().hex, 'group.domain_id': uuid.uuid4().hex, 'project.domain_id': uuid.uuid4().hex, 'project_id': uuid.uuid4().hex, 'domain_id': uuid.uuid4().hex, } credentials = { 'username': uuid.uuid4().hex, 'token': uuid.uuid4().hex, 'project_name': None, 'user_id': uuid.uuid4().hex, 'roles': ['admin'], 'is_admin': True, 'is_admin_project': True, 'project_id': None, 'domain_id': uuid.uuid4().hex, } # The enforcer is setup behind the scenes and registers the in code # default policies. result = policy._ENFORCER._enforcer.enforce( action, target, credentials ) self.assertTrue(result) def test_all_targets_documented(self): policy_keys = self._get_default_policy_rules() # These keys are in the policy.yaml but aren't targets. policy_rule_keys = [ 'admin_or_owner', 'admin_or_token_subject', 'admin_required', 'owner', 'service_admin_or_token_subject', 'service_or_admin', 'service_role', 'token_subject', 'domain_managed_target_role', ] def read_doc_targets(): # Parse the doc/source/policy_mapping.rst file and return the # targets. 
doc_path = os.path.join( unit.ROOTDIR, 'doc', 'source', 'getting-started', 'policy_mapping.rst', ) with open(doc_path) as doc_file: for line in doc_file: if line.startswith('Target'): break for line in doc_file: # Skip === line if line.startswith('==='): break for line in doc_file: line = line.rstrip() if not line or line.startswith(' '): continue if line.startswith('=='): break target, dummy, dummy = line.partition(' ') yield str(target) doc_targets = list(read_doc_targets()) self.assertCountEqual(policy_keys, doc_targets + policy_rule_keys) class GeneratePolicyFileTestCase(unit.TestCase): def test_policy_generator_from_command_line(self): # This test ensures keystone.common.policy:get_enforcer ignores # unexpected arguments before handing them off to oslo.config, which # will fail and prevent users from generating policy files. ret_val = subprocess.Popen( ['oslopolicy-policy-generator', '--namespace', 'keystone'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) output = ret_val.communicate() self.assertEqual(ret_val.returncode, 0, output) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_receipt_provider.py0000664000175000017500000000557600000000000024440 0ustar00zuulzuul00000000000000# Copyright 2018 Catalyst Cloud Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid import freezegun from oslo_utils import timeutils from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.models import receipt_model from keystone import receipt from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs DELTA = datetime.timedelta(seconds=CONF.receipt.expiration) CURRENT_DATE = timeutils.utcnow() class TestReceiptProvider(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_receipts', CONF.fernet_receipts.max_active_keys, ) ) self.load_backends() def test_unsupported_receipt_provider(self): self.config_fixture.config(group='receipt', provider='MyProvider') self.assertRaises(ImportError, receipt.provider.Manager) def test_provider_receipt_expiration_validation(self): receipt = receipt_model.ReceiptModel() receipt.issued_at = utils.isotime(CURRENT_DATE) receipt.expires_at = utils.isotime(CURRENT_DATE - DELTA) receipt.id = uuid.uuid4().hex with freezegun.freeze_time(CURRENT_DATE): self.assertRaises( exception.ReceiptNotFound, PROVIDERS.receipt_provider_api._is_valid_receipt, receipt, ) # confirm a non-expired receipt doesn't throw errors. # returning None, rather than throwing an error is correct. 
receipt = receipt_model.ReceiptModel() receipt.issued_at = utils.isotime(CURRENT_DATE) receipt.expires_at = utils.isotime(CURRENT_DATE + DELTA) receipt.id = uuid.uuid4().hex with freezegun.freeze_time(CURRENT_DATE): self.assertIsNone( PROVIDERS.receipt_provider_api._is_valid_receipt(receipt) ) def test_validate_v3_none_receipt_raises_receipt_not_found(self): self.assertRaises( exception.ReceiptNotFound, PROVIDERS.receipt_provider_api.validate_receipt, None, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_revoke.py0000664000175000017500000005046600000000000022364 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock import uuid from oslo_utils import timeutils from testtools import matchers from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.models import revoke_model from keystone.revoke.backends import sql from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import test_backend_sql from keystone.token import provider CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def _future_time(): expire_delta = datetime.timedelta(seconds=1000) future_time = timeutils.utcnow() + expire_delta return future_time def _sample_blank_token(): issued_delta = datetime.timedelta(minutes=-2) issued_at = timeutils.utcnow() + issued_delta token_data = revoke_model.blank_token_data(issued_at) return token_data class RevokeTests: def _assertTokenRevoked(self, token_data): self.assertRaises( exception.TokenNotFound, PROVIDERS.revoke_api.check_token, token=token_data, ) def _assertTokenNotRevoked(self, token_data): self.assertIsNone(PROVIDERS.revoke_api.check_token(token_data)) def test_list(self): PROVIDERS.revoke_api.revoke_by_user(user_id=1) self.assertEqual(1, len(PROVIDERS.revoke_api.list_events())) PROVIDERS.revoke_api.revoke_by_user(user_id=2) self.assertEqual(2, len(PROVIDERS.revoke_api.list_events())) def test_list_since(self): PROVIDERS.revoke_api.revoke_by_user(user_id=1) PROVIDERS.revoke_api.revoke_by_user(user_id=2) past = timeutils.utcnow() - datetime.timedelta(seconds=1000) self.assertEqual( 2, len(PROVIDERS.revoke_api.list_events(last_fetch=past)) ) future = timeutils.utcnow() + datetime.timedelta(seconds=1000) self.assertEqual( 0, len(PROVIDERS.revoke_api.list_events(last_fetch=future)) ) def test_list_revoked_user(self): revocation_backend = sql.Revoke() # This simulates creating a token for a specific user. When we revoke # the token we should have a single revocation event in the list. 
We # are going to assert that the token values match the only revocation # event in the backend. first_token = _sample_blank_token() first_token['user_id'] = uuid.uuid4().hex PROVIDERS.revoke_api.revoke_by_user(user_id=first_token['user_id']) self._assertTokenRevoked(first_token) self.assertEqual( 1, len(revocation_backend.list_events(token=first_token)) ) # This simulates creating a separate token for a separate user. We are # going to revoke the token just like we did for the previous token. # We should have two revocation events stored in the backend but only # one should match the values of the second token. second_token = _sample_blank_token() second_token['user_id'] = uuid.uuid4().hex PROVIDERS.revoke_api.revoke_by_user(user_id=second_token['user_id']) self._assertTokenRevoked(second_token) self.assertEqual( 1, len(revocation_backend.list_events(token=second_token)) ) # This simulates creating another separate token for a separate user, # but we're not going to issue a revocation event. Even though we have # two revocation events persisted in the backend, neither of them # should match the values of the third token. If they did - our # revocation event matching would be too heavy handed, which would # result in over-generalized revocation patterns. third_token = _sample_blank_token() third_token['user_id'] = uuid.uuid4().hex self._assertTokenNotRevoked(third_token) self.assertEqual( 0, len(revocation_backend.list_events(token=third_token)) ) # This gets a token but overrides the user_id of the token to be None. # Technically this should never happen because tokens must belong to # a user. What we're testing here is that the two revocation events # we've created won't match None values for the user_id. 
fourth_token = _sample_blank_token() fourth_token['user_id'] = None self._assertTokenNotRevoked(fourth_token) self.assertEqual( 0, len(revocation_backend.list_events(token=fourth_token)) ) def test_list_revoked_project(self): revocation_backend = sql.Revoke() token = _sample_blank_token() # Create a token for a project, revoke token, check the token we # created has been revoked, and check the list returned a match for # the token when passed in. first_token = _sample_blank_token() first_token['project_id'] = uuid.uuid4().hex revocation_backend.revoke( revoke_model.RevokeEvent(project_id=first_token['project_id']) ) self._assertTokenRevoked(first_token) self.assertEqual( 1, len(revocation_backend.list_events(token=first_token)) ) # Create a second token, revoke it, check the token has been revoked, # and check the list to make sure that even though we now have 2 # revoked events in the revocation list, it will only return 1 because # only one match for our second_token should exist second_token = _sample_blank_token() second_token['project_id'] = uuid.uuid4().hex revocation_backend.revoke( revoke_model.RevokeEvent(project_id=second_token['project_id']) ) self._assertTokenRevoked(second_token) self.assertEqual( 1, len(revocation_backend.list_events(token=second_token)) ) # This gets a token but overrides project_id of the token to be None. # We expect that since there are two events which both have populated # project_ids, this should not match this third_token with any other # event in the list so we should receive 0. third_token = _sample_blank_token() third_token['project_id'] = None self._assertTokenNotRevoked(token) self.assertEqual(0, len(revocation_backend.list_events(token=token))) def test_list_revoked_audit(self): revocation_backend = sql.Revoke() # Create a token with audit_id set, revoke it, check it is revoked, # check to make sure that list_events matches the token to the event we # just revoked. 
first_token = _sample_blank_token() first_token['audit_id'] = provider.random_urlsafe_str() PROVIDERS.revoke_api.revoke_by_audit_id( audit_id=first_token['audit_id'] ) self._assertTokenRevoked(first_token) self.assertEqual( 1, len(revocation_backend.list_events(token=first_token)) ) # Create a second token, revoke it, check it is revoked, check to make # sure that list events only finds 1 match since there are 2 and they # dont both have different populated audit_id fields second_token = _sample_blank_token() second_token['audit_id'] = provider.random_urlsafe_str() PROVIDERS.revoke_api.revoke_by_audit_id( audit_id=second_token['audit_id'] ) self._assertTokenRevoked(second_token) self.assertEqual( 1, len(revocation_backend.list_events(token=second_token)) ) # Create a third token with audit_id set to None to make sure that # since there are no events currently revoked with audit_id None this # finds no matches third_token = _sample_blank_token() third_token['audit_id'] = None self._assertTokenNotRevoked(third_token) self.assertEqual( 0, len(revocation_backend.list_events(token=third_token)) ) def test_list_revoked_since(self): revocation_backend = sql.Revoke() token = _sample_blank_token() PROVIDERS.revoke_api.revoke_by_user(user_id=None) PROVIDERS.revoke_api.revoke_by_user(user_id=None) self.assertEqual(2, len(revocation_backend.list_events(token=token))) future = timeutils.utcnow() + datetime.timedelta(seconds=1000) token['issued_at'] = future self.assertEqual(0, len(revocation_backend.list_events(token=token))) def test_list_revoked_multiple_filters(self): revocation_backend = sql.Revoke() # create token that sets key/value filters in list_revoked first_token = _sample_blank_token() first_token['user_id'] = uuid.uuid4().hex first_token['project_id'] = uuid.uuid4().hex first_token['audit_id'] = provider.random_urlsafe_str() # revoke event and then verify that there is only one revocation # and verify the only revoked event is the token PROVIDERS.revoke_api.revoke( 
revoke_model.RevokeEvent( user_id=first_token['user_id'], project_id=first_token['project_id'], audit_id=first_token['audit_id'], ) ) self._assertTokenRevoked(first_token) self.assertEqual( 1, len(revocation_backend.list_events(token=first_token)) ) # If a token has None values which the event contains it shouldn't # match and not be revoked second_token = _sample_blank_token() self._assertTokenNotRevoked(second_token) self.assertEqual( 0, len(revocation_backend.list_events(token=second_token)) ) # If an event column and corresponding dict value don't match, Then # it should not add the event in the list. Demonstrate for project third_token = _sample_blank_token() third_token['project_id'] = uuid.uuid4().hex self._assertTokenNotRevoked(third_token) self.assertEqual( 0, len(revocation_backend.list_events(token=third_token)) ) # A revoked event with user_id as null and token user_id non null # should still be return an event and be revoked if other non null # event fields match non null token fields fourth_token = _sample_blank_token() fourth_token['user_id'] = uuid.uuid4().hex fourth_token['project_id'] = uuid.uuid4().hex fourth_token['audit_id'] = provider.random_urlsafe_str() PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent( project_id=fourth_token['project_id'], audit_id=fourth_token['audit_id'], ) ) self._assertTokenRevoked(fourth_token) self.assertEqual( 1, len(revocation_backend.list_events(token=fourth_token)) ) def _user_field_test(self, field_name): token = _sample_blank_token() token[field_name] = uuid.uuid4().hex PROVIDERS.revoke_api.revoke_by_user(user_id=token[field_name]) self._assertTokenRevoked(token) token2 = _sample_blank_token() token2[field_name] = uuid.uuid4().hex self._assertTokenNotRevoked(token2) def test_revoke_by_user(self): self._user_field_test('user_id') def test_revoke_by_user_matches_trustee(self): self._user_field_test('trustee_id') def test_revoke_by_user_matches_trustor(self): self._user_field_test('trustor_id') def 
test_by_domain_user(self): revocation_backend = sql.Revoke() # If revoke a domain, then a token for a user in the domain is revoked user_id = uuid.uuid4().hex domain_id = uuid.uuid4().hex token_data = _sample_blank_token() token_data['user_id'] = user_id token_data['identity_domain_id'] = domain_id self._assertTokenNotRevoked(token_data) self.assertEqual( 0, len(revocation_backend.list_events(token=token_data)) ) PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(domain_id=domain_id) ) self._assertTokenRevoked(token_data) self.assertEqual( 1, len(revocation_backend.list_events(token=token_data)) ) def test_by_domain_project(self): revocation_backend = sql.Revoke() token_data = _sample_blank_token() token_data['user_id'] = uuid.uuid4().hex token_data['identity_domain_id'] = uuid.uuid4().hex token_data['project_id'] = uuid.uuid4().hex token_data['assignment_domain_id'] = uuid.uuid4().hex self._assertTokenNotRevoked(token_data) self.assertEqual( 0, len(revocation_backend.list_events(token=token_data)) ) # If revoke a domain, then a token scoped to a project in the domain # is revoked. PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent( domain_id=token_data['assignment_domain_id'] ) ) self._assertTokenRevoked(token_data) self.assertEqual( 1, len(revocation_backend.list_events(token=token_data)) ) def test_by_domain_domain(self): revocation_backend = sql.Revoke() token_data = _sample_blank_token() token_data['user_id'] = uuid.uuid4().hex token_data['identity_domain_id'] = uuid.uuid4().hex token_data['assignment_domain_id'] = uuid.uuid4().hex self._assertTokenNotRevoked(token_data) self.assertEqual( 0, len(revocation_backend.list_events(token=token_data)) ) # If revoke a domain, then a token scoped to the domain is revoked. 
PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent( domain_id=token_data['assignment_domain_id'] ) ) self._assertTokenRevoked(token_data) self.assertEqual( 1, len(revocation_backend.list_events(token=token_data)) ) def test_revoke_by_audit_id(self): token = _sample_blank_token() # Audit ID and Audit Chain ID are populated with the same value # if the token is an original token token['audit_id'] = uuid.uuid4().hex token['audit_chain_id'] = token['audit_id'] PROVIDERS.revoke_api.revoke_by_audit_id(audit_id=token['audit_id']) self._assertTokenRevoked(token) token2 = _sample_blank_token() token2['audit_id'] = uuid.uuid4().hex token2['audit_chain_id'] = token2['audit_id'] self._assertTokenNotRevoked(token2) def test_revoke_by_audit_chain_id(self): revocation_backend = sql.Revoke() # Create our first token with audit_id audit_id = provider.random_urlsafe_str() token = _sample_blank_token() # Audit ID and Audit Chain ID are populated with the same value # if the token is an original token token['audit_id'] = audit_id token['audit_chain_id'] = audit_id # Check that the token is not revoked self._assertTokenNotRevoked(token) self.assertEqual(0, len(revocation_backend.list_events(token=token))) # Revoked token by audit chain id using the audit_id PROVIDERS.revoke_api.revoke_by_audit_chain_id(audit_id) # Check that the token is now revoked self._assertTokenRevoked(token) self.assertEqual(1, len(revocation_backend.list_events(token=token))) @mock.patch.object(timeutils, 'utcnow') def test_expired_events_are_removed(self, mock_utcnow): def _sample_token_values(): token = _sample_blank_token() token['expires_at'] = utils.isotime(_future_time(), subsecond=True) return token # We can not use timeutils.utcnow directly since we mocked it, so use # datetime to get pretty much the same. now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) now_plus_2h = now + datetime.timedelta(hours=2) mock_utcnow.return_value = now # Build a token and validate it. 
This will seed the cache for the # future 'synchronize' call. token_values = _sample_token_values() audit_chain_id = uuid.uuid4().hex PROVIDERS.revoke_api.revoke_by_audit_chain_id(audit_chain_id) token_values['audit_chain_id'] = audit_chain_id self.assertRaises( exception.TokenNotFound, PROVIDERS.revoke_api.check_token, token_values, ) # Move our clock forward by 2h, build a new token and validate it. # 'synchronize' should now be exercised and remove old expired events mock_utcnow.return_value = now_plus_2h PROVIDERS.revoke_api.revoke_by_audit_chain_id(audit_chain_id) # two hours later, it should still be not found self.assertRaises( exception.TokenNotFound, PROVIDERS.revoke_api.check_token, token_values, ) def test_delete_group_without_role_does_not_revoke_users(self): revocation_backend = sql.Revoke() domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create two groups. Group1 will be used to test deleting a group, # without role assignments and users in the group, doesn't create # revoked events. 
Group2 will show that deleting a group with role # assignment and users in the group does create revoked events group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domain['id']) group2 = PROVIDERS.identity_api.create_group(group2) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) user1 = unit.new_user_ref(domain_id=domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) user2 = unit.new_user_ref(domain_id=domain['id']) user2 = PROVIDERS.identity_api.create_user(user2) # Add two users to the group, verify they are added, delete group, and # check that the revocaiton events have not been created PROVIDERS.identity_api.add_user_to_group( user_id=user1['id'], group_id=group1['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=user2['id'], group_id=group1['id'] ) self.assertEqual( 2, len(PROVIDERS.identity_api.list_users_in_group(group1['id'])) ) PROVIDERS.identity_api.delete_group(group1['id']) self.assertEqual(0, len(revocation_backend.list_events())) # Assign a role to the group, add two users to the group, verify that # the role has been assigned to the group, verify the users have been # added to the group, delete the group, check that the revocation # events have been created PROVIDERS.assignment_api.create_grant( group_id=group2['id'], domain_id=domain['id'], role_id=role['id'] ) grants = PROVIDERS.assignment_api.list_role_assignments( role_id=role['id'] ) self.assertThat(grants, matchers.HasLength(1)) PROVIDERS.identity_api.add_user_to_group( user_id=user1['id'], group_id=group2['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=user2['id'], group_id=group2['id'] ) self.assertEqual( 2, len(PROVIDERS.identity_api.list_users_in_group(group2['id'])) ) PROVIDERS.identity_api.delete_group(group2['id']) self.assertEqual(2, len(revocation_backend.list_events())) class FernetSqlRevokeTests(test_backend_sql.SqlTests, RevokeTests): def 
config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', provider='fernet', revoke_by_id=False ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_shadow_users.py0000664000175000017500000001672200000000000023574 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from keystone.common import provider_api from keystone import exception from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.identity.shadow_users import test_backend from keystone.tests.unit.identity.shadow_users import test_core from keystone.tests.unit.ksfixtures import database PROVIDERS = provider_api.ProviderAPIs class ShadowUsersTests( unit.TestCase, test_backend.ShadowUsersBackendTests, test_core.ShadowUsersCoreTests, ): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) self.idp = { 'id': uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex, } self.mapping = { 'id': uuid.uuid4().hex, } self.protocol = { 'id': uuid.uuid4().hex, 'idp_id': self.idp['id'], 'mapping_id': self.mapping['id'], } self.federated_user = { 'idp_id': self.idp['id'], 'protocol_id': self.protocol['id'], 'unique_id': uuid.uuid4().hex, 'display_name': uuid.uuid4().hex, } self.email = uuid.uuid4().hex PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) PROVIDERS.federation_api.create_mapping( self.mapping['id'], self.mapping ) PROVIDERS.federation_api.create_protocol( self.idp['id'], self.protocol['id'], self.protocol ) self.domain_id = PROVIDERS.federation_api.get_idp(self.idp['id'])[ 'domain_id' ] class TestUserWithFederatedUser(ShadowUsersTests): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() def assertFederatedDictsEqual(self, fed_dict, fed_object): self.assertEqual(fed_dict['idp_id'], fed_object['idp_id']) self.assertEqual( fed_dict['protocol_id'], fed_object['protocols'][0]['protocol_id'] ) self.assertEqual( fed_dict['unique_id'], fed_object['protocols'][0]['unique_id'] ) def test_get_user_when_user_has_federated_object(self): fed_dict = unit.new_federated_user_ref( idp_id=self.idp['id'], 
protocol_id=self.protocol['id'] ) user = self.shadow_users_api.create_federated_user( self.domain_id, fed_dict ) # test that the user returns a federated object and that there is only # one returned user_ref = self.identity_api.get_user(user['id']) self.assertIn('federated', user_ref) self.assertEqual(1, len(user_ref['federated'])) self.assertFederatedDictsEqual(fed_dict, user_ref['federated'][0]) def test_create_user_with_invalid_idp_and_protocol_fails(self): baduser = unit.new_user_ref(domain_id=self.domain_id) baduser['federated'] = [ { 'idp_id': 'fakeidp', 'protocols': [ {'protocol_id': 'nonexistent', 'unique_id': 'unknown'} ], } ] # Check validation works by throwing a federated object with # invalid idp_id, protocol_id inside the user passed to create_user. self.assertRaises( exception.ValidationError, self.identity_api.create_user, baduser ) baduser['federated'][0]['idp_id'] = self.idp['id'] self.assertRaises( exception.ValidationError, self.identity_api.create_user, baduser ) def test_create_user_with_federated_attributes(self): # Create the schema of a federated attribute being passed in with a # user. 
user = unit.new_user_ref(domain_id=self.domain_id) unique_id = uuid.uuid4().hex user['federated'] = [ { 'idp_id': self.idp['id'], 'protocols': [ { 'protocol_id': self.protocol['id'], 'unique_id': unique_id, } ], } ] # Test that there are no current federated_users that match our users # federated object and create the user self.assertRaises( exception.UserNotFound, self.shadow_users_api.get_federated_user, self.idp['id'], self.protocol['id'], unique_id, ) ref = self.identity_api.create_user(user) # Test that the user and federated object now exists self.assertEqual(user['name'], ref['name']) self.assertEqual(user['federated'], ref['federated']) fed_user = self.shadow_users_api.get_federated_user( self.idp['id'], self.protocol['id'], unique_id ) self.assertIsNotNone(fed_user) def test_update_user_with_invalid_idp_and_protocol_fails(self): baduser = unit.new_user_ref(domain_id=self.domain_id) baduser['federated'] = [ { 'idp_id': 'fakeidp', 'protocols': [ {'protocol_id': 'nonexistent', 'unique_id': 'unknown'} ], } ] # Check validation works by throwing a federated object with # invalid idp_id, protocol_id inside the user passed to create_user. 
self.assertRaises( exception.ValidationError, self.identity_api.create_user, baduser ) baduser['federated'][0]['idp_id'] = self.idp['id'] self.assertRaises( exception.ValidationError, self.identity_api.create_user, baduser ) def test_update_user_with_federated_attributes(self): user = self.shadow_users_api.create_federated_user( self.domain_id, self.federated_user ) user = self.identity_api.get_user(user['id']) # Test that update user can return a federated object with the user as # a response if the user has any user = self.identity_api.update_user(user['id'], user) self.assertFederatedDictsEqual( self.federated_user, user['federated'][0] ) # Test that update user can replace a users federated objects if added # in the request and that its response is that new federated objects new_fed = [ { 'idp_id': self.idp['id'], 'protocols': [ { 'protocol_id': self.protocol['id'], 'unique_id': uuid.uuid4().hex, } ], } ] user['federated'] = new_fed user = self.identity_api.update_user(user['id'], user) self.assertTrue('federated' in user) self.assertEqual(len(user['federated']), 1) self.assertEqual(user['federated'][0], new_fed[0]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_sql_banned_operations.py0000664000175000017500000003345400000000000025440 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import typing as ty from alembic import command as alembic_api from alembic import script as alembic_script import fixtures from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures from oslo_log import log as logging import sqlalchemy # We need to import all of these so the tables are registered. It would be # easier if these were all in a central location :( import keystone.application_credential.backends.sql # noqa: F401 import keystone.assignment.backends.sql # noqa: F401 import keystone.assignment.role_backends.sql_model # noqa: F401 import keystone.catalog.backends.sql # noqa: F401 from keystone.common import sql from keystone.common.sql import upgrades import keystone.conf import keystone.credential.backends.sql # noqa: F401 import keystone.endpoint_policy.backends.sql # noqa: F401 import keystone.federation.backends.sql # noqa: F401 import keystone.identity.backends.sql_model # noqa: F401 import keystone.identity.mapping_backends.sql # noqa: F401 import keystone.limit.backends.sql # noqa: F401 import keystone.oauth1.backends.sql # noqa: F401 import keystone.policy.backends.sql # noqa: F401 import keystone.resource.backends.sql_model # noqa: F401 import keystone.resource.config_backends.sql # noqa: F401 import keystone.revoke.backends.sql # noqa: F401 from keystone.tests import unit import keystone.trust.backends.sql # noqa: F401 LOG = logging.getLogger(__name__) class DBOperationNotAllowed(Exception): pass class BannedDBSchemaOperations(fixtures.Fixture): """Ban some operations for migrations.""" def __init__(self, banned_ops, revision): super().__init__() self._banned_ops = banned_ops or {} self._revision = revision @staticmethod def _explode(op, revision): def fail(*a, **kw): msg = "Operation '%s' is not allowed in migration %s" raise DBOperationNotAllowed(msg % (op, revision)) return fail def setUp(self): super().setUp() for op in self._banned_ops: self.useFixture( fixtures.MonkeyPatch( 'alembic.op.%s' % op, 
self._explode(op, self._revision), ) ) class KeystoneMigrationsWalk( test_fixtures.OpportunisticDBTestMixin, ): # Migrations can take a long time, particularly on underpowered CI nodes. # Give them some breathing room. TIMEOUT_SCALING_FACTOR = 4 BANNED_OPS = { 'expand': [ 'alter_column', 'drop_column', 'drop_constraint', 'drop_index', 'drop_table', 'drop_table_comment', # 'execute', 'rename_table', ], 'contract': { 'add_column', 'bulk_insert', 'create_check_constraint', 'create_exclude_constraint', 'create_foreign_key', 'create_index', 'create_primary_key', 'create_table', 'create_table_comment', 'create_unique_constraint', # 'execute', 'rename_table', }, } BANNED_OP_EXCEPTIONS: list[ty.Any] = [ # NOTE(xek, henry-nash): Reviewers: DO NOT ALLOW THINGS TO BE ADDED # HERE UNLESS JUSTIFICATION CAN BE PROVIDED AS TO WHY THIS WILL NOT # CAUSE PROBLEMS FOR ROLLING UPGRADES. ] def setUp(self): super().setUp() self.engine = enginefacade.writer.get_engine() self.config = upgrades._find_alembic_conf() self.init_version = upgrades.ALEMBIC_INIT_VERSION # TODO(stephenfin): Do we need this? I suspect not since we're using # enginefacade.write.get_engine() directly above # Override keystone's context manager to be oslo.db's global context # manager. 
sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True self.addCleanup( setattr, sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False ) self.addCleanup(sql.cleanup) def _migrate_up(self, connection, revision): version = revision.revision if version == self.init_version: # no tests for the initial revision alembic_api.upgrade(self.config, version) return self.assertIsNotNone( getattr(self, '_check_%s' % version, None), ('DB Migration %s does not have a test; you must add one') % version, ) pre_upgrade = getattr(self, '_pre_upgrade_%s' % version, None) if pre_upgrade: pre_upgrade(connection) banned_ops = [] if version not in self.BANNED_OP_EXCEPTIONS: # there should only ever be one label, but this is safer for branch_label in revision.branch_labels: banned_ops.extend(self.BANNED_OPS[branch_label]) # SQLite migrations are running in batch mode, which mean we recreate a # table in all migrations. As such, we can't really blacklist things so # don't even try. if self.FIXTURE.DRIVER == 'sqlite': banned_ops = [] with BannedDBSchemaOperations(banned_ops, version): alembic_api.upgrade(self.config, version) post_upgrade = getattr(self, '_check_%s' % version, None) if post_upgrade: post_upgrade(connection) def _pre_upgrade_e25ffa003242(self, connection): """This is a no-op migration.""" pass def _check_e25ffa003242(self, connection): """This is a no-op migration.""" pass def _pre_upgrade_29e87d24a316(self, connection): """This is a no-op migration.""" pass def _check_29e87d24a316(self, connection): """This is a no-op migration.""" pass # 2023.2 bobcat _99de3849d860_removed_constraints = { 'access_rule': 'access_rule_external_id_key', 'trust': 'duplicate_trust_constraint_expanded', } def _pre_upgrade_99de3849d860(self, connection): inspector = sqlalchemy.inspect(connection) for ( table, constraint, ) in self._99de3849d860_removed_constraints.items(): constraints = [ x['name'] for x in inspector.get_unique_constraints(table) ] self.assertIn(constraint, constraints) def 
_check_99de3849d860(self, connection): inspector = sqlalchemy.inspect(connection) for ( table, constraint, ) in self._99de3849d860_removed_constraints.items(): constraints = [ x['name'] for x in inspector.get_unique_constraints(table) ] self.assertNotIn(constraint, constraints) def _pre_upgrade_b4f8b3f584e0(self, connection): inspector = sqlalchemy.inspect(connection) constraints = inspector.get_unique_constraints('trust') self.assertNotIn( 'duplicate_trust_constraint', {x['name'] for x in constraints}, ) all_constraints = [] for c in constraints: all_constraints + c.get('column_names', []) not_allowed_constraints = [ 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', ] for not_c in not_allowed_constraints: self.assertNotIn(not_c, all_constraints) def _check_b4f8b3f584e0(self, connection): inspector = sqlalchemy.inspect(connection) constraints = inspector.get_unique_constraints('trust') self.assertIn( 'duplicate_trust_constraint', {x['name'] for x in constraints}, ) constraint = [ x for x in constraints if x['name'] == 'duplicate_trust_constraint' ][0] self.assertEqual( [ 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', ], constraint['column_names'], ) def _pre_upgrade_c88cdce8f248(self, connection): if connection.engine.name != 'mysql': return # NOTE(stephenfin): Even though the migration is written to handle # names generated by both alembic and sqlalchemy-migrate, we only check # for the former here since we don't apply sqlalchemy-migrate # migrations anymore inspector = sqlalchemy.inspect(connection) indexes = inspector.get_indexes('project_tag') self.assertIn('project_id', {x['name'] for x in indexes}) def _check_c88cdce8f248(self, connection): # This migration only applies to MySQL if connection.engine.name != 'mysql': return inspector = sqlalchemy.inspect(connection) indexes = inspector.get_indexes('project_tag') self.assertNotIn('project_id', {x['name'] for x in indexes}) def 
_pre_upgrade_11c3b243b4cb(self, connection): inspector = sqlalchemy.inspect(connection) columns = inspector.get_columns('service_provider') found = False for column in columns: if column['name'] != 'relay_state_prefix': continue # The default should initially be set to the CONF value self.assertIsNotNone(column['default']) found = True self.assertTrue(found, 'Failed to find column') def _check_11c3b243b4cb(self, connection): inspector = sqlalchemy.inspect(connection) columns = inspector.get_columns('service_provider') found = False for column in columns: if column['name'] != 'relay_state_prefix': continue # The default should now be unset self.assertIsNone(column['default']) found = True self.assertTrue(found, 'Failed to find column') def _pre_upgrade_47147121(self, connection): inspector = sqlalchemy.inspect(connection) columns = inspector.get_columns('mapping') all_column_names = [] for c in columns: all_column_names.append(c.get('name')) self.assertNotIn('schema_version', all_column_names) def _check_47147121(self, connection): inspector = sqlalchemy.inspect(connection) columns = inspector.get_columns('mapping') all_column_names = [] for c in columns: all_column_names.append(c.get('name')) self.assertIn('schema_version', all_column_names) def test_single_base_revision(self): """Ensure we only have a single base revision. There's no good reason for us to have diverging history, so validate that only one base revision exists. This will prevent simple errors where people forget to specify the base revision. If this fail for your change, look for migrations that do not have a 'revises' line in them. """ script = alembic_script.ScriptDirectory.from_config(self.config) self.assertEqual(1, len(script.get_bases())) def test_head_revisions(self): """Ensure we only have a two head revisions. There's no good reason for us to have diverging history beyond the expand and contract branches, so validate that only these head revisions exist. 
This will prevent merge conflicts adding additional head revision points. If this fail for your change, look for migrations with the duplicate 'revises' line in them. """ script = alembic_script.ScriptDirectory.from_config(self.config) self.assertEqual(2, len(script.get_heads())) def test_walk_versions(self): with self.engine.begin() as connection: self.config.attributes['connection'] = connection script = alembic_script.ScriptDirectory.from_config(self.config) revisions = [x for x in script.walk_revisions()] # for some reason, 'walk_revisions' gives us the revisions in # reverse chronological order so we have to invert this revisions.reverse() self.assertEqual(revisions[0].revision, self.init_version) for revision in revisions: LOG.info('Testing revision %s', revision.revision) self._migrate_up(connection, revision) def _get_head_from_file(self, branch): path = os.path.join( os.path.dirname(upgrades.__file__), 'migrations', 'versions', f'{branch.upper()}_HEAD', ) with open(path) as fh: return fh.read().strip() def test_db_version_alembic(self): upgrades.offline_sync_database_to_version(engine=self.engine) for branch in (upgrades.EXPAND_BRANCH, upgrades.CONTRACT_BRANCH): head = self._get_head_from_file(branch) self.assertEqual(head, upgrades.get_db_version(branch)) class TestMigrationsWalkSQLite( KeystoneMigrationsWalk, test_fixtures.OpportunisticDBTestMixin, unit.TestCase, ): pass class TestMigrationsWalkMySQL( KeystoneMigrationsWalk, test_fixtures.OpportunisticDBTestMixin, unit.TestCase, ): FIXTURE = test_fixtures.MySQLOpportunisticFixture class TestMigrationsWalkPostgreSQL( KeystoneMigrationsWalk, test_fixtures.OpportunisticDBTestMixin, unit.TestCase, ): FIXTURE = test_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_sql_upgrade.py0000664000175000017500000003024200000000000023365 0ustar00zuulzuul00000000000000# Copyright 
2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Test for SQL migration extensions. To run these tests against a live database: 1. Set up a blank, live database. 2. Export database information to environment variable ``OS_TEST_DBAPI_ADMIN_CONNECTION``. For example:: export OS_TEST_DBAPI_ADMIN_CONNECTION=postgresql://localhost/postgres?host= /var/folders/7k/pwdhb_mj2cv4zyr0kyrlzjx40000gq/T/tmpMGqN8C&port=9824 3. Run the tests using:: tox -e py39 -- keystone.tests.unit.test_sql_upgrade For further information, see `oslo.db documentation `_. .. warning:: Your database will be wiped. Do not do this against a database with valuable data as all data will be lost. """ import fixtures from oslo_db import options as db_options from oslo_db.sqlalchemy import enginefacade from oslo_db.sqlalchemy import test_fixtures as db_fixtures from oslo_log import fixture as log_fixture from oslo_log import log import sqlalchemy.exc from keystone.cmd import cli from keystone.common import sql from keystone.common.sql import upgrades import keystone.conf from keystone.tests import unit from keystone.tests.unit import ksfixtures CONF = keystone.conf.CONF # NOTE(morganfainberg): This should be updated when each DB migration collapse # is done to mirror the expected structure of the DB in the format of # { : [, , ...], ... 
} INITIAL_TABLE_STRUCTURE = { 'config_register': [ 'type', 'domain_id', ], 'credential': [ 'id', 'user_id', 'project_id', 'type', 'extra', 'key_hash', 'encrypted_blob', ], 'endpoint': [ 'id', 'legacy_endpoint_id', 'interface', 'region_id', 'service_id', 'url', 'enabled', 'extra', ], 'group': [ 'id', 'domain_id', 'name', 'description', 'extra', ], 'policy': [ 'id', 'type', 'blob', 'extra', ], 'project': [ 'id', 'name', 'extra', 'description', 'enabled', 'domain_id', 'parent_id', 'is_domain', ], 'project_option': [ 'project_id', 'option_id', 'option_value', ], 'project_tag': [ 'project_id', 'name', ], 'role': [ 'id', 'name', 'extra', 'domain_id', 'description', ], 'role_option': [ 'role_id', 'option_id', 'option_value', ], 'service': [ 'id', 'type', 'extra', 'enabled', ], 'token': [ 'id', 'expires', 'extra', 'valid', 'trust_id', 'user_id', ], 'trust': [ 'id', 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'deleted_at', 'expires_at', 'remaining_uses', 'extra', 'expires_at_int', 'redelegated_trust_id', 'redelegation_count', ], 'trust_role': [ 'trust_id', 'role_id', ], 'user': [ 'id', 'extra', 'enabled', 'default_project_id', 'created_at', 'last_active_at', 'domain_id', ], 'user_option': [ 'user_id', 'option_id', 'option_value', ], 'user_group_membership': [ 'user_id', 'group_id', ], 'region': [ 'id', 'description', 'parent_region_id', 'extra', ], 'assignment': [ 'type', 'actor_id', 'target_id', 'role_id', 'inherited', ], 'id_mapping': [ 'public_id', 'domain_id', 'local_id', 'entity_type', ], 'whitelisted_config': [ 'domain_id', 'group', 'option', 'value', ], 'sensitive_config': [ 'domain_id', 'group', 'option', 'value', ], 'policy_association': [ 'id', 'policy_id', 'endpoint_id', 'service_id', 'region_id', ], 'identity_provider': [ 'id', 'enabled', 'description', 'domain_id', 'authorization_ttl', ], 'federation_protocol': [ 'id', 'idp_id', 'mapping_id', 'remote_id_attribute', ], 'mapping': [ 'id', 'rules', 'schema_version', ], 'service_provider': 
[ 'auth_url', 'id', 'enabled', 'description', 'sp_url', 'relay_state_prefix', ], 'idp_remote_ids': [ 'idp_id', 'remote_id', ], 'consumer': [ 'id', 'description', 'secret', 'extra', ], 'request_token': [ 'id', 'request_secret', 'verifier', 'authorizing_user_id', 'requested_project_id', 'role_ids', 'consumer_id', 'expires_at', ], 'access_token': [ 'id', 'access_secret', 'authorizing_user_id', 'project_id', 'role_ids', 'consumer_id', 'expires_at', ], 'revocation_event': [ 'id', 'domain_id', 'project_id', 'user_id', 'role_id', 'trust_id', 'consumer_id', 'access_token_id', 'issued_before', 'expires_at', 'revoked_at', 'audit_id', 'audit_chain_id', ], 'project_endpoint': ['endpoint_id', 'project_id'], 'endpoint_group': [ 'id', 'name', 'description', 'filters', ], 'project_endpoint_group': [ 'endpoint_group_id', 'project_id', ], 'implied_role': [ 'prior_role_id', 'implied_role_id', ], 'local_user': [ 'id', 'user_id', 'domain_id', 'name', 'failed_auth_count', 'failed_auth_at', ], 'password': [ 'id', 'local_user_id', 'created_at', 'expires_at', 'self_service', 'password_hash', 'created_at_int', 'expires_at_int', ], 'federated_user': [ 'id', 'user_id', 'idp_id', 'protocol_id', 'unique_id', 'display_name', ], 'nonlocal_user': [ 'domain_id', 'name', 'user_id', ], 'system_assignment': [ 'type', 'actor_id', 'target_id', 'role_id', 'inherited', ], 'registered_limit': [ 'internal_id', 'id', 'service_id', 'region_id', 'resource_name', 'default_limit', 'description', ], 'limit': [ 'internal_id', 'id', 'project_id', 'resource_limit', 'description', 'registered_limit_id', 'domain_id', ], 'application_credential': [ 'internal_id', 'id', 'name', 'secret_hash', 'description', 'user_id', 'project_id', 'expires_at', 'system', 'unrestricted', ], 'application_credential_role': [ 'application_credential_id', 'role_id', ], 'access_rule': [ 'id', 'service', 'path', 'method', 'external_id', 'user_id', ], 'application_credential_access_rule': [ 'application_credential_id', 'access_rule_id', ], 
'expiring_user_group_membership': [ 'user_id', 'group_id', 'idp_id', 'last_verified', ], } class MigrateBase( db_fixtures.OpportunisticDBTestMixin, ): """Test complete orchestration between all database phases.""" def setUp(self): super().setUp() self.useFixture(log_fixture.get_logging_handle_error_fixture()) self.stdlog = self.useFixture(ksfixtures.StandardLogging()) self.useFixture(ksfixtures.WarningsFixture()) self.engine = enginefacade.writer.get_engine() self.sessionmaker = enginefacade.writer.get_sessionmaker() db_options.set_defaults(CONF, connection=self.engine.url) # Override keystone's context manager to be oslo.db's global context # manager. sql.core._TESTING_USE_GLOBAL_CONTEXT_MANAGER = True self.addCleanup( setattr, sql.core, '_TESTING_USE_GLOBAL_CONTEXT_MANAGER', False ) self.addCleanup(sql.cleanup) def expand(self): """Expand database schema.""" upgrades.expand_schema(engine=self.engine) def contract(self): """Contract database schema.""" upgrades.contract_schema(engine=self.engine) @property def metadata(self): """A collection of tables and their associated schemas.""" return sqlalchemy.MetaData() def load_table(self, name): table = sqlalchemy.Table( name, self.metadata, autoload_with=self.engine, ) return table def assertTableDoesNotExist(self, table_name): """Assert that a given table exists cannot be selected by name.""" # Switch to a different metadata otherwise you might still # detect renamed or dropped tables try: sqlalchemy.Table( table_name, self.metadata, autoload_with=self.engine, ) except sqlalchemy.exc.NoSuchTableError: pass else: raise AssertionError('Table "%s" already exists' % table_name) def assertTableColumns(self, table_name, expected_cols): """Assert that the table contains the expected set of columns.""" table = self.load_table(table_name) actual_cols = [col.name for col in table.columns] # Check if the columns are equal, but allow for a different order, # which might occur after an upgrade followed by a downgrade 
self.assertCountEqual( expected_cols, actual_cols, '%s table' % table_name ) def test_db_sync_check(self): checker = cli.DbSync() # If the expand repository doesn't exist yet, then we need to make sure # we advertise that `--expand` must be run first. log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO)) status = checker.check_db_sync_status() self.assertIn("keystone-manage db_sync --expand", log_info.output) self.assertEqual(status, 2) # Assert the correct message is printed when migrate is ahead of # contract self.expand() log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO)) status = checker.check_db_sync_status() self.assertIn("keystone-manage db_sync --contract", log_info.output) self.assertEqual(status, 4) # Assert the correct message gets printed when all commands are on # the same version self.contract() log_info = self.useFixture(fixtures.FakeLogger(level=log.INFO)) status = checker.check_db_sync_status() self.assertIn("All db_sync commands are upgraded", log_info.output) self.assertEqual(status, 0) def test_upgrade_add_initial_tables(self): self.expand() for table in INITIAL_TABLE_STRUCTURE: self.assertTableColumns(table, INITIAL_TABLE_STRUCTURE[table]) class FullMigrationSQLite(MigrateBase, unit.TestCase): pass class FullMigrationMySQL(MigrateBase, unit.TestCase): FIXTURE = db_fixtures.MySQLOpportunisticFixture class FullMigrationPostgreSQL(MigrateBase, unit.TestCase): FIXTURE = db_fixtures.PostgresqlOpportunisticFixture ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_token_provider.py0000664000175000017500000000522300000000000024112 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import urllib from oslo_utils import timeutils from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone import token from keystone.token import provider CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs FUTURE_DELTA = datetime.timedelta(seconds=CONF.token.expiration) CURRENT_DATE = timeutils.utcnow() class TestTokenProvider(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) self.load_backends() def test_strings_are_url_safe(self): s = provider.random_urlsafe_str() self.assertEqual(s, urllib.parse.quote_plus(s)) def test_unsupported_token_provider(self): self.config_fixture.config(group='token', provider='MyProvider') self.assertRaises(ImportError, token.provider.Manager) def test_provider_token_expiration_validation(self): token = token_model.TokenModel() token.issued_at = "2013-05-21T00:02:43.941473Z" token.expires_at = utils.isotime(CURRENT_DATE) self.assertRaises( exception.TokenNotFound, PROVIDERS.token_provider_api._is_valid_token, token, ) token = token_model.TokenModel() token.issued_at = "2013-05-21T00:02:43.941473Z" token.expires_at = utils.isotime(timeutils.utcnow() + FUTURE_DELTA) 
self.assertIsNone(PROVIDERS.token_provider_api._is_valid_token(token)) def test_validate_v3_token_with_no_token_raises_token_not_found(self): self.assertRaises( exception.TokenNotFound, PROVIDERS.token_provider_api.validate_token, None, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_url_middleware.py0000664000175000017500000000477100000000000024066 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from keystone.server.flask.request_processing.middleware import url_normalize from keystone.tests import unit class FakeApp: """Fakes a WSGI app URL normalized.""" def __init__(self): self.env = {} def __call__(self, env, start_response): self.env = env return class UrlMiddlewareTest(unit.TestCase): def setUp(self): super().setUp() self.fake_app = FakeApp() self.middleware = url_normalize.URLNormalizingMiddleware(self.fake_app) def test_trailing_slash_normalization(self): """Test /v3/auth/tokens & /v3/auth/tokens/ normalized URLs match.""" expected = '/v3/auth/tokens' no_slash = {'PATH_INFO': expected} with_slash = {'PATH_INFO': '/v3/auth/tokens/'} with_many_slash = {'PATH_INFO': '/v3/auth/tokens////'} # Run with a URL that doesn't need stripping and ensure nothing else is # added to the environ self.middleware(no_slash, None) self.assertEqual(expected, self.fake_app.env['PATH_INFO']) self.assertEqual(1, len(self.fake_app.env.keys())) # Run with a URL that needs a single slash stripped and nothing else is # added to the environ self.middleware(with_slash, None) self.assertEqual(expected, self.fake_app.env['PATH_INFO']) self.assertEqual(1, len(self.fake_app.env.keys())) # Run with a URL that needs multiple slashes stripped and ensure # nothing else is added to the environ self.middleware(with_many_slash, None) self.assertEqual(expected, self.fake_app.env['PATH_INFO']) self.assertEqual(1, len(self.fake_app.env.keys())) def test_rewrite_empty_path(self): """Test empty path is rewritten to root.""" environ = {'PATH_INFO': ''} self.middleware(environ, None) self.assertEqual('/', self.fake_app.env['PATH_INFO']) self.assertEqual(1, len(self.fake_app.env.keys())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_v3.py0000664000175000017500000016726200000000000021424 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, 
Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import http.client import uuid import oslo_context.context from oslo_serialization import jsonutils from testtools import matchers import webtest from keystone.common import authorization from keystone.common import cache from keystone.common import provider_api from keystone.common.validation import validators from keystone import exception from keystone.resource.backends import base as resource_base from keystone.server.flask.request_processing.middleware import auth_context from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import rest PROVIDERS = provider_api.ProviderAPIs DEFAULT_DOMAIN_ID = 'default' TIME_FORMAT = unit.TIME_FORMAT class RestfulTestCase( unit.SQLDriverOverrides, rest.RestfulTestCase, common_auth.AuthTestMixin ): def generate_token_schema( self, system_scoped=False, domain_scoped=False, project_scoped=False ): """Return a dictionary of token properties to validate against.""" ROLES_SCHEMA = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': { 'type': 'string', }, 'name': { 'type': 'string', }, 'description': { 'type': 'string', }, 'options': { 'type': 'object', }, }, 'required': [ 'id', 'name', ], 'additionalProperties': False, }, 'minItems': 1, } properties = { 'audit_ids': { 'type': 'array', 'items': { 'type': 'string', }, 'minItems': 1, 'maxItems': 2, }, 'expires_at': { 'type': 'string', 'pattern': unit.TIME_FORMAT_REGEX, }, 'issued_at': { 'type': 
'string', 'pattern': unit.TIME_FORMAT_REGEX, }, 'methods': { 'type': 'array', 'items': { 'type': 'string', }, }, 'user': { 'type': 'object', 'required': ['id', 'name', 'domain', 'password_expires_at'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, 'domain': { 'type': 'object', 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, }, 'required': ['id', 'name'], 'additonalProperties': False, }, 'password_expires_at': { 'type': ['string', 'null'], 'pattern': unit.TIME_FORMAT_REGEX, }, }, 'additionalProperties': False, }, } if system_scoped: properties['catalog'] = {'type': 'array'} properties['system'] = { 'type': 'object', 'properties': {'all': {'type': 'boolean'}}, } properties['roles'] = ROLES_SCHEMA elif domain_scoped: properties['catalog'] = {'type': 'array'} properties['roles'] = ROLES_SCHEMA properties['domain'] = { 'type': 'object', 'required': ['id', 'name'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, }, 'additionalProperties': False, } elif project_scoped: properties['is_admin_project'] = {'type': 'boolean'} properties['catalog'] = {'type': 'array'} # FIXME(lbragstad): Remove this in favor of the predefined # ROLES_SCHEMA dictionary once bug 1763510 is fixed. 
ROLES_SCHEMA['items']['properties']['domain_id'] = { 'type': [ 'null', 'string', ], } properties['roles'] = ROLES_SCHEMA properties['is_domain'] = {'type': 'boolean'} properties['project'] = { 'type': ['object'], 'required': ['id', 'name', 'domain'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, 'domain': { 'type': ['object'], 'required': ['id', 'name'], 'properties': { 'id': {'type': 'string'}, 'name': {'type': 'string'}, }, 'additionalProperties': False, }, }, 'additionalProperties': False, } schema = { 'type': 'object', 'properties': properties, 'required': [ 'audit_ids', 'expires_at', 'issued_at', 'methods', 'user', ], 'optional': [], 'additionalProperties': False, } if system_scoped: schema['required'].extend(['system', 'roles']) schema['optional'].append('catalog') elif domain_scoped: schema['required'].extend(['domain', 'roles']) schema['optional'].append('catalog') elif project_scoped: schema['required'].append('project') schema['optional'].append('catalog') schema['optional'].append('OS-TRUST:trust') schema['optional'].append('is_admin_project') schema['optional'].append('is_domain') return schema def config_files(self): config_files = super().config_files() config_files.append(unit.dirs.tests_conf('backend_sql.conf')) return config_files def setUp(self): """Setup for v3 Restful Test Cases.""" super().setUp() self.empty_context = {'environment': {}} def load_backends(self): # ensure the cache region instance is setup cache.configure_cache() super().load_backends() def load_fixtures(self, fixtures): self.load_sample_data() def _populate_default_domain(self): try: PROVIDERS.resource_api.get_domain(DEFAULT_DOMAIN_ID) except exception.DomainNotFound: root_domain = unit.new_domain_ref( id=resource_base.NULL_DOMAIN_ID, name=resource_base.NULL_DOMAIN_ID, ) PROVIDERS.resource_api.create_domain( resource_base.NULL_DOMAIN_ID, root_domain ) domain = unit.new_domain_ref( description=('The default domain'), id=DEFAULT_DOMAIN_ID, name='Default', 
) PROVIDERS.resource_api.create_domain(DEFAULT_DOMAIN_ID, domain) def load_sample_data(self, create_region_and_endpoints=True): self._populate_default_domain() self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] PROVIDERS.resource_api.create_domain(self.domain_id, self.domain) self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.project = PROVIDERS.resource_api.create_project( self.project_id, self.project ) self.user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.user_id = self.user['id'] self.default_domain_project_id = uuid.uuid4().hex self.default_domain_project = unit.new_project_ref( domain_id=DEFAULT_DOMAIN_ID ) self.default_domain_project['id'] = self.default_domain_project_id PROVIDERS.resource_api.create_project( self.default_domain_project_id, self.default_domain_project ) self.default_domain_user = unit.create_user( PROVIDERS.identity_api, domain_id=DEFAULT_DOMAIN_ID ) self.default_domain_user_id = self.default_domain_user['id'] # create & grant policy.yaml's default role for admin_required self.role = unit.new_role_ref(name='admin') self.role_id = self.role['id'] PROVIDERS.role_api.create_role(self.role_id, self.role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, self.role_id ) PROVIDERS.assignment_api.add_role_to_user_and_project( self.default_domain_user_id, self.default_domain_project_id, self.role_id, ) PROVIDERS.assignment_api.add_role_to_user_and_project( self.default_domain_user_id, self.project_id, self.role_id ) # Create "req_admin" user for simulating a real user instead of the # admin_token_auth middleware self.user_reqadmin = unit.create_user( PROVIDERS.identity_api, DEFAULT_DOMAIN_ID ) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_reqadmin['id'], self.default_domain_project_id, self.role_id, ) if create_region_and_endpoints: self.region = unit.new_region_ref() self.region_id = 
self.region['id'] PROVIDERS.catalog_api.create_region(self.region) self.service = unit.new_service_ref(type='identity') self.service_id = self.service['id'] PROVIDERS.catalog_api.create_service( self.service_id, self.service.copy() ) self.endpoint = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) self.endpoint_id = self.endpoint['id'] PROVIDERS.catalog_api.create_endpoint( self.endpoint_id, self.endpoint.copy() ) # The server adds 'enabled' and defaults to True. self.endpoint['enabled'] = True def create_new_default_project_for_user( self, user_id, domain_id, enable_project=True ): ref = unit.new_project_ref(domain_id=domain_id, enabled=enable_project) r = self.post('/projects', body={'project': ref}) project = self.assertValidProjectResponse(r, ref) # set the user's preferred project body = {'user': {'default_project_id': project['id']}} r = self.patch(f'/users/{user_id}', body=body) self.assertValidUserResponse(r) return project def get_admin_token(self): """Convenience method so that we can test authenticated requests.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user_reqadmin['name'], 'password': self.user_reqadmin['password'], 'domain': { 'id': self.user_reqadmin['domain_id'] }, } }, }, 'scope': { 'project': { 'id': self.default_domain_project_id, } }, } }, ) return r.headers.get('X-Subject-Token') def get_unscoped_token(self): """Convenience method so that we can test authenticated requests.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': {'id': self.user['domain_id']}, } }, } } }, ) return r.headers.get('X-Subject-Token') def get_scoped_token(self): """Convenience method so that we can test authenticated requests.""" r = 
self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': {'id': self.user['domain_id']}, } }, }, 'scope': { 'project': { 'id': self.project['id'], } }, } }, ) return r.headers.get('X-Subject-Token') def get_system_scoped_token(self): """Convenience method for requesting system scoped tokens.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': {'id': self.user['domain_id']}, } }, }, 'scope': {'system': {'all': True}}, } }, ) return r.headers.get('X-Subject-Token') def get_domain_scoped_token(self): """Convenience method for requesting domain scoped token.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['password'], 'password': { 'user': { 'name': self.user['name'], 'password': self.user['password'], 'domain': {'id': self.user['domain_id']}, } }, }, 'scope': { 'domain': { 'id': self.domain['id'], } }, } }, ) return r.headers.get('X-Subject-Token') def get_application_credentials_token(self, app_cred_id, app_cred_secret): """Convenience method for requesting application credentials token.""" r = self.admin_request( method='POST', path='/v3/auth/tokens', body={ 'auth': { 'identity': { 'methods': ['application_credential'], 'application_credential': { 'id': app_cred_id, 'secret': app_cred_secret, }, } } }, ) return r.headers.get('X-Subject-Token') def get_requested_token(self, auth): """Request the specific token we want.""" r = self.v3_create_token(auth) return r.headers.get('X-Subject-Token') def v3_create_token(self, auth, expected_status=http.client.CREATED): return self.admin_request( method='POST', path='/v3/auth/tokens', body=auth, expected_status=expected_status, ) def 
v3_noauth_request(self, path, **kwargs): # request does not require auth token header path = '/v3' + path return self.admin_request(path=path, **kwargs) def v3_request(self, path, **kwargs): # check to see if caller requires token for the API call. if kwargs.pop('noauth', None): return self.v3_noauth_request(path, **kwargs) # Check if the caller has passed in auth details for # use in requesting the token auth_arg = kwargs.pop('auth', None) if auth_arg: token = self.get_requested_token(auth_arg) else: token = kwargs.pop('token', None) if not token: token = self.get_scoped_token() path = '/v3' + path return self.admin_request(path=path, token=token, **kwargs) def get(self, path, expected_status=http.client.OK, **kwargs): return self.v3_request( path, method='GET', expected_status=expected_status, **kwargs ) def head(self, path, expected_status=http.client.NO_CONTENT, **kwargs): r = self.v3_request( path, method='HEAD', expected_status=expected_status, **kwargs ) self.assertEqual(b'', r.body) return r def post(self, path, expected_status=http.client.CREATED, **kwargs): return self.v3_request( path, method='POST', expected_status=expected_status, **kwargs ) def put(self, path, expected_status=http.client.NO_CONTENT, **kwargs): return self.v3_request( path, method='PUT', expected_status=expected_status, **kwargs ) def patch(self, path, expected_status=http.client.OK, **kwargs): return self.v3_request( path, method='PATCH', expected_status=expected_status, **kwargs ) def delete(self, path, expected_status=http.client.NO_CONTENT, **kwargs): return self.v3_request( path, method='DELETE', expected_status=expected_status, **kwargs ) def assertValidErrorResponse(self, r): resp = r.result self.assertIsNotNone(resp.get('error')) self.assertIsNotNone(resp['error'].get('code')) self.assertIsNotNone(resp['error'].get('title')) self.assertIsNotNone(resp['error'].get('message')) self.assertEqual(int(resp['error']['code']), r.status_code) def assertValidListLinks(self, links, 
resource_url=None): self.assertIsNotNone(links) self.assertIsNotNone(links.get('self')) self.assertThat(links['self'], matchers.StartsWith('http://localhost')) if resource_url: self.assertThat(links['self'], matchers.EndsWith(resource_url)) self.assertIn('next', links) if links['next'] is not None: self.assertThat( links['next'], matchers.StartsWith('http://localhost') ) self.assertIn('previous', links) if links['previous'] is not None: self.assertThat( links['previous'], matchers.StartsWith('http://localhost') ) def assertValidListResponse( self, resp, key, entity_validator, ref=None, expected_length=None, keys_to_check=None, resource_url=None, ): """Make assertions common to all API list responses. If a reference is provided, it's ID will be searched for in the response, and asserted to be equal. """ entities = resp.result.get(key) self.assertIsNotNone(entities) if expected_length is not None: self.assertEqual(expected_length, len(entities)) elif ref is not None: # we're at least expecting the ref self.assertNotEmpty(entities) # collections should have relational links self.assertValidListLinks( resp.result.get('links'), resource_url=resource_url ) for entity in entities: self.assertIsNotNone(entity) self.assertValidEntity(entity, keys_to_check=keys_to_check) entity_validator(entity) if ref: entity = [x for x in entities if x['id'] == ref['id']][0] self.assertValidEntity( entity, ref=ref, keys_to_check=keys_to_check ) entity_validator(entity, ref) return entities def assertValidResponse( self, resp, key, entity_validator, *args, **kwargs ): """Make assertions common to all API responses.""" entity = resp.result.get(key) self.assertIsNotNone(entity) keys = kwargs.pop('keys_to_check', None) self.assertValidEntity(entity, keys_to_check=keys, *args, **kwargs) entity_validator(entity, *args, **kwargs) return entity def assertValidEntity(self, entity, ref=None, keys_to_check=None): """Make assertions common to all API entities. 
If a reference is provided, the entity will also be compared against the reference. """ if keys_to_check is not None: keys = keys_to_check else: keys = ['name', 'description', 'enabled'] for k in ['id'] + keys: msg = f'{k} unexpectedly None in {entity}' self.assertIsNotNone(entity.get(k), msg) self.assertIsNotNone(entity.get('links')) self.assertIsNotNone(entity['links'].get('self')) self.assertThat( entity['links']['self'], matchers.StartsWith('http://localhost') ) self.assertIn(entity['id'], entity['links']['self']) if ref: for k in keys: msg = f'{k} not equal: {ref[k]} != {entity[k]}' self.assertEqual(ref[k], entity[k]) return entity # auth validation def assertValidISO8601ExtendedFormatDatetime(self, dt): try: return datetime.datetime.strptime(dt, TIME_FORMAT) except Exception: msg = '%s is not a valid ISO 8601 extended format date time.' % dt raise AssertionError(msg) def assertValidTokenResponse(self, r, user=None, forbid_token_id=False): if forbid_token_id: self.assertNotIn('X-Subject-Token', r.headers) else: self.assertTrue(r.headers.get('X-Subject-Token')) token = r.result['token'] self.assertIsNotNone(token.get('expires_at')) expires_at = self.assertValidISO8601ExtendedFormatDatetime( token['expires_at'] ) self.assertIsNotNone(token.get('issued_at')) issued_at = self.assertValidISO8601ExtendedFormatDatetime( token['issued_at'] ) self.assertLess(issued_at, expires_at) self.assertIn('user', token) self.assertIn('id', token['user']) self.assertIn('name', token['user']) self.assertIn('domain', token['user']) self.assertIn('id', token['user']['domain']) if user is not None: self.assertEqual(user['id'], token['user']['id']) self.assertEqual(user['name'], token['user']['name']) self.assertEqual(user['domain_id'], token['user']['domain']['id']) return token def assertValidUnscopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidTokenResponse(r, *args, **kwargs) validator_object = validators.SchemaValidator( self.generate_token_schema() ) 
validator_object.validate(token) return token def assertValidScopedTokenResponse(self, r, *args, **kwargs): require_catalog = kwargs.pop('require_catalog', True) endpoint_filter = kwargs.pop('endpoint_filter', False) ep_filter_assoc = kwargs.pop('ep_filter_assoc', 0) is_admin_project = kwargs.pop('is_admin_project', None) token = self.assertValidTokenResponse(r, *args, **kwargs) if require_catalog: endpoint_num = 0 self.assertIn('catalog', token) if isinstance(token['catalog'], list): # only test JSON for service in token['catalog']: for endpoint in service['endpoints']: self.assertNotIn('enabled', endpoint) self.assertNotIn('legacy_endpoint_id', endpoint) self.assertNotIn('service_id', endpoint) endpoint_num += 1 # sub test for the OS-EP-FILTER extension enabled if endpoint_filter: self.assertEqual(ep_filter_assoc, endpoint_num) else: self.assertNotIn('catalog', token) self.assertIn('roles', token) self.assertTrue(token['roles']) for role in token['roles']: self.assertIn('id', role) self.assertIn('name', role) # NOTE(samueldmq): We want to explicitly test for boolean or None self.assertIs(is_admin_project, token.get('is_admin_project')) return token def assertValidProjectScopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidScopedTokenResponse(r, *args, **kwargs) project_scoped_token_schema = self.generate_token_schema( project_scoped=True ) if token.get('OS-TRUST:trust'): trust_properties = { 'OS-TRUST:trust': { 'type': ['object'], 'required': [ 'id', 'impersonation', 'trustor_user', 'trustee_user', ], 'properties': { 'id': {'type': 'string'}, 'impersonation': {'type': 'boolean'}, 'trustor_user': { 'type': 'object', 'required': ['id'], 'properties': {'id': {'type': 'string'}}, 'additionalProperties': False, }, 'trustee_user': { 'type': 'object', 'required': ['id'], 'properties': {'id': {'type': 'string'}}, 'additionalProperties': False, }, }, 'additionalProperties': False, } } project_scoped_token_schema['properties'].update(trust_properties) 
validator_object = validators.SchemaValidator( project_scoped_token_schema ) validator_object.validate(token) self.assertEqual(self.role_id, token['roles'][0]['id']) return token def assertValidDomainScopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidScopedTokenResponse(r, *args, **kwargs) validator_object = validators.SchemaValidator( self.generate_token_schema(domain_scoped=True) ) validator_object.validate(token) return token def assertValidSystemScopedTokenResponse(self, r, *args, **kwargs): token = self.assertValidTokenResponse(r) self.assertTrue(token['system']['all']) system_scoped_token_schema = self.generate_token_schema( system_scoped=True ) validator_object = validators.SchemaValidator( system_scoped_token_schema ) validator_object.validate(token) return token # catalog validation def assertValidCatalogResponse(self, resp, *args, **kwargs): self.assertEqual({'catalog', 'links'}, set(resp.json.keys())) self.assertValidCatalog(resp.json['catalog']) self.assertIn('links', resp.json) self.assertIsInstance(resp.json['links'], dict) self.assertEqual(['self'], list(resp.json['links'].keys())) self.assertEqual( 'http://localhost/v3/auth/catalog', resp.json['links']['self'] ) def assertValidCatalog(self, entity): self.assertIsInstance(entity, list) self.assertGreater(len(entity), 0) for service in entity: self.assertIsNotNone(service.get('id')) self.assertIsNotNone(service.get('name')) self.assertIsNotNone(service.get('type')) self.assertNotIn('enabled', service) self.assertGreater(len(service['endpoints']), 0) for endpoint in service['endpoints']: self.assertIsNotNone(endpoint.get('id')) self.assertIsNotNone(endpoint.get('interface')) self.assertIsNotNone(endpoint.get('url')) self.assertNotIn('enabled', endpoint) self.assertNotIn('legacy_endpoint_id', endpoint) self.assertNotIn('service_id', endpoint) # region validation def assertValidRegionListResponse(self, resp, *args, **kwargs): # NOTE(jaypipes): I have to pass in a blank keys_to_check 
parameter # below otherwise the base assertValidEntity method # tries to find a "name" and an "enabled" key in the # returned ref dicts. The issue is, I don't understand # how the service and endpoint entity assertions below # actually work (they don't raise assertions), since # AFAICT, the service and endpoint tables don't have # a "name" column either... :( return self.assertValidListResponse( resp, 'regions', self.assertValidRegion, keys_to_check=[], *args, **kwargs, ) def assertValidRegionResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'region', self.assertValidRegion, keys_to_check=[], *args, **kwargs, ) def assertValidRegion(self, entity, ref=None): self.assertIsNotNone(entity.get('description')) if ref: self.assertEqual(ref['description'], entity['description']) return entity # service validation def assertValidServiceListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'services', self.assertValidService, *args, **kwargs ) def assertValidServiceResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'service', self.assertValidService, *args, **kwargs ) def assertValidService(self, entity, ref=None): self.assertIsNotNone(entity.get('type')) self.assertIsInstance(entity.get('enabled'), bool) if ref: self.assertEqual(ref['type'], entity['type']) return entity # endpoint validation def assertValidEndpointListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'endpoints', self.assertValidEndpoint, *args, **kwargs ) def assertValidEndpointResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'endpoint', self.assertValidEndpoint, *args, **kwargs ) def assertValidEndpoint(self, entity, ref=None): self.assertIsNotNone(entity.get('interface')) self.assertIsNotNone(entity.get('service_id')) self.assertIsInstance(entity['enabled'], bool) # this is intended to be an unexposed implementation detail 
self.assertNotIn('legacy_endpoint_id', entity) if ref: self.assertEqual(ref['interface'], entity['interface']) self.assertEqual(ref['service_id'], entity['service_id']) if ref.get('region') is not None: self.assertEqual(ref['region_id'], entity.get('region_id')) return entity # domain validation def assertValidDomainListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'domains', self.assertValidDomain, *args, **kwargs ) def assertValidDomainResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'domain', self.assertValidDomain, *args, **kwargs ) def assertValidDomain(self, entity, ref=None): if ref: pass return entity # project validation def assertValidProjectListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'projects', self.assertValidProject, *args, **kwargs ) def assertValidProjectResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'project', self.assertValidProject, *args, **kwargs ) def assertValidProject(self, entity, ref=None): if ref: self.assertEqual(ref['domain_id'], entity['domain_id']) return entity # user validation def assertValidUserListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'users', self.assertValidUser, keys_to_check=['name', 'enabled'], *args, **kwargs, ) def assertValidUserResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'user', self.assertValidUser, keys_to_check=['name', 'enabled'], *args, **kwargs, ) def assertValidUser(self, entity, ref=None): self.assertIsNotNone(entity.get('domain_id')) self.assertIsNotNone(entity.get('email')) self.assertNotIn('password', entity) self.assertNotIn('projectId', entity) self.assertIn('password_expires_at', entity) if ref: self.assertEqual(ref['domain_id'], entity['domain_id']) self.assertEqual(ref['email'], entity['email']) if 'default_project_id' in ref: self.assertIsNotNone(ref['default_project_id']) self.assertEqual( 
ref['default_project_id'], entity['default_project_id'] ) return entity # group validation def assertValidGroupListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'groups', self.assertValidGroup, keys_to_check=['name', 'description', 'domain_id'], *args, **kwargs, ) def assertValidGroupResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'group', self.assertValidGroup, keys_to_check=['name', 'description', 'domain_id'], *args, **kwargs, ) def assertValidGroup(self, entity, ref=None): self.assertIsNotNone(entity.get('name')) if ref: self.assertEqual(ref['name'], entity['name']) return entity # credential validation def assertValidCredentialListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'credentials', self.assertValidCredential, keys_to_check=['blob', 'user_id', 'type'], *args, **kwargs, ) def assertValidCredentialResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'credential', self.assertValidCredential, keys_to_check=['blob', 'user_id', 'type'], *args, **kwargs, ) def assertValidCredential(self, entity, ref=None): self.assertIsNotNone(entity.get('user_id')) self.assertIsNotNone(entity.get('blob')) self.assertIsNotNone(entity.get('type')) self.assertNotIn('key_hash', entity) self.assertNotIn('encrypted_blob', entity) if ref: self.assertEqual(ref['user_id'], entity['user_id']) self.assertEqual(ref['blob'], entity['blob']) self.assertEqual(ref['type'], entity['type']) self.assertEqual(ref.get('project_id'), entity.get('project_id')) return entity # role validation def assertValidRoleListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'roles', self.assertValidRole, keys_to_check=['name'], *args, **kwargs, ) def assertRoleInListResponse(self, resp, ref, expected=1): found_count = 0 for entity in resp.result.get('roles'): try: self.assertValidRole(entity, ref=ref) except Exception: # It doesn't match, so let's go 
onto the next one pass else: found_count += 1 self.assertEqual(expected, found_count) def assertRoleNotInListResponse(self, resp, ref): self.assertRoleInListResponse(resp, ref=ref, expected=0) def assertValidRoleResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'role', self.assertValidRole, keys_to_check=['name'], *args, **kwargs, ) def assertValidRole(self, entity, ref=None): self.assertIsNotNone(entity.get('name')) if ref: self.assertEqual(ref['name'], entity['name']) self.assertEqual(ref['domain_id'], entity['domain_id']) return entity # role assignment validation def assertValidRoleAssignmentListResponse( self, resp, expected_length=None, resource_url=None ): entities = resp.result.get('role_assignments') if expected_length or expected_length == 0: self.assertEqual(expected_length, len(entities)) # Collections should have relational links self.assertValidListLinks( resp.result.get('links'), resource_url=resource_url ) for entity in entities: self.assertIsNotNone(entity) self.assertValidRoleAssignment(entity) return entities def assertValidRoleAssignment(self, entity, ref=None): # A role should be present self.assertIsNotNone(entity.get('role')) self.assertIsNotNone(entity['role'].get('id')) # Only one of user or group should be present if entity.get('user'): self.assertNotIn('group', entity) self.assertIsNotNone(entity['user'].get('id')) else: self.assertIsNotNone(entity.get('group')) self.assertIsNotNone(entity['group'].get('id')) # A scope should be present and have only one of domain or project self.assertIsNotNone(entity.get('scope')) if entity['scope'].get('project'): self.assertNotIn('domain', entity['scope']) self.assertIsNotNone(entity['scope']['project'].get('id')) elif entity['scope'].get('domain'): self.assertIsNotNone(entity['scope'].get('domain')) self.assertIsNotNone(entity['scope']['domain'].get('id')) else: self.assertIsNotNone(entity['scope'].get('system')) self.assertTrue(entity['scope']['system']['all']) # An 
assignment link should be present self.assertIsNotNone(entity.get('links')) self.assertIsNotNone(entity['links'].get('assignment')) if ref: links = ref.pop('links') try: self.assertLessEqual(ref.items(), entity.items()) self.assertIn( links['assignment'], entity['links']['assignment'] ) finally: if links: ref['links'] = links def assertRoleAssignmentInListResponse(self, resp, ref, expected=1): found_count = 0 for entity in resp.result.get('role_assignments'): try: self.assertValidRoleAssignment(entity, ref=ref) except Exception: # It doesn't match, so let's go onto the next one pass else: found_count += 1 self.assertEqual(expected, found_count) def assertRoleAssignmentNotInListResponse(self, resp, ref): self.assertRoleAssignmentInListResponse(resp, ref=ref, expected=0) # policy validation def assertValidPolicyListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'policies', self.assertValidPolicy, *args, **kwargs ) def assertValidPolicyResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'policy', self.assertValidPolicy, *args, **kwargs ) def assertValidPolicy(self, entity, ref=None): self.assertIsNotNone(entity.get('blob')) self.assertIsNotNone(entity.get('type')) if ref: self.assertEqual(ref['blob'], entity['blob']) self.assertEqual(ref['type'], entity['type']) return entity # trust validation def assertValidTrustListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'trusts', self.assertValidTrustSummary, keys_to_check=[ 'trustor_user_id', 'trustee_user_id', 'impersonation', ], *args, **kwargs, ) def assertValidTrustResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'trust', self.assertValidTrust, keys_to_check=[ 'trustor_user_id', 'trustee_user_id', 'impersonation', ], *args, **kwargs, ) def assertValidTrustSummary(self, entity, ref=None): return self.assertValidTrust(entity, ref, summary=True) def assertValidTrust(self, entity, ref=None, 
summary=False): self.assertIsNotNone(entity.get('trustor_user_id')) self.assertIsNotNone(entity.get('trustee_user_id')) self.assertIsNotNone(entity.get('impersonation')) self.assertIn('expires_at', entity) if entity['expires_at'] is not None: self.assertValidISO8601ExtendedFormatDatetime(entity['expires_at']) if summary: # Trust list contains no roles, but getting a specific # trust by ID provides the detailed response containing roles self.assertNotIn('roles', entity) self.assertIn('project_id', entity) else: for role in entity['roles']: self.assertIsNotNone(role) self.assertValidEntity(role, keys_to_check=['name']) self.assertValidRole(role) self.assertValidListLinks(entity.get('roles_links')) # Mirror the test tempest does to ensure the self-link is correct self.assertIn('v3/OS-TRUST/trusts', entity.get('links')['self']) # always disallow role xor project_id (neither or both is allowed) has_roles = bool(entity.get('roles')) has_project = bool(entity.get('project_id')) self.assertFalse(has_roles ^ has_project) if ref: self.assertEqual(ref['trustor_user_id'], entity['trustor_user_id']) self.assertEqual(ref['trustee_user_id'], entity['trustee_user_id']) self.assertEqual(ref['project_id'], entity['project_id']) if entity.get('expires_at') or ref.get('expires_at'): entity_exp = self.assertValidISO8601ExtendedFormatDatetime( entity['expires_at'] ) ref_exp = self.assertValidISO8601ExtendedFormatDatetime( ref['expires_at'] ) self.assertCloseEnoughForGovernmentWork(entity_exp, ref_exp) else: self.assertEqual( ref.get('expires_at'), entity.get('expires_at') ) return entity # Service providers (federation) def assertValidServiceProvider(self, entity, ref=None, *args, **kwargs): attributes = frozenset( [ 'auth_url', 'id', 'enabled', 'description', 'links', 'relay_state_prefix', 'sp_url', ] ) for attribute in attributes: self.assertIsNotNone(entity.get(attribute)) def build_external_auth_environ(self, remote_user, remote_domain=None): environment = {'REMOTE_USER': 
remote_user, 'AUTH_TYPE': 'Negotiate'} if remote_domain: environment['REMOTE_DOMAIN'] = remote_domain return environment class OAuth2RestfulTestCase(RestfulTestCase): def assertValidErrorResponse(self, response): resp = response.result self.assertIsNotNone(resp.get('error')) self.assertIsNotNone(resp.get('error_description')) class VersionTestCase(RestfulTestCase): def test_get_version(self): pass # NOTE(gyee): test AuthContextMiddleware here instead of test_middleware.py # because we need the token class AuthContextMiddlewareTestCase(RestfulTestCase): def load_fixtures(self, fixtures): self.load_sample_data() app_cred_api = PROVIDERS.application_credential_api access_rules = [ { 'id': uuid.uuid4().hex, 'service': self.service['type'], 'method': 'GET', 'path': '/v3/users/*', } ] app_cred = { 'id': uuid.uuid4().hex, 'name': 'appcredtest', 'secret': uuid.uuid4().hex, 'user_id': self.user['id'], 'project_id': self.project['id'], 'description': 'Test Application Credential', 'roles': [{'id': self.role_id}], 'access_rules': access_rules, } app_cred_ref = app_cred_api.create_application_credential(app_cred) self.app_cred_r_id = app_cred_ref['id'] self.app_cred_r_secret = app_cred_ref['secret'] def _middleware_request(self, token, extra_environ=None): def application(environ, start_response): body = b'body' headers = [ ('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body))), ] start_response('200 OK', headers) return [body] app = webtest.TestApp( auth_context.AuthContextMiddleware(application), extra_environ=extra_environ, ) resp = app.get('/', headers={authorization.AUTH_TOKEN_HEADER: token}) self.assertEqual(b'body', resp.body) # just to make sure it worked return resp.request def test_auth_context_build_by_middleware(self): # test to make sure AuthContextMiddleware successful build the auth # context from the incoming auth token admin_token = self.get_scoped_token() req = self._middleware_request(admin_token) self.assertEqual( self.user['id'], 
req.environ.get(authorization.AUTH_CONTEXT_ENV)['user_id'], ) def test_auth_context_override(self): overridden_context = 'OVERRIDDEN_CONTEXT' # this token should not be used token = uuid.uuid4().hex extra_environ = {authorization.AUTH_CONTEXT_ENV: overridden_context} req = self._middleware_request(token, extra_environ=extra_environ) # make sure overridden context take precedence self.assertEqual( overridden_context, req.environ.get(authorization.AUTH_CONTEXT_ENV) ) def test_unscoped_token_auth_context(self): unscoped_token = self.get_unscoped_token() req = self._middleware_request(unscoped_token) # This check originally looked that the value was unset # but that was an artifact of the custom context keystone # used to create. Oslo-context will always provide the # same set of keys, but the values will be None in an # unscoped token for key in ['project_id', 'domain_id', 'domain_name']: self.assertIsNone( req.environ.get(authorization.AUTH_CONTEXT_ENV)[key] ) def test_project_scoped_token_auth_context(self): project_scoped_token = self.get_scoped_token() req = self._middleware_request(project_scoped_token) self.assertEqual( self.project['id'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['project_id'], ) def test_domain_scoped_token_auth_context(self): # grant the domain role to user path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) domain_scoped_token = self.get_domain_scoped_token() req = self._middleware_request(domain_scoped_token) self.assertEqual( self.domain['id'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_id'], ) self.assertEqual( self.domain['name'], req.environ.get(authorization.AUTH_CONTEXT_ENV)['domain_name'], ) def test_oslo_context(self): # After AuthContextMiddleware runs, an # oslo_context.context.RequestContext was created so that its fields # can be logged. This test validates that the RequestContext was # created and the fields are set as expected. 
# Use a scoped token so more fields can be set. token = self.get_scoped_token() # oslo_middleware RequestId middleware sets openstack.request_id. request_id = uuid.uuid4().hex environ = {'openstack.request_id': request_id} self._middleware_request(token, extra_environ=environ) req_context = oslo_context.context.get_current() self.assertEqual(request_id, req_context.request_id) self.assertEqual(token, req_context.auth_token) self.assertEqual(self.user['id'], req_context.user_id) self.assertEqual(self.project['id'], req_context.project_id) self.assertIsNone(req_context.domain_id) self.assertEqual(self.user['domain_id'], req_context.user_domain_id) self.assertEqual( self.project['domain_id'], req_context.project_domain_id ) self.assertFalse(req_context.is_admin) def test_auth_context_app_cred_with_rule(self): # # This is an open-coded _middleware_request(), which allows us to # supply paths and verify failure. We can refactor later if needed. # def application(environ, start_response): body = b'body' headers = [ ('Content-Type', 'text/html; charset=utf8'), ('Content-Length', str(len(body))), ] start_response('200 OK', headers) return [body] token = self.get_application_credentials_token( self.app_cred_r_id, self.app_cred_r_secret ) # Test to failure app = webtest.TestApp(auth_context.AuthContextMiddleware(application)) resp = app.get( '/v3/projects/e3a0883d15ff409e98e59d460f583a68', headers={authorization.AUTH_TOKEN_HEADER: token}, status=401, ) self.assertEqual('401 Unauthorized', resp.status) # Test to success app = webtest.TestApp(auth_context.AuthContextMiddleware(application)) resp = app.get( '/v3/users/3879328537914be2b394ddf57a4fc73a', headers={authorization.AUTH_TOKEN_HEADER: token}, ) self.assertEqual('200 OK', resp.status) self.assertEqual(b'body', resp.body) # just to make sure it worked class JsonHomeTestMixin: """JSON Home test. Mixin this class to provide a test for the JSON-Home response for an extension. 
The base class must set JSON_HOME_DATA to a dict of relationship URLs (rels) to the JSON-Home data for the relationship. The rels and associated data must be in the response. """ def test_get_json_home(self): resp = self.get( '/', convert=False, headers={'Accept': 'application/json-home'} ) self.assertThat( resp.headers['Content-Type'], matchers.Equals('application/json-home'), ) resp_data = jsonutils.loads(resp.body) # Check that the example relationships are present. for rel in self.JSON_HOME_DATA: self.assertThat( resp_data['resources'][rel], matchers.Equals(self.JSON_HOME_DATA[rel]), ) class AssignmentTestMixin: """To hold assignment helper functions.""" def build_role_assignment_query_url(self, effective=False, **filters): """Build and return a role assignment query url with provided params. Available filters are: domain_id, project_id, user_id, group_id, role_id and inherited_to_projects. """ query_params = '?effective' if effective else '' for k, v in filters.items(): query_params += '?' if not query_params else '&' if k == 'inherited_to_projects': query_params += 'scope.OS-INHERIT:inherited_to=projects' else: if k in ['domain_id', 'project_id']: query_params += 'scope.' elif k not in ['user_id', 'group_id', 'role_id']: raise ValueError( 'Invalid key \'%s\' in provided filters.' % k ) query_params += '{}={}'.format(k.replace('_', '.'), v) return '/role_assignments%s' % query_params def build_role_assignment_link(self, **attribs): """Build and return a role assignment link with provided attributes. Provided attributes are expected to contain: domain_id or project_id, user_id or group_id, role_id and, optionally, inherited_to_projects. 
""" if attribs.get('domain_id'): link = '/domains/' + attribs['domain_id'] elif attribs.get('system'): link = '/system' else: link = '/projects/' + attribs['project_id'] if attribs.get('user_id'): link += '/users/' + attribs['user_id'] else: link += '/groups/' + attribs['group_id'] link += '/roles/' + attribs['role_id'] if attribs.get('inherited_to_projects'): return '/OS-INHERIT%s/inherited_to_projects' % link return link def build_role_assignment_entity( self, link=None, prior_role_link=None, **attribs ): """Build and return a role assignment entity with provided attributes. Provided attributes are expected to contain: domain_id or project_id, user_id or group_id, role_id and, optionally, inherited_to_projects. """ entity = { 'links': { 'assignment': ( link or self.build_role_assignment_link(**attribs) ) } } if attribs.get('domain_id'): entity['scope'] = {'domain': {'id': attribs['domain_id']}} elif attribs.get('system'): entity['scope'] = {'system': {'all': True}} else: entity['scope'] = {'project': {'id': attribs['project_id']}} if attribs.get('user_id'): entity['user'] = {'id': attribs['user_id']} if attribs.get('group_id'): entity['links']['membership'] = '/groups/{}/users/{}'.format( attribs['group_id'], attribs['user_id'], ) else: entity['group'] = {'id': attribs['group_id']} entity['role'] = {'id': attribs['role_id']} if attribs.get('inherited_to_projects'): entity['scope']['OS-INHERIT:inherited_to'] = 'projects' if prior_role_link: entity['links']['prior_role'] = prior_role_link return entity def build_role_assignment_entity_include_names( self, domain_ref=None, role_ref=None, group_ref=None, user_ref=None, project_ref=None, inherited_assignment=None, ): """Build and return a role assignment entity with provided attributes. The expected attributes are: domain_ref or project_ref, user_ref or group_ref, role_ref and, optionally, inherited_to_projects. 
""" entity = {'links': {}} attributes_for_links = {} if project_ref: dmn_name = PROVIDERS.resource_api.get_domain( project_ref['domain_id'] )['name'] entity['scope'] = { 'project': { 'id': project_ref['id'], 'name': project_ref['name'], 'domain': { 'id': project_ref['domain_id'], 'name': dmn_name, }, } } attributes_for_links['project_id'] = project_ref['id'] else: entity['scope'] = { 'domain': {'id': domain_ref['id'], 'name': domain_ref['name']} } attributes_for_links['domain_id'] = domain_ref['id'] if user_ref: dmn_name = PROVIDERS.resource_api.get_domain( user_ref['domain_id'] )['name'] entity['user'] = { 'id': user_ref['id'], 'name': user_ref['name'], 'domain': {'id': user_ref['domain_id'], 'name': dmn_name}, } attributes_for_links['user_id'] = user_ref['id'] else: dmn_name = PROVIDERS.resource_api.get_domain( group_ref['domain_id'] )['name'] entity['group'] = { 'id': group_ref['id'], 'name': group_ref['name'], 'domain': {'id': group_ref['domain_id'], 'name': dmn_name}, } attributes_for_links['group_id'] = group_ref['id'] if role_ref: entity['role'] = {'id': role_ref['id'], 'name': role_ref['name']} if role_ref['domain_id']: dmn_name = PROVIDERS.resource_api.get_domain( role_ref['domain_id'] )['name'] entity['role']['domain'] = { 'id': role_ref['domain_id'], 'name': dmn_name, } attributes_for_links['role_id'] = role_ref['id'] if inherited_assignment: entity['scope']['OS-INHERIT:inherited_to'] = 'projects' attributes_for_links['inherited_to_projects'] = True entity['links']['assignment'] = self.build_role_assignment_link( **attributes_for_links ) return entity ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_v3_application_credential.py0000664000175000017500000007536300000000000026201 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import http.client import uuid from oslo_utils import timeutils from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs MEMBER_PATH_FMT = '/users/%(user_id)s/application_credentials/%(app_cred_id)s' class ApplicationCredentialTestCase(test_v3.RestfulTestCase): """Test CRUD operations for application credentials.""" def config_overrides(self): super().config_overrides() self.config_fixture.config( group='auth', methods='password,application_credential' ) def _app_cred_body( self, roles=None, name=None, expires=None, secret=None, access_rules=None, ): name = name or uuid.uuid4().hex description = 'Credential for backups' app_cred_data = {'name': name, 'description': description} if roles: app_cred_data['roles'] = roles if expires: app_cred_data['expires_at'] = expires if secret: app_cred_data['secret'] = secret if access_rules is not None: app_cred_data['access_rules'] = access_rules return {'application_credential': app_cred_data} def test_create_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) # Create operation returns the secret self.assertIn('secret', resp.json['application_credential']) # But 
not the stored hash self.assertNotIn('secret_hash', resp.json['application_credential']) def test_create_application_credential_implied_role(self): """Test creation with implied roles. Verify that implied roles are respected when user creates new application credential specifying a role that implies some other role """ implied_role = unit.new_role_ref(name='implied') implied_role_id = implied_role['id'] PROVIDERS.role_api.create_role(implied_role_id, implied_role) PROVIDERS.role_api.create_implied_role(self.role_id, implied_role_id) with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) # Create operation returns the secret self.assertIn('secret', resp.json['application_credential']) # But not the stored hash self.assertNotIn('secret_hash', resp.json['application_credential']) # Ensure implied role is also granted self.assertIn( implied_role_id, [x['id'] for x in resp.json["application_credential"]["roles"]], ) def test_create_application_credential_with_secret(self): with self.test_client() as c: secret = 'supersecuresecret' roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles, secret=secret) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) self.assertEqual(secret, resp.json['application_credential']['secret']) def test_create_application_credential_roles_from_token(self): with self.test_client() as c: app_cred_body = self._app_cred_body() token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) 
self.assertThat( resp.json['application_credential']['roles'], matchers.HasLength(1), ) self.assertEqual( resp.json['application_credential']['roles'][0]['id'], self.role_id, ) def test_create_application_credential_wrong_user(self): wrong_user = unit.create_user( PROVIDERS.identity_api, test_v3.DEFAULT_DOMAIN_ID ) with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() c.post( '/v3/users/%s/application_credentials' % wrong_user['id'], json=app_cred_body, expected_status_code=http.client.FORBIDDEN, headers={'X-Auth-Token': token}, ) def test_create_application_credential_bad_role(self): with self.test_client() as c: roles = [{'id': uuid.uuid4().hex}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.BAD_REQUEST, headers={'X-Auth-Token': token}, ) def test_create_application_credential_with_expiration(self): with self.test_client() as c: roles = [{'id': self.role_id}] expires = timeutils.utcnow() + datetime.timedelta(days=365) expires = str(expires) app_cred_body = self._app_cred_body(roles=roles, expires=expires) token = self.get_scoped_token() c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) def test_create_application_credential_invalid_expiration_fmt(self): with self.test_client() as c: roles = [{'id': self.role_id}] expires = 'next tuesday' app_cred_body = self._app_cred_body(roles=roles, expires=expires) token = self.get_scoped_token() c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.BAD_REQUEST, headers={'X-Auth-Token': token}, ) def test_create_application_credential_already_expired(self): with self.test_client() as c: roles = [{'id': self.role_id}] 
expires = timeutils.utcnow() - datetime.timedelta(hours=1) app_cred_body = self._app_cred_body(roles=roles, expires=expires) token = self.get_scoped_token() c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.BAD_REQUEST, headers={'X-Auth-Token': token}, ) def test_create_application_credential_with_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body_1 = self._app_cred_body(roles=roles) token = self.get_scoped_token() app_cred_1 = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body_1, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) auth_data = self.build_authentication_request( app_cred_id=app_cred_1.json['application_credential']['id'], secret=app_cred_1.json['application_credential']['secret'], ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) app_cred_body_2 = self._app_cred_body(roles=roles) token = token_data.headers['x-subject-token'] c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body_2, expected_status_code=http.client.FORBIDDEN, headers={'X-Auth-Token': token}, ) def test_create_application_credential_with_trust(self): second_role = unit.new_role_ref(name='reader') PROVIDERS.role_api.create_role(second_role['id'], second_role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, second_role['id'] ) with self.test_client() as c: pw_token = self.get_scoped_token() # create a self-trust - only the roles are important for this test trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.user_id, project_id=self.project_id, role_ids=[second_role['id']], ) resp = c.post( '/v3/OS-TRUST/trusts', headers={'X-Auth-Token': pw_token}, json={'trust': trust_ref}, ) trust_id = resp.json['trust']['id'] trust_auth = self.build_authentication_request( user_id=self.user_id, 
password=self.user['password'], trust_id=trust_id, ) trust_token = self.v3_create_token(trust_auth).headers[ 'X-Subject-Token' ] app_cred = self._app_cred_body(roles=[{'id': self.role_id}]) # only the roles from the trust token should be allowed, even if # the user has the role assigned on the project c.post( '/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': trust_token}, json=app_cred, expected_status_code=http.client.BAD_REQUEST, ) def test_create_application_credential_allow_recursion(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body_1 = self._app_cred_body(roles=roles) app_cred_body_1['application_credential']['unrestricted'] = True token = self.get_scoped_token() app_cred_1 = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body_1, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) auth_data = self.build_authentication_request( app_cred_id=app_cred_1.json['application_credential']['id'], secret=app_cred_1.json['application_credential']['secret'], ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) app_cred_body_2 = self._app_cred_body(roles=roles) c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body_2, expected_status_code=http.client.CREATED, headers={ 'x-Auth-Token': token_data.headers['x-subject-token'] }, ) def test_create_application_credential_with_access_rules(self): roles = [{'id': self.role_id}] access_rules = [ { 'path': '/v3/projects', 'method': 'POST', 'service': 'identity', } ] app_cred_body = self._app_cred_body( roles=roles, access_rules=access_rules ) with self.test_client() as c: token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': token}, json=app_cred_body, expected_status_code=http.client.CREATED, ) app_cred_id = resp.json['application_credential']['id'] resp_access_rules = 
resp.json['application_credential'][ 'access_rules' ] access_rule_id = resp_access_rules[0].pop('id') self.assertEqual(access_rules[0], resp_access_rules[0]) resp = c.get( '/v3/users/%s/access_rules' % self.user_id, headers={'X-Auth-Token': token}, ) resp_access_rule = resp.json['access_rules'][0] resp_access_rule.pop('id') resp_access_rule.pop('links') self.assertEqual(access_rules[0], resp_access_rule) resp = c.get( '/v3/users/%s/access_rules/%s' % (self.user_id, access_rule_id), headers={'X-Auth-Token': token}, ) resp_access_rule = resp.json['access_rule'] resp_access_rule.pop('id') resp_access_rule.pop('links') self.assertEqual(access_rules[0], resp_access_rule) # can't delete an access rule in use c.delete( '/v3/users/%s/access_rules/%s' % (self.user_id, access_rule_id), headers={'X-Auth-Token': token}, expected_status_code=http.client.FORBIDDEN, ) c.delete( '/v3/users/%s/application_credentials/%s' % (self.user_id, app_cred_id), headers={'X-Auth-Token': token}, ) c.delete( '/v3/users/%s/access_rules/%s' % (self.user_id, access_rule_id), headers={'X-Auth-Token': token}, ) def test_create_application_credential_with_duplicate_access_rule(self): roles = [{'id': self.role_id}] access_rules = [ { 'path': '/v3/projects', 'method': 'POST', 'service': 'identity', } ] app_cred_body_1 = self._app_cred_body( roles=roles, access_rules=access_rules ) with self.test_client() as c: token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': token}, json=app_cred_body_1, expected_status_code=http.client.CREATED, ) resp_access_rules = resp.json['application_credential']['access_rules'] self.assertIn('id', resp_access_rules[0]) access_rule_id = resp_access_rules[0].pop('id') self.assertEqual(access_rules[0], resp_access_rules[0]) app_cred_body_2 = self._app_cred_body( roles=roles, access_rules=access_rules ) with self.test_client() as c: token = self.get_scoped_token() resp = c.post( 
'/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': token}, json=app_cred_body_2, expected_status_code=http.client.CREATED, ) resp_access_rules = resp.json['application_credential']['access_rules'] self.assertEqual(access_rule_id, resp_access_rules[0]['id']) def test_create_application_credential_with_access_rule_by_id(self): roles = [{'id': self.role_id}] access_rules = [ { 'path': '/v3/projects', 'method': 'POST', 'service': 'identity', } ] app_cred_body_1 = self._app_cred_body( roles=roles, access_rules=access_rules ) with self.test_client() as c: token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': token}, json=app_cred_body_1, expected_status_code=http.client.CREATED, ) resp_access_rules = resp.json['application_credential']['access_rules'] access_rule_id = resp_access_rules self.assertIn('id', resp_access_rules[0]) access_rule_id = resp_access_rules[0].pop('id') self.assertEqual(access_rules[0], resp_access_rules[0]) access_rules = [{'id': access_rule_id}] app_cred_body_2 = self._app_cred_body( roles=roles, access_rules=access_rules ) with self.test_client() as c: token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, headers={'X-Auth-Token': token}, json=app_cred_body_2, expected_status_code=http.client.CREATED, ) resp_access_rules = resp.json['application_credential']['access_rules'] self.assertEqual(access_rule_id, resp_access_rules[0]['id']) def test_list_application_credentials(self): with self.test_client() as c: token = self.get_scoped_token() resp = c.get( '/v3/users/%s/application_credentials' % self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual([], resp.json['application_credentials']) roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, 
expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) resp = c.get( '/v3/users/%s/application_credentials' % self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual(1, len(resp.json['application_credentials'])) self.assertNotIn('secret', resp.json['application_credentials'][0]) self.assertNotIn( 'secret_hash', resp.json['application_credentials'][0] ) app_cred_body['application_credential']['name'] = 'two' c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) resp = c.get( '/v3/users/%s/application_credentials' % self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual(2, len(resp.json['application_credentials'])) for ac in resp.json['application_credentials']: self.assertNotIn('secret', ac) self.assertNotIn('secret_hash', ac) def test_list_application_credentials_with_deleted_role(self): second_role = unit.new_role_ref(name='test_new_role') PROVIDERS.role_api.create_role(second_role['id'], second_role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, second_role['id'] ) with self.test_client() as c: token = self.get_scoped_token() resp = c.get( '/v3/users/%s/application_credentials' % self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual([], resp.json['application_credentials']) roles = [{'id': second_role['id']}] app_cred_body = self._app_cred_body(roles=roles) c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) resp = c.get( '/v3/users/%s/application_credentials' % self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) PROVIDERS.role_api.delete_role(second_role['id']) resp = c.get( '/v3/users/%s/application_credentials' % 
self.user_id, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) def test_list_application_credentials_by_name(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() name = app_cred_body['application_credential']['name'] search_path = ( '/v3/users/%(user_id)s/application_credentials?' 'name=%(name)s' ) % {'user_id': self.user_id, 'name': name} resp = c.get( search_path, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual([], resp.json['application_credentials']) resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) resp = c.get( search_path, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual(1, len(resp.json['application_credentials'])) self.assertNotIn('secret', resp.json['application_credentials'][0]) self.assertNotIn( 'secret_hash', resp.json['application_credentials'][0] ) app_cred_body['application_credential']['name'] = 'two' c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) resp = c.get( search_path, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertEqual(1, len(resp.json['application_credentials'])) self.assertEqual( resp.json['application_credentials'][0]['name'], name ) def test_get_head_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) app_cred_id = resp.json['application_credential']['id'] c.head( '/v3%s' % MEMBER_PATH_FMT % 
{'user_id': self.user_id, 'app_cred_id': app_cred_id}, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) expected_response = resp.json expected_response['application_credential'].pop('secret') resp = c.get( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': app_cred_id}, expected_status_code=http.client.OK, headers={'X-Auth-Token': token}, ) self.assertDictEqual(resp.json, expected_response) def test_get_head_application_credential_not_found(self): with self.test_client() as c: token = self.get_scoped_token() c.head( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': uuid.uuid4().hex}, expected_status_code=http.client.NOT_FOUND, headers={'X-Auth-Token': token}, ) c.get( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': uuid.uuid4().hex}, expected_status_code=http.client.NOT_FOUND, headers={'X-Auth-Token': token}, ) def test_delete_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) app_cred_id = resp.json['application_credential']['id'] c.delete( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': app_cred_id}, expected_status_code=http.client.NO_CONTENT, headers={'X-Auth-Token': token}, ) def test_delete_application_credential_not_found(self): with self.test_client() as c: token = self.get_scoped_token() c.delete( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': uuid.uuid4().hex}, expected_status_code=http.client.NOT_FOUND, headers={'X-Auth-Token': token}, ) def test_delete_application_credential_with_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() 
app_cred = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) auth_data = self.build_authentication_request( app_cred_id=app_cred.json['application_credential']['id'], secret=app_cred.json['application_credential']['secret'], ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) member_path = ( '/v3%s' % MEMBER_PATH_FMT % { 'user_id': self.user_id, 'app_cred_id': app_cred.json['application_credential'][ 'id' ], } ) token = token_data.headers['x-subject-token'] c.delete( member_path, json=app_cred_body, expected_status_code=http.client.FORBIDDEN, headers={'X-Auth-Token': token}, ) def test_delete_application_credential_allow_recursion(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) app_cred_body['application_credential']['unrestricted'] = True token = self.get_scoped_token() app_cred = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) auth_data = self.build_authentication_request( app_cred_id=app_cred.json['application_credential']['id'], secret=app_cred.json['application_credential']['secret'], ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) member_path = ( '/v3%s' % MEMBER_PATH_FMT % { 'user_id': self.user_id, 'app_cred_id': app_cred.json['application_credential'][ 'id' ], } ) c.delete( member_path, json=app_cred_body, expected_status_code=http.client.NO_CONTENT, headers={ 'x-Auth-Token': token_data.headers['x-subject-token'] }, ) def test_update_application_credential(self): with self.test_client() as c: roles = [{'id': self.role_id}] app_cred_body = self._app_cred_body(roles=roles) token = self.get_scoped_token() resp = c.post( '/v3/users/%s/application_credentials' % self.user_id, json=app_cred_body, 
expected_status_code=http.client.CREATED, headers={'X-Auth-Token': token}, ) # Application credentials are immutable app_cred_body['application_credential'][ 'description' ] = "New Things" app_cred_id = resp.json['application_credential']['id'] # NOTE(morgan): when the whole test case is converted to using # flask test_client, this extra v3 prefix will # need to be rolled into the base MEMBER_PATH_FMT member_path = ( '/v3%s' % MEMBER_PATH_FMT % {'user_id': self.user_id, 'app_cred_id': app_cred_id} ) c.patch( member_path, json=app_cred_body, expected_status_code=http.client.METHOD_NOT_ALLOWED, headers={'X-Auth-Token': token}, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_v3_assignment.py0000664000175000017500000052431200000000000023645 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import http.client import random import uuid import freezegun from oslo_utils import timeutils from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.resource.backends import base as resource_base from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class SystemRoleAssignmentMixin: def _create_new_role(self): """Create a role available for use anywhere and return the ID.""" ref = unit.new_role_ref() response = self.post('/roles', body={'role': ref}) # We only really need the role ID, so omit the rest of the response and # return the ID of the role we just created. return response.json_body['role']['id'] def _create_group(self): body = { 'group': {'domain_id': self.domain_id, 'name': uuid.uuid4().hex} } response = self.post('/groups/', body=body) return response.json_body['group'] def _create_user(self): body = { 'user': {'domain_id': self.domain_id, 'name': uuid.uuid4().hex} } response = self.post('/users/', body=body) return response.json_body['user'] class AssignmentTestCase( test_v3.RestfulTestCase, test_v3.AssignmentTestMixin, SystemRoleAssignmentMixin, ): """Test roles and role assignments.""" def setUp(self): super().setUp() self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = PROVIDERS.identity_api.create_group(self.group) self.group_id = self.group['id'] # Role CRUD tests def test_create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post('/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_create_role_bad_request(self): """Call ``POST /roles``.""" self.post( '/roles', body={'role': {}}, expected_status=http.client.BAD_REQUEST, ) def test_list_head_roles(self): """Call ``GET & HEAD /roles``.""" resource_url = '/roles' r = self.get(resource_url) self.assertValidRoleListResponse( r, ref=self.role, 
resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_get_head_role(self): """Call ``GET & HEAD /roles/{role_id}``.""" resource_url = f'/roles/{self.role_id}' r = self.get(resource_url) self.assertValidRoleResponse(r, self.role) self.head(resource_url, expected_status=http.client.OK) def test_update_role(self): """Call ``PATCH /roles/{role_id}``.""" ref = unit.new_role_ref() del ref['id'] r = self.patch( f'/roles/{self.role_id}', body={'role': ref}, ) self.assertValidRoleResponse(r, ref) def test_delete_role(self): """Call ``DELETE /roles/{role_id}``.""" self.delete(f'/roles/{self.role_id}') # Role Grants tests def test_crud_user_project_role_grants(self): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) collection_url = '/projects/{project_id}/users/{user_id}/roles'.format( project_id=self.project['id'], user_id=self.user['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=role['id'], ) # There is a role assignment for self.user on self.project r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role, resource_url=collection_url, expected_length=2 ) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse(r, ref=self.role, expected_length=1) self.assertIn(collection_url, r.result['links']['self']) self.head(collection_url, expected_status=http.client.OK) def test_crud_user_project_role_grants_no_user(self): """Grant role on a project to a user that doesn't exist. When grant a role on a project to a user that doesn't exist, the server returns Not Found for the user. 
""" user_id = uuid.uuid4().hex collection_url = '/projects/{project_id}/users/{user_id}/roles'.format( project_id=self.project['id'], user_id=user_id, ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_user_domain_role_grants(self): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': self.domain_id, 'user_id': self.user['id']} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=self.role, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse( r, expected_length=0, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) def test_crud_user_domain_role_grants_no_user(self): """Grant role on a domain to a user that doesn't exist. When grant a role on a domain to a user that doesn't exist, the server returns 404 Not Found for the user. 
""" user_id = uuid.uuid4().hex collection_url = '/domains/{domain_id}/users/{user_id}/roles'.format( domain_id=self.domain_id, user_id=user_id, ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_group_project_role_grants(self): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % {'project_id': self.project_id, 'group_id': self.group_id} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=self.role, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse( r, expected_length=0, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) def test_crud_group_project_role_grants_no_group(self): """Grant role on a project to a group that doesn't exist. When grant a role on a project to a group that doesn't exist, the server returns 404 Not Found for the group. 
""" group_id = uuid.uuid4().hex collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % {'project_id': self.project_id, 'group_id': group_id} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def test_crud_group_domain_role_grants(self): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: collection_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {'domain_id': self.domain_id, 'group_id': self.group_id} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=self.role, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) self.delete(member_url) # NOTE(lbragstad): Make sure we wait a second before we ask for the # roles. This ensures the token we use isn't considered revoked # because it was issued within the same second as a revocation # event. frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleListResponse( r, expected_length=0, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) def test_crud_group_domain_role_grants_no_group(self): """Grant role on a domain to a group that doesn't exist. When grant a role on a domain to a group that doesn't exist, the server returns 404 Not Found for the group. 
""" group_id = uuid.uuid4().hex collection_url = '/domains/{domain_id}/groups/{group_id}/roles'.format( domain_id=self.domain_id, group_id=group_id, ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url, expected_status=http.client.NOT_FOUND) self.head(member_url, expected_status=http.client.NOT_FOUND) self.get(member_url, expected_status=http.client.NOT_FOUND) def _create_new_user_and_assign_role_on_project(self): """Create a new user and assign user a role on a project.""" # Create a new user new_user = unit.new_user_ref(domain_id=self.domain_id) user_ref = PROVIDERS.identity_api.create_user(new_user) # Assign the user a role on the project collection_url = '/projects/{project_id}/users/{user_id}/roles'.format( project_id=self.project_id, user_id=user_ref['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) # Check the user has the role assigned self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) return member_url, user_ref def test_delete_user_before_removing_role_assignment_succeeds(self): """Call ``DELETE`` on the user before the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend PROVIDERS.identity_api.driver.delete_user(user['id']) # Clean up the role assignment self.delete(member_url) # Make sure the role is gone self.head(member_url, expected_status=http.client.NOT_FOUND) def test_delete_group_before_removing_role_assignment_succeeds(self): # Disable the cache so that we perform a fresh check of the identity # backend when attempting to remove the role assignment. 
self.config_fixture.config(group='cache', enabled=False) # Create a new group group = unit.new_group_ref(domain_id=self.domain_id) group_ref = PROVIDERS.identity_api.create_group(group) # Assign the user a role on the project collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % {'project_id': self.project_id, 'group_id': group_ref['id']} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) # Check the user has the role assigned self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) # Simulate removing the group via LDAP by directly removing it from the # identity backend. PROVIDERS.identity_api.driver.delete_group(group_ref['id']) # Ensure we can clean up the role assignment even though the group # doesn't exist self.delete(member_url) def test_delete_user_before_removing_system_assignments_succeeds(self): system_role = self._create_new_role() user = self._create_user() path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=user['id'], role_id=system_role, ) self.put(path) response = self.get('/role_assignments') number_of_assignments = len(response.json_body['role_assignments']) path = '/users/{user_id}'.format(user_id=user['id']) self.delete(path) # The user with the system role assignment is a new user and only has # one role on the system. We should expect one less role assignment in # the list. 
response = self.get('/role_assignments') self.assertValidRoleAssignmentListResponse( response, expected_length=number_of_assignments - 1 ) def test_delete_user_and_check_role_assignment_fails(self): """Call ``DELETE`` on the user and check the role assignment.""" member_url, user = self._create_new_user_and_assign_role_on_project() # Delete the user from identity backend PROVIDERS.identity_api.delete_user(user['id']) # We should get a 404 Not Found when looking for the user in the # identity backend because we're not performing a delete operation on # the role. self.head(member_url, expected_status=http.client.NOT_FOUND) def test_token_revoked_once_group_role_grant_revoked(self): """Test token invalid when direct & indirect role on user is revoked. When a role granted to a group is revoked for a given scope, and user direct role is revoked, then tokens created by user will be invalid. """ time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # creates grant from group on project. PROVIDERS.assignment_api.create_grant( role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id'], ) # adds user to the group. PROVIDERS.identity_api.add_user_to_group( user_id=self.user['id'], group_id=self.group['id'] ) # creates a token for the user auth_body = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'], ) token_resp = self.post('/auth/tokens', body=auth_body) token = token_resp.headers.get('x-subject-token') # validates the returned token; it should be valid. self.head( '/auth/tokens', headers={'x-subject-token': token}, expected_status=http.client.OK, ) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) # revokes the grant from group on project. 
PROVIDERS.assignment_api.delete_grant( role_id=self.role['id'], project_id=self.project['id'], group_id=self.group['id'], ) # revokes the direct role form user on project PROVIDERS.assignment_api.delete_grant( role_id=self.role['id'], project_id=self.project['id'], user_id=self.user['id'], ) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) # validates the same token again; it should not longer be valid. self.head( '/auth/tokens', token=token, expected_status=http.client.UNAUTHORIZED, ) def test_delete_group_before_removing_system_assignments_succeeds(self): system_role = self._create_new_role() group = self._create_group() path = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role, ) self.put(path) response = self.get('/role_assignments') number_of_assignments = len(response.json_body['role_assignments']) path = '/groups/{group_id}'.format(group_id=group['id']) self.delete(path) # The group with the system role assignment is a new group and only has # one role on the system. We should expect one less role assignment in # the list. 
response = self.get('/role_assignments') self.assertValidRoleAssignmentListResponse( response, expected_length=number_of_assignments - 1 ) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_project_invalidate_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(new_project['id'], new_project) collection_url = '/projects/{project_id}/users/{user_id}/roles'.format( project_id=new_project['id'], user_id=self.user['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) # create the user a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse( resp, ref=self.role, resource_url=collection_url ) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_user_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) collection_url = '/domains/{domain_id}/users/{user_id}/roles'.format( domain_id=new_domain['id'], user_id=self.user['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) # create the user a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse( resp, ref=self.role, resource_url=collection_url ) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles 
on the domain resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_group_and_project_invalidates_cache(self): # create a new project new_project = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(new_project['id'], new_project) collection_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles' % {'project_id': new_project['id'], 'group_id': self.group['id']} ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) # create the group a grant on the new project self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse( resp, ref=self.role, resource_url=collection_url ) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the project resp = self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) @unit.skip_if_cache_disabled('assignment') def test_delete_grant_from_group_and_domain_invalidates_cache(self): # create a new domain new_domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(new_domain['id'], new_domain) collection_url = '/domains/{domain_id}/groups/{group_id}/roles'.format( domain_id=new_domain['id'], group_id=self.group['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) # create the group a grant on the new domain self.put(member_url) # check the grant that was just created self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) resp = self.get(collection_url) self.assertValidRoleListResponse( resp, ref=self.role, resource_url=collection_url ) # delete the grant self.delete(member_url) # get the collection and ensure there are no roles on the domain resp = 
self.get(collection_url) self.assertListEqual(resp.json_body['roles'], []) # Role Assignments tests def test_get_head_role_assignments(self): """Call ``GET & HEAD /role_assignments``. The sample data set up already has a user, group and project that is part of self.domain. We use these plus a new user we create as our data set, making sure we ignore any role assignments that are already in existence. Since we don't yet support a first class entity for role assignments, we are only testing the LIST API. To create and delete the role assignments we use the old grant APIs. Test Plan: - Create extra user for tests - Get a list of all existing role assignments - Add a new assignment for each of the four combinations, i.e. group+domain, user+domain, group+project, user+project, using the same role each time - Get a new list of all role assignments, checking these four new ones have been added - Then delete the four we added - Get a new list of all role assignments, checking the four have been removed """ time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.new_user_ref(domain_id=self.domain['id']) user1 = PROVIDERS.identity_api.create_user(user1) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) self.head(collection_url, expected_status=http.client.OK) existing_assignments = len(r.result.get('role_assignments')) # Now add one of each of the four types of assignment, making sure # that we get them all back. 
gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=self.group_id, role_id=role['id'], ) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, gd_entity) self.head(collection_url, expected_status=http.client.OK) ud_entity = self.build_role_assignment_entity( domain_id=self.domain_id, user_id=user1['id'], role_id=role['id'], ) self.put(ud_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, ud_entity) self.head(collection_url, expected_status=http.client.OK) gp_entity = self.build_role_assignment_entity( project_id=self.project_id, group_id=self.group_id, role_id=role['id'], ) self.put(gp_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 3, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, gp_entity) self.head(collection_url, expected_status=http.client.OK) up_entity = self.build_role_assignment_entity( project_id=self.project_id, user_id=user1['id'], role_id=role['id'], ) self.put(up_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 4, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, up_entity) self.head(collection_url, expected_status=http.client.OK) # Now delete the four we added and make sure they are removed # from the collection. 
self.delete(gd_entity['links']['assignment']) self.delete(ud_entity['links']['assignment']) self.delete(gp_entity['links']['assignment']) self.delete(up_entity['links']['assignment']) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments, resource_url=collection_url, ) self.assertRoleAssignmentNotInListResponse(r, gd_entity) self.assertRoleAssignmentNotInListResponse(r, ud_entity) self.assertRoleAssignmentNotInListResponse(r, gp_entity) self.assertRoleAssignmentNotInListResponse(r, up_entity) self.head(collection_url, expected_status=http.client.OK) def test_get_effective_role_assignments(self): """Call ``GET /role_assignments?effective``. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then get a list of all effective role assignments - the group assignment should have turned into assignments on the domain for each of the group members. 
""" user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) PROVIDERS.identity_api.add_user_to_group(user1['id'], self.group['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id, ) self.put(gd_entity['links']['assignment']) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles - this # should mean the group assignment is translated into the two # member user assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url, ) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user1['id'], role_id=self.role_id, ) self.assertRoleAssignmentInListResponse(r, ud_entity) ud_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], domain_id=self.domain_id, user_id=user2['id'], role_id=self.role_id, ) self.assertRoleAssignmentInListResponse(r, ud_entity) def test_check_effective_values_for_role_assignments(self): """Call ``GET & HEAD /role_assignments?effective=value``. Check the various ways of specifying the 'effective' query parameter. 
If the 'effective' query parameter is included then this should always be treated as meaning 'True' unless it is specified as: {url}?effective=0 This is by design to match the agreed way of handling policy checking on query/filter parameters. Test Plan: - Create two extra user for tests - Add these users to a group - Add a role assignment for the group on a domain - Get a list of all role assignments, checking one has been added - Then issue various request with different ways of defining the 'effective' query parameter. As we have tested the correctness of the data coming back when we get effective roles in other tests, here we just use the count of entities to know if we are getting effective roles or not """ user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) PROVIDERS.identity_api.add_user_to_group(user1['id'], self.group['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], self.group['id']) collection_url = '/role_assignments' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) existing_assignments = len(r.result.get('role_assignments')) gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=self.group_id, role_id=self.role_id, ) self.put(gd_entity['links']['assignment']) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url, ) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now re-read the collection asking for effective roles, # using the most common way of defining "effective'. 
This # should mean the group assignment is translated into the two # member user assignments collection_url = '/role_assignments?effective' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url, ) # Now set 'effective' to false explicitly - should get # back the regular roles collection_url = '/role_assignments?effective=0' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 1, resource_url=collection_url, ) # Now try setting 'effective' to 'False' explicitly- this is # NOT supported as a way of setting a query or filter # parameter to false by design. Hence we should get back # effective roles. collection_url = '/role_assignments?effective=False' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url, ) # Now set 'effective' to True explicitly collection_url = '/role_assignments?effective=True' r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=existing_assignments + 2, resource_url=collection_url, ) def test_filtered_role_assignments(self): """Call ``GET /role_assignments?filters``. 
Test Plan: - Create extra users, group, role and project for tests - Make the following assignments: Give group1, role1 on project1 and domain Give user1, role2 on project1 and domain Make User1 a member of Group1 - Test a series of single filter list calls, checking that the correct results are obtained - Test a multi-filtered list call - Test listing all effective roles for a given user - Test the equivalent of the list of roles in a project scoped token (all effective roles for a user on a project) """ # Since the default fixtures already assign some roles to the # user it creates, we also need a new user that will not have any # existing assignments user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) group1 = unit.new_group_ref(domain_id=self.domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], group1['id']) project1 = unit.new_project_ref(domain_id=self.domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) self.role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(self.role1['id'], self.role1) self.role2 = unit.new_role_ref() PROVIDERS.role_api.create_role(self.role2['id'], self.role2) # Now add one of each of the six types of assignment gd_entity = self.build_role_assignment_entity( domain_id=self.domain_id, group_id=group1['id'], role_id=self.role1['id'], ) self.put(gd_entity['links']['assignment']) ud_entity = self.build_role_assignment_entity( domain_id=self.domain_id, user_id=user1['id'], role_id=self.role2['id'], ) self.put(ud_entity['links']['assignment']) gp_entity = self.build_role_assignment_entity( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id'], ) self.put(gp_entity['links']['assignment']) up_entity = self.build_role_assignment_entity( 
project_id=project1['id'], user_id=user1['id'], role_id=self.role2['id'], ) self.put(up_entity['links']['assignment']) gs_entity = self.build_role_assignment_entity( system='all', group_id=group1['id'], role_id=self.role1['id'] ) self.put(gs_entity['links']['assignment']) us_entity = self.build_role_assignment_entity( system='all', user_id=user1['id'], role_id=self.role2['id'] ) self.put(us_entity['links']['assignment']) us2_entity = self.build_role_assignment_entity( system='all', user_id=user2['id'], role_id=self.role2['id'] ) self.put(us2_entity['links']['assignment']) # Now list by various filters to make sure we get back the right ones collection_url = ( '/role_assignments?scope.project.id=%s' % project1['id'] ) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = ( '/role_assignments?scope.domain.id=%s' % self.domain['id'] ) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) collection_url = '/role_assignments?user.id=%s' % user1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) collection_url = '/role_assignments?group.id=%s' % group1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, 
expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) collection_url = '/role_assignments?role.id=%s' % self.role1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, gd_entity) self.assertRoleAssignmentInListResponse(r, gp_entity) self.assertRoleAssignmentInListResponse(r, gs_entity) collection_url = '/role_assignments?role.id=%s' % self.role2['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=4, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, us_entity) # Let's try combining two filers together.... 
collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % {'user_id': user1['id'], 'project_id': project1['id']} ) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=1, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(r, up_entity) # Now for a harder one - filter for user with effective # roles - this should return role assignment that were directly # assigned as well as by virtue of group membership collection_url = '/role_assignments?effective&user.id=%s' % user1['id'] r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=4, resource_url=collection_url ) # Should have the two direct roles... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, ud_entity) # ...and the two via group membership... gp1_link = self.build_role_assignment_link( project_id=project1['id'], group_id=group1['id'], role_id=self.role1['id'], ) gd1_link = self.build_role_assignment_link( domain_id=self.domain_id, group_id=group1['id'], role_id=self.role1['id'], ) up1_entity = self.build_role_assignment_entity( link=gp1_link, project_id=project1['id'], user_id=user1['id'], role_id=self.role1['id'], ) ud1_entity = self.build_role_assignment_entity( link=gd1_link, domain_id=self.domain_id, user_id=user1['id'], role_id=self.role1['id'], ) self.assertRoleAssignmentInListResponse(r, up1_entity) self.assertRoleAssignmentInListResponse(r, ud1_entity) # ...and for the grand-daddy of them all, simulate the request # that would generate the list of effective roles in a project # scoped token. 
collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % {'user_id': user1['id'], 'project_id': project1['id']} ) r = self.get(collection_url, expected_status=http.client.OK) self.head(collection_url, expected_status=http.client.OK) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url ) # Should have one direct role and one from group membership... self.assertRoleAssignmentInListResponse(r, up_entity) self.assertRoleAssignmentInListResponse(r, up1_entity) def test_list_system_role_assignments(self): # create a bunch of roles user_system_role_id = self._create_new_role() user_domain_role_id = self._create_new_role() user_project_role_id = self._create_new_role() group_system_role_id = self._create_new_role() group_domain_role_id = self._create_new_role() group_project_role_id = self._create_new_role() # create a user and grant the user a role on the system, domain, and # project user = self._create_user() url = '/system/users/{}/roles/{}'.format( user['id'], user_system_role_id ) self.put(url) url = '/domains/{}/users/{}/roles/{}'.format( self.domain_id, user['id'], user_domain_role_id, ) self.put(url) url = '/projects/{}/users/{}/roles/{}'.format( self.project_id, user['id'], user_project_role_id, ) self.put(url) # create a group and grant the group a role on the system, domain, and # project group = self._create_group() url = '/system/groups/{}/roles/{}'.format( group['id'], group_system_role_id, ) self.put(url) url = '/domains/{}/groups/{}/roles/{}'.format( self.domain_id, group['id'], group_domain_role_id, ) self.put(url) url = '/projects/{}/groups/{}/roles/{}'.format( self.project_id, group['id'], group_project_role_id, ) self.put(url) # /v3/role_assignments?scope.system=all should return two assignments response = self.get('/role_assignments?scope.system=all') self.assertValidRoleAssignmentListResponse(response, expected_length=2) for assignment in 
response.json_body['role_assignments']: self.assertTrue(assignment['scope']['system']['all']) if assignment.get('user'): self.assertEqual(user_system_role_id, assignment['role']['id']) if assignment.get('group'): self.assertEqual( group_system_role_id, assignment['role']['id'] ) # /v3/role_assignments?scope_system=all&user.id=$USER_ID should return # one role assignment url = '/role_assignments?scope.system=all&user.id=%s' % user['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( user_system_role_id, response.json_body['role_assignments'][0]['role']['id'], ) # /v3/role_assignments?scope_system=all&group.id=$GROUP_ID should # return one role assignment url = '/role_assignments?scope.system=all&group.id=%s' % group['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( group_system_role_id, response.json_body['role_assignments'][0]['role']['id'], ) # /v3/role_assignments?user.id=$USER_ID should return 3 assignments # and system should be in that list of assignments url = '/role_assignments?user.id=%s' % user['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=3) for assignment in response.json_body['role_assignments']: if 'system' in assignment['scope']: self.assertEqual(user_system_role_id, assignment['role']['id']) if 'domain' in assignment['scope']: self.assertEqual(user_domain_role_id, assignment['role']['id']) if 'project' in assignment['scope']: self.assertEqual( user_project_role_id, assignment['role']['id'] ) # /v3/role_assignments?group.id=$GROUP_ID should return 3 assignments # and system should be in that list of assignments url = '/role_assignments?group.id=%s' % group['id'] response = self.get(url) self.assertValidRoleAssignmentListResponse(response, expected_length=3) for assignment in response.json_body['role_assignments']: if 'system' in assignment['scope']: 
self.assertEqual( group_system_role_id, assignment['role']['id'] ) if 'domain' in assignment['scope']: self.assertEqual( group_domain_role_id, assignment['role']['id'] ) if 'project' in assignment['scope']: self.assertEqual( group_project_role_id, assignment['role']['id'] ) class RoleAssignmentBaseTestCase( test_v3.RestfulTestCase, test_v3.AssignmentTestMixin ): """Base class for testing /v3/role_assignments API behavior.""" MAX_HIERARCHY_BREADTH = 3 MAX_HIERARCHY_DEPTH = CONF.max_project_tree_depth - 1 def load_sample_data(self): """Create sample data to be used on tests. Created data are i) a role and ii) a domain containing: a project hierarchy and 3 users within 3 groups. """ def create_project_hierarchy(parent_id, depth): """Create a random project hierarchy.""" if depth == 0: return breadth = random.randint(1, self.MAX_HIERARCHY_BREADTH) subprojects = [] for i in range(breadth): subprojects.append( unit.new_project_ref( domain_id=self.domain_id, parent_id=parent_id ) ) PROVIDERS.resource_api.create_project( subprojects[-1]['id'], subprojects[-1] ) new_parent = subprojects[random.randint(0, breadth - 1)] create_project_hierarchy(new_parent['id'], depth - 1) super().load_sample_data() # Create a domain self.domain = unit.new_domain_ref() self.domain_id = self.domain['id'] PROVIDERS.resource_api.create_domain(self.domain_id, self.domain) # Create a project hierarchy self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] PROVIDERS.resource_api.create_project(self.project_id, self.project) # Create a random project hierarchy create_project_hierarchy( self.project_id, random.randint(1, self.MAX_HIERARCHY_DEPTH) ) # Create 3 users self.user_ids = [] for i in range(3): user = unit.new_user_ref(domain_id=self.domain_id) user = PROVIDERS.identity_api.create_user(user) self.user_ids.append(user['id']) # Create 3 groups self.group_ids = [] for i in range(3): group = unit.new_group_ref(domain_id=self.domain_id) group = 
PROVIDERS.identity_api.create_group(group) self.group_ids.append(group['id']) # Put 2 members on each group PROVIDERS.identity_api.add_user_to_group( user_id=self.user_ids[i], group_id=group['id'] ) PROVIDERS.identity_api.add_user_to_group( user_id=self.user_ids[i % 2], group_id=group['id'] ) PROVIDERS.assignment_api.create_grant( user_id=self.user_id, project_id=self.project_id, role_id=self.role_id, ) # Create a role self.role = unit.new_role_ref() self.role_id = self.role['id'] PROVIDERS.role_api.create_role(self.role_id, self.role) # Set default user and group to be used on tests self.default_user_id = self.user_ids[0] self.default_group_id = self.group_ids[0] def get_role_assignments(self, expected_status=http.client.OK, **filters): """Return the result from querying role assignment API + queried URL. Calls GET /v3/role_assignments? and returns its result, where is the HTTP query parameters form of effective option plus filters, if provided. Queried URL is returned as well. :returns: a tuple containing the list role assignments API response and queried URL. """ query_url = self._get_role_assignments_query_url(**filters) response = self.get(query_url, expected_status=expected_status) return (response, query_url) def _get_role_assignments_query_url(self, **filters): """Return non-effective role assignments query URL from given filters. :param filters: query parameters are created with the provided filters on role assignments attributes. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. """ return self.build_role_assignment_query_url(**filters) class RoleAssignmentFailureTestCase(RoleAssignmentBaseTestCase): """Class for testing invalid query params on /v3/role_assignments API. Querying domain and project, or user and group results in a HTTP 400 Bad Request, since a role assignment must contain only a single pair of (actor, target). 
In addition, since filtering on role assignments applies only to the final result, effective mode cannot be combined with i) group or ii) domain and inherited, because it would always result in an empty list. """ def test_get_role_assignments_by_domain_and_project(self): self.get_role_assignments( domain_id=self.domain_id, project_id=self.project_id, expected_status=http.client.BAD_REQUEST, ) def test_get_role_assignments_by_user_and_group(self): self.get_role_assignments( user_id=self.default_user_id, group_id=self.default_group_id, expected_status=http.client.BAD_REQUEST, ) def test_get_role_assignments_by_effective_and_inherited(self): self.get_role_assignments( domain_id=self.domain_id, effective=True, inherited_to_projects=True, expected_status=http.client.BAD_REQUEST, ) def test_get_role_assignments_by_effective_and_group(self): self.get_role_assignments( effective=True, group_id=self.default_group_id, expected_status=http.client.BAD_REQUEST, ) class RoleAssignmentDirectTestCase(RoleAssignmentBaseTestCase): """Class for testing direct assignments on /v3/role_assignments API. Direct assignments on a domain or project have effect on them directly, instead of on their project hierarchy, i.e they are non-inherited. In addition, group direct assignments are not expanded to group's users. Tests on this class make assertions on the representation and API filtering of direct assignments. """ def _test_get_role_assignments(self, **filters): """Generic filtering test method. According to the provided filters, this method: - creates a new role assignment; - asserts that list role assignments API reponds correctly; - deletes the created role assignment. :param filters: filters to be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. 
""" # Fills default assignment with provided filters test_assignment = self._set_default_assignment_attributes(**filters) # Create new role assignment for this test PROVIDERS.assignment_api.create_grant(**test_assignment) # Get expected role assignments expected_assignments = self._list_expected_role_assignments( **test_assignment ) # Get role assignments from API response, query_url = self.get_role_assignments(**test_assignment) self.assertValidRoleAssignmentListResponse( response, resource_url=query_url ) self.assertEqual( len(expected_assignments), len(response.result.get('role_assignments')), ) # Assert that expected role assignments were returned by the API call for assignment in expected_assignments: self.assertRoleAssignmentInListResponse(response, assignment) # Delete created role assignment PROVIDERS.assignment_api.delete_grant(**test_assignment) def _set_default_assignment_attributes(self, **attribs): """Insert default values for missing attributes of role assignment. If no actor, target or role are provided, they will default to values from sample data. :param attribs: info from a role assignment entity. Valid attributes are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. """ if not any( target in attribs for target in ('domain_id', 'projects_id') ): attribs['project_id'] = self.project_id if not any(actor in attribs for actor in ('user_id', 'group_id')): attribs['user_id'] = self.default_user_id if 'role_id' not in attribs: attribs['role_id'] = self.role_id return attribs def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. 
""" return [self.build_role_assignment_entity(**filters)] # Test cases below call the generic test method, providing different filter # combinations. Filters are provided as specified in the method name, after # 'by'. For example, test_get_role_assignments_by_project_user_and_role # calls the generic test method with project_id, user_id and role_id. def test_get_role_assignments_by_domain(self, **filters): self._test_get_role_assignments(domain_id=self.domain_id, **filters) def test_get_role_assignments_by_project(self, **filters): self._test_get_role_assignments(project_id=self.project_id, **filters) def test_get_role_assignments_by_user(self, **filters): self._test_get_role_assignments( user_id=self.default_user_id, **filters ) def test_get_role_assignments_by_group(self, **filters): self._test_get_role_assignments( group_id=self.default_group_id, **filters ) def test_get_role_assignments_by_role(self, **filters): self._test_get_role_assignments(role_id=self.role_id, **filters) def test_get_role_assignments_by_domain_and_user(self, **filters): self.test_get_role_assignments_by_domain( user_id=self.default_user_id, **filters ) def test_get_role_assignments_by_domain_and_group(self, **filters): self.test_get_role_assignments_by_domain( group_id=self.default_group_id, **filters ) def test_get_role_assignments_by_project_and_user(self, **filters): self.test_get_role_assignments_by_project( user_id=self.default_user_id, **filters ) def test_get_role_assignments_by_project_and_group(self, **filters): self.test_get_role_assignments_by_project( group_id=self.default_group_id, **filters ) def test_get_role_assignments_by_domain_user_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_user( role_id=self.role_id, **filters ) def test_get_role_assignments_by_domain_group_and_role(self, **filters): self.test_get_role_assignments_by_domain_and_group( role_id=self.role_id, **filters ) def test_get_role_assignments_by_project_user_and_role(self, **filters): 
self.test_get_role_assignments_by_project_and_user( role_id=self.role_id, **filters ) def test_get_role_assignments_by_project_group_and_role(self, **filters): self.test_get_role_assignments_by_project_and_group( role_id=self.role_id, **filters ) class RoleAssignmentInheritedTestCase(RoleAssignmentDirectTestCase): """Class for testing inherited assignments on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class do not make assertions on the effect of inherited assignments, but in their representation and API filtering. """ def _test_get_role_assignments(self, **filters): """Add inherited_to_project filter to expected entity in tests.""" super()._test_get_role_assignments( inherited_to_projects=True, **filters ) class RoleAssignmentEffectiveTestCase(RoleAssignmentInheritedTestCase): """Class for testing inheritance effects on /v3/role_assignments API. Inherited assignments on a domain or project have no effect on them directly, but on the projects under them instead. Tests on this class make assertions on the effect of inherited assignments and API filtering. """ def _get_role_assignments_query_url(self, **filters): """Return effective role assignments query URL from given filters. For test methods in this class, effetive will always be true. As in effective mode, inherited_to_projects, group_id, domain_id and project_id will always be desconsidered from provided filters. :param filters: query parameters are created with the provided filters. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: role assignments query URL. 
""" query_filters = filters.copy() query_filters.pop('inherited_to_projects') query_filters.pop('group_id', None) query_filters.pop('domain_id', None) query_filters.pop('project_id', None) return self.build_role_assignment_query_url( effective=True, **query_filters ) def _list_expected_role_assignments(self, **filters): """Given the filters, it returns expected direct role assignments. :param filters: filters that will be considered when listing role assignments. Valid filters are: role_id, domain_id, project_id, group_id, user_id and inherited_to_projects. :returns: the list of the expected role assignments. """ # Get assignment link, to be put on 'links': {'assignment': link} assignment_link = self.build_role_assignment_link(**filters) # Expand group membership user_ids = [None] if filters.get('group_id'): user_ids = [ user['id'] for user in PROVIDERS.identity_api.list_users_in_group( filters['group_id'] ) ] else: user_ids = [self.default_user_id] # Expand role inheritance project_ids = [None] if filters.get('domain_id'): project_ids = [ project['id'] for project in PROVIDERS.resource_api.list_projects_in_domain( filters.pop('domain_id') ) ] else: project_ids = [ project['id'] for project in PROVIDERS.resource_api.list_projects_in_subtree( self.project_id ) ] # Compute expected role assignments assignments = [] for project_id in project_ids: filters['project_id'] = project_id for user_id in user_ids: filters['user_id'] = user_id assignments.append( self.build_role_assignment_entity( link=assignment_link, **filters ) ) return assignments class AssignmentInheritanceTestCase( test_v3.RestfulTestCase, test_v3.AssignmentTestMixin ): """Test inheritance crud and its effects.""" def test_get_token_from_inherited_user_domain_role_grants(self): # Create a new user to ensure that no grant is loaded from sample data user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) # Define domain and project authentication data domain_auth_data = 
self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=self.domain_id, ) project_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=self.project_id, ) # Check the user cannot get a domain nor a project token self.v3_create_token( domain_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Grant non-inherited role for user on domain non_inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id ) self.put(non_inher_ud_link) # Check the user can get only a domain token self.v3_create_token(domain_auth_data) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Create inherited role inherited_role = unit.new_role_ref(name='inherited') PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role) # Grant inherited role for user on domain inher_ud_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=inherited_role['id'], inherited_to_projects=True, ) self.put(inher_ud_link) # Check the user can get both a domain and a project token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data) # Delete inherited grant self.delete(inher_ud_link) # Check the user can only get a domain token self.v3_create_token(domain_auth_data) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Delete non-inherited grant self.delete(non_inher_ud_link) # Check the user cannot get a domain token anymore self.v3_create_token( domain_auth_data, expected_status=http.client.UNAUTHORIZED ) def test_get_token_from_inherited_group_domain_role_grants(self): # Create a new group and put a new user in it to # ensure that no grant is loaded from sample data user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) group = 
unit.new_group_ref(domain_id=self.domain['id']) group = PROVIDERS.identity_api.create_group(group) PROVIDERS.identity_api.add_user_to_group(user['id'], group['id']) # Define domain and project authentication data domain_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=self.domain_id, ) project_auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=self.project_id, ) # Check the user cannot get a domain nor a project token self.v3_create_token( domain_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Grant non-inherited role for user on domain non_inher_gd_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=self.role_id ) self.put(non_inher_gd_link) # Check the user can get only a domain token self.v3_create_token(domain_auth_data) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Create inherited role inherited_role = unit.new_role_ref(name='inherited') PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role) # Grant inherited role for user on domain inher_gd_link = self.build_role_assignment_link( domain_id=self.domain_id, user_id=user['id'], role_id=inherited_role['id'], inherited_to_projects=True, ) self.put(inher_gd_link) # Check the user can get both a domain and a project token self.v3_create_token(domain_auth_data) self.v3_create_token(project_auth_data) # Delete inherited grant self.delete(inher_gd_link) # Check the user can only get a domain token self.v3_create_token(domain_auth_data) self.v3_create_token( project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Delete non-inherited grant self.delete(non_inher_gd_link) # Check the user cannot get a domain token anymore self.v3_create_token( domain_auth_data, expected_status=http.client.UNAUTHORIZED ) def 
_test_crud_inherited_and_direct_assignment_on_target(self, target_url): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # Create a new role to avoid assignments loaded from sample data role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # Define URLs direct_url = '{}/users/{}/roles/{}'.format( target_url, self.user_id, role['id'], ) inherited_url = ( '/OS-INHERIT/%s/inherited_to_projects' % direct_url.lstrip('/') ) # Create the direct assignment self.put(direct_url) # Check the direct assignment exists, but the inherited one does # not self.head(direct_url) self.head(inherited_url, expected_status=http.client.NOT_FOUND) # Now add the inherited assignment self.put(inherited_url) # Check both the direct and inherited assignment exist self.head(direct_url) self.head(inherited_url) # Delete indirect assignment self.delete(inherited_url) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) # Check the direct assignment exists, but the inherited one does # not self.head(direct_url) self.head(inherited_url, expected_status=http.client.NOT_FOUND) # Now delete the inherited assignment self.delete(direct_url) # Check that none of them exist self.head(direct_url, expected_status=http.client.NOT_FOUND) self.head(inherited_url, expected_status=http.client.NOT_FOUND) def test_crud_inherited_and_direct_assignment_on_domains(self): self._test_crud_inherited_and_direct_assignment_on_target( '/domains/%s' % self.domain_id ) def test_crud_inherited_and_direct_assignment_on_projects(self): self._test_crud_inherited_and_direct_assignment_on_target( '/projects/%s' % self.project_id ) def test_crud_user_inherited_domain_role_grants(self): role_list = [] for _ in range(2): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) # Create a non-inherited role as a spoiler PROVIDERS.assignment_api.create_grant( role_list[1]['id'], user_id=self.user['id'], domain_id=self.domain_id, ) 
base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': self.domain_id, 'user_id': self.user['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[0]['id'], ) collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) # Check we can read it back self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[0], resource_url=collection_url ) # Now delete and check its gone self.delete(member_url) r = self.get(collection_url) self.assertValidRoleListResponse( r, expected_length=0, resource_url=collection_url ) def test_list_role_assignments_for_inherited_domain_grants(self): """Call ``GET /role_assignments with inherited domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id'] ) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id'] ) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id'] ) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': domain['id'], 'user_id': user1['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[3]['id'], ) collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[3], resource_url=collection_url ) # Now use the list domain role assignments api to check if this # is included collection_url = ( '/role_assignments?user.id=%(user_id)s' '&scope.domain.id=%(domain_id)s' % {'user_id': user1['id'], 'domain_id': domain['id']} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=1, resource_url=collection_url ) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], 
role_id=role_list[3]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, ud_entity) # Now ask for effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % {'user_id': user1['id'], 'project_id': project1['id']} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, up_entity) def _test_list_role_assignments_include_names(self, role1): """Call ``GET /role_assignments with include names``. 
Test Plan: - Create a domain with a group and a user - Create a project with a group and a user """ role1 = unit.new_role_ref() PROVIDERS.role_api.create_role(role1['id'], role1) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) group = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group) project1 = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project(project1['id'], project1) expected_entity1 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, user_ref=user1 ) self.put(expected_entity1['links']['assignment']) expected_entity2 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, group_ref=group ) self.put(expected_entity2['links']['assignment']) expected_entity3 = self.build_role_assignment_entity_include_names( role_ref=role1, domain_ref=self.domain, user_ref=user1 ) self.put(expected_entity3['links']['assignment']) expected_entity4 = self.build_role_assignment_entity_include_names( role_ref=role1, project_ref=project1, group_ref=group ) self.put(expected_entity4['links']['assignment']) collection_url_domain = ( '/role_assignments?include_names&scope.domain.id=%(domain_id)s' % {'domain_id': self.domain_id} ) rs_domain = self.get(collection_url_domain) collection_url_project = ( '/role_assignments?include_names&' 'scope.project.id=%(project_id)s' % {'project_id': project1['id']} ) rs_project = self.get(collection_url_project) collection_url_group = ( '/role_assignments?include_names&group.id=%(group_id)s' % {'group_id': group['id']} ) rs_group = self.get(collection_url_group) collection_url_user = ( '/role_assignments?include_names&user.id=%(user_id)s' % {'user_id': user1['id']} ) rs_user = self.get(collection_url_user) collection_url_role = ( '/role_assignments?include_names&role.id=%(role_id)s' % {'role_id': role1['id']} ) rs_role = self.get(collection_url_role) # Make sure all entities 
were created successfully self.assertEqual(http.client.OK, rs_domain.status_int) self.assertEqual(http.client.OK, rs_project.status_int) self.assertEqual(http.client.OK, rs_group.status_int) self.assertEqual(http.client.OK, rs_user.status_int) # Make sure we can get back the correct number of entities self.assertValidRoleAssignmentListResponse( rs_domain, expected_length=2, resource_url=collection_url_domain ) self.assertValidRoleAssignmentListResponse( rs_project, expected_length=2, resource_url=collection_url_project ) self.assertValidRoleAssignmentListResponse( rs_group, expected_length=2, resource_url=collection_url_group ) self.assertValidRoleAssignmentListResponse( rs_user, expected_length=2, resource_url=collection_url_user ) self.assertValidRoleAssignmentListResponse( rs_role, expected_length=4, resource_url=collection_url_role ) # Verify all types of entities have the correct format self.assertRoleAssignmentInListResponse(rs_domain, expected_entity2) self.assertRoleAssignmentInListResponse(rs_project, expected_entity1) self.assertRoleAssignmentInListResponse(rs_group, expected_entity4) self.assertRoleAssignmentInListResponse(rs_user, expected_entity3) self.assertRoleAssignmentInListResponse(rs_role, expected_entity1) def test_list_role_assignments_include_names_global_role(self): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) self._test_list_role_assignments_include_names(role) def test_list_role_assignments_include_names_domain_role(self): role = unit.new_role_ref(domain_id=self.domain['id']) PROVIDERS.role_api.create_role(role['id'], role) self._test_list_role_assignments_include_names(role) def test_remove_assignment_for_project_acting_as_domain(self): """Test goal: remove assignment for project acting as domain. Ensure when we have two role assignments for the project acting as domain, one dealing with it as a domain and other as a project, we still able to remove those assignments later. 
Test plan: - Create a role and a domain with a user; - Grant a role for this user in this domain; - Grant a role for this user in the same entity as a project; - Ensure that both assignments were created and it was valid; - Remove the domain assignment for the user and show that the project assignment for him still valid """ role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) assignment_domain = self.build_role_assignment_entity( role_id=role['id'], domain_id=domain['id'], user_id=user['id'], inherited_to_projects=False, ) assignment_project = self.build_role_assignment_entity( role_id=role['id'], project_id=domain['id'], user_id=user['id'], inherited_to_projects=False, ) self.put(assignment_domain['links']['assignment']) self.put(assignment_project['links']['assignment']) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']} ) result = self.get(collection_url) # We have two role assignments based in both roles for the domain and # project scope self.assertValidRoleAssignmentListResponse( result, expected_length=2, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(result, assignment_domain) domain_url = '/domains/{}/users/{}/roles/{}'.format( domain['id'], user['id'], role['id'], ) self.delete(domain_url) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']} ) result = self.get(collection_url) # Now we only have one assignment for the project scope since the # domain scope was removed. self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(result, assignment_project) def test_list_inherited_role_assignments_include_names(self): """Call ``GET /role_assignments?include_names``. 
Test goal: ensure calling list role assignments including names honors the inherited role assignments flag. Test plan: - Create a role and a domain with a user; - Create a inherited role assignment; - List role assignments for that user; - List role assignments for that user including names. """ role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) # Create and store expected assignment refs assignment = self.build_role_assignment_entity( role_id=role['id'], domain_id=domain['id'], user_id=user['id'], inherited_to_projects=True, ) assignment_names = self.build_role_assignment_entity_include_names( role_ref=role, domain_ref=domain, user_ref=user, inherited_assignment=True, ) # Ensure expected assignment refs are inherited and have the same URL self.assertEqual( 'projects', assignment['scope']['OS-INHERIT:inherited_to'] ) self.assertEqual( 'projects', assignment_names['scope']['OS-INHERIT:inherited_to'] ) self.assertEqual( assignment['links']['assignment'], assignment_names['links']['assignment'], ) self.put(assignment['links']['assignment']) collection_url = '/role_assignments?user.id=%(user_id)s' % ( {'user_id': user['id']} ) result = self.get(collection_url) self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(result, assignment) collection_url = ( '/role_assignments?include_names&' 'user.id=%(user_id)s' % {'user_id': user['id']} ) result = self.get(collection_url) self.assertValidRoleAssignmentListResponse( result, expected_length=1, resource_url=collection_url ) self.assertRoleAssignmentInListResponse(result, assignment_names) def test_list_role_assignments_for_disabled_inheritance_extension(self): """Call ``GET /role_assignments with inherited domain grants``. 
Test Plan: - Issue the URL to add inherited role to the domain - Issue the URL to check effective roles on project include the inherited role - Disable the extension - Re-check the effective roles, proving the inherited role no longer shows up. """ role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id'] ) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id'] ) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id'] ) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': domain['id'], 'user_id': user1['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[3]['id'], ) collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[3], resource_url=collection_url ) # Get effective list role assignments - the role should # turn into a project role, along with the two direct roles that are # on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' 
'&scope.project.id=%(project_id)s' % {'user_id': user1['id'], 'project_id': project1['id']} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) ud_url = self.build_role_assignment_link( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) up_entity = self.build_role_assignment_entity( link=ud_url, project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, up_entity) def test_list_role_assignments_for_inherited_group_domain_grants(self): """Call ``GET /role_assignments with inherited group domain grants``. Test Plan: - Create 4 roles - Create a domain with a user and two projects - Assign two direct roles to project1 - Assign a spoiler role to project2 - Issue the URL to add inherited role to the domain - Issue the URL to check it is indeed on the domain - Issue the URL to check effective roles on project1 - this should return 3 roles. 
""" role_list = [] for _ in range(4): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.identity_api.add_user_to_group(user2['id'], group1['id']) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some roles to the project PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id'] ) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[1]['id'] ) # ..and one on a different project as a spoiler PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[2]['id'] ) # Now create our inherited role on the domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {'domain_id': domain['id'], 'group_id': group1['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[3]['id'], ) collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[3], resource_url=collection_url ) # Now use the list domain role assignments api to check if this # is included collection_url = ( 
'/role_assignments?group.id=%(group_id)s' '&scope.domain.id=%(domain_id)s' % {'group_id': group1['id'], 'domain_id': domain['id']} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=1, resource_url=collection_url ) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, gd_entity) # Now ask for effective list role assignments - the role should # turn into a user project role, along with the two direct roles # that are on the project collection_url = ( '/role_assignments?effective&user.id=%(user_id)s' '&scope.project.id=%(project_id)s' % {'user_id': user1['id'], 'project_id': project1['id']} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=3, resource_url=collection_url ) # An effective role for an inherited role will be a project # entity, with a domain link to the inherited assignment up_entity = self.build_role_assignment_entity( link=gd_entity['links']['assignment'], project_id=project1['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, up_entity) def test_filtered_role_assignments_for_inherited_grants(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. Test Plan: - Create 5 roles - Create a domain with a user, group and two projects - Assign three direct spoiler roles to projects - Issue the URL to add an inherited user role to the domain - Issue the URL to add an inherited group role to the domain - Issue the URL to filter by inherited roles - this should return just the 2 inherited roles. 
""" role_list = [] for _ in range(5): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) group1 = unit.new_group_ref(domain_id=domain['id']) group1 = PROVIDERS.identity_api.create_group(group1) project1 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) project2 = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) # Add some spoiler roles to the projects PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project1['id'], role_list[0]['id'] ) PROVIDERS.assignment_api.add_role_to_user_and_project( user1['id'], project2['id'], role_list[1]['id'] ) # Create a non-inherited role as a spoiler PROVIDERS.assignment_api.create_grant( role_list[2]['id'], user_id=user1['id'], domain_id=domain['id'] ) # Now create two inherited roles on the domain, one for a user # and one for a domain base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': domain['id'], 'user_id': user1['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[3]['id'], ) collection_url = base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[3], resource_url=collection_url ) base_collection_url = ( '/OS-INHERIT/domains/%(domain_id)s/groups/%(group_id)s/roles' % {'domain_id': domain['id'], 'group_id': group1['id']} ) member_url = '{collection_url}/{role_id}/inherited_to_projects'.format( collection_url=base_collection_url, role_id=role_list[4]['id'], ) collection_url = 
base_collection_url + '/inherited_to_projects' self.put(member_url) self.head(member_url) self.get(member_url, expected_status=http.client.NO_CONTENT) r = self.get(collection_url) self.assertValidRoleListResponse( r, ref=role_list[4], resource_url=collection_url ) # Now use the list role assignments api to get a list of inherited # roles on the domain - should get back the two roles collection_url = ( '/role_assignments?scope.OS-INHERIT:inherited_to=projects' ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, expected_length=2, resource_url=collection_url ) ud_entity = self.build_role_assignment_entity( domain_id=domain['id'], user_id=user1['id'], role_id=role_list[3]['id'], inherited_to_projects=True, ) gd_entity = self.build_role_assignment_entity( domain_id=domain['id'], group_id=group1['id'], role_id=role_list[4]['id'], inherited_to_projects=True, ) self.assertRoleAssignmentInListResponse(r, ud_entity) self.assertRoleAssignmentInListResponse(r, gd_entity) def _setup_hierarchical_projects_scenario(self): """Create basic hierarchical projects scenario. This basic scenario contains a root with one leaf project and two roles with the following names: non-inherited and inherited. 
""" # Create project hierarchy root = unit.new_project_ref(domain_id=self.domain['id']) leaf = unit.new_project_ref( domain_id=self.domain['id'], parent_id=root['id'] ) PROVIDERS.resource_api.create_project(root['id'], root) PROVIDERS.resource_api.create_project(leaf['id'], leaf) # Create 'non-inherited' and 'inherited' roles non_inherited_role = unit.new_role_ref(name='non-inherited') PROVIDERS.role_api.create_role( non_inherited_role['id'], non_inherited_role ) inherited_role = unit.new_role_ref(name='inherited') PROVIDERS.role_api.create_role(inherited_role['id'], inherited_role) return ( root['id'], leaf['id'], non_inherited_role['id'], inherited_role['id'], ) def test_get_token_from_inherited_user_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=root_id, ) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=leaf_id, ) # Check the user cannot get a token on root nor leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token( leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Grant non-inherited role for user on leaf project non_inher_up_link = self.build_role_assignment_link( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_up_link) # Check the user can only get a token on leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token(leaf_project_auth_data) # Grant inherited role for user on root project inher_up_link = self.build_role_assignment_link( project_id=root_id, user_id=self.user['id'], 
role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_up_link) # Check the user still can get a token only on leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token(leaf_project_auth_data) # Delete non-inherited grant self.delete(non_inher_up_link) # Check the inherited role still applies for leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_up_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token( leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED ) def test_get_token_from_inherited_group_project_role_grants(self): # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Create group and add user to it group = unit.new_group_ref(domain_id=self.domain['id']) group = PROVIDERS.identity_api.create_group(group) PROVIDERS.identity_api.add_user_to_group(self.user['id'], group['id']) # Define root and leaf projects authentication data root_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=root_id, ) leaf_project_auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=leaf_id, ) # Check the user cannot get a token on root nor leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token( leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED ) # Grant non-inherited role for group on leaf project non_inher_gp_link = self.build_role_assignment_link( project_id=leaf_id, group_id=group['id'], role_id=non_inherited_role_id, ) self.put(non_inher_gp_link) # Check the user can only get a token on leaf project self.v3_create_token( 
root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token(leaf_project_auth_data) # Grant inherited role for group on root project inher_gp_link = self.build_role_assignment_link( project_id=root_id, group_id=group['id'], role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_gp_link) # Check the user still can get a token only on leaf project self.v3_create_token( root_project_auth_data, expected_status=http.client.UNAUTHORIZED ) self.v3_create_token(leaf_project_auth_data) # Delete no-inherited grant self.delete(non_inher_gp_link) # Check the inherited role still applies for leaf project self.v3_create_token(leaf_project_auth_data) # Delete inherited grant self.delete(inher_gp_link) # Check the user cannot get a token on leaf project anymore self.v3_create_token( leaf_project_auth_data, expected_status=http.client.UNAUTHORIZED ) def test_get_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get all role assignments - this should return just 2 roles (non-inherited and inherited) in the root project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_up_entity['links']['assignment']) # Get role assignments collection_url = '/role_assignments' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) def test_get_effective_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?effective``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to get effective role assignments - this should return 1 role (non-inherited) on the root project and 1 role (inherited) on the leaf project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_up_entity['links']['assignment']) # Get effective role assignments collection_url = '/role_assignments?effective' r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) # Assert that the user has non-inherited role on root project self.assertRoleAssignmentInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on root project self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentInListResponse(r, inher_up_entity) def test_project_id_specified_if_include_subtree_specified(self): """When using include_subtree, you must specify a project ID.""" r = self.get( '/role_assignments?include_subtree=True', expected_status=http.client.BAD_REQUEST, ) error_msg = ( "scope.project.id must be specified if include_subtree " "is also specified" ) self.assertEqual(error_msg, r.result['error']['message']) r = self.get( '/role_assignments?scope.project.id&include_subtree=True', expected_status=http.client.BAD_REQUEST, ) self.assertEqual(error_msg, 
r.result['error']['message']) def test_get_role_assignments_for_project_tree(self): """Get role_assignment?scope.project.id=X&include_subtree``. Test Plan: - Create 2 roles and a hierarchy of projects with one root and one leaf - Issue the URL to add a non-inherited user role to the root project and the leaf project - Issue the URL to get role assignments for the root project but not the subtree - this should return just the root assignment - Issue the URL to get role assignments for the root project and it's subtree - this should return both assignments - Check that explicitly setting include_subtree to False is the equivalent to not including it at all in the query. """ # Create default scenario root_id, leaf_id, non_inherited_role_id, unused_role_id = ( self._setup_hierarchical_projects_scenario() ) # Grant non-inherited role to root and leaf projects non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_entity_leaf['links']['assignment']) # Without the subtree, we should get the one assignment on the # root project collection_url = '/role_assignments?scope.project.id={project}'.format( project=root_id ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) # With the subtree, we should get both assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True' % {'project': root_id} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) self.assertThat(r.result['role_assignments'], 
matchers.HasLength(2)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # With subtree=0, we should also only get the one assignment on the # root project collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=0' % {'project': root_id} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) self.assertThat(r.result['role_assignments'], matchers.HasLength(1)) self.assertRoleAssignmentInListResponse(r, non_inher_entity_root) def test_get_effective_role_assignments_for_project_tree(self): """Get role_assignment ?project_id=X&include_subtree=True&effective``. Test Plan: - Create 2 roles and a hierarchy of projects with one root and 4 levels of child project - Issue the URL to add a non-inherited user role to the root project and a level 1 project - Issue the URL to add an inherited user role on the level 2 project - Issue the URL to get effective role assignments for the level 1 project and it's subtree - this should return a role (non-inherited) on the level 1 project and roles (inherited) on each of the level 2, 3 and 4 projects """ # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Add some extra projects to the project hierarchy level2 = unit.new_project_ref( domain_id=self.domain['id'], parent_id=leaf_id ) level3 = unit.new_project_ref( domain_id=self.domain['id'], parent_id=level2['id'] ) level4 = unit.new_project_ref( domain_id=self.domain['id'], parent_id=level3['id'] ) PROVIDERS.resource_api.create_project(level2['id'], level2) PROVIDERS.resource_api.create_project(level3['id'], level3) PROVIDERS.resource_api.create_project(level4['id'], level4) # Grant non-inherited role to root (as a spoiler) and to # the level 1 (leaf) project non_inher_entity_root = self.build_role_assignment_entity( project_id=root_id, 
user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_entity_root['links']['assignment']) non_inher_entity_leaf = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_entity_leaf['links']['assignment']) # Grant inherited role to level 2 inher_entity = self.build_role_assignment_entity( project_id=level2['id'], user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_entity['links']['assignment']) # Get effective role assignments collection_url = ( '/role_assignments?scope.project.id=%(project)s' '&include_subtree=True&effective' % {'project': leaf_id} ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) # There should be three assignments returned in total self.assertThat(r.result['role_assignments'], matchers.HasLength(3)) # Assert that the user does not non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_entity_root) # Assert that the user does have non-inherited role on leaf project self.assertRoleAssignmentInListResponse(r, non_inher_entity_leaf) # Assert that the user has inherited role on levels 3 and 4 inher_entity['scope']['project']['id'] = level3['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) inher_entity['scope']['project']['id'] = level4['id'] self.assertRoleAssignmentInListResponse(r, inher_entity) def test_get_inherited_role_assignments_for_project_hierarchy(self): """Call ``GET /role_assignments?scope.OS-INHERIT:inherited_to``. Test Plan: - Create 2 roles - Create a hierarchy of projects with one root and one leaf project - Issue the URL to add a non-inherited user role to the root project - Issue the URL to add an inherited user role to the root project - Issue the URL to filter inherited to projects role assignments - this should return 1 role (inherited) on the root project. 
""" # Create default scenario root_id, leaf_id, non_inherited_role_id, inherited_role_id = ( self._setup_hierarchical_projects_scenario() ) # Grant non-inherited role non_inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.put(non_inher_up_entity['links']['assignment']) # Grant inherited role inher_up_entity = self.build_role_assignment_entity( project_id=root_id, user_id=self.user['id'], role_id=inherited_role_id, inherited_to_projects=True, ) self.put(inher_up_entity['links']['assignment']) # Get inherited role assignments collection_url = ( '/role_assignments?scope.OS-INHERIT:inherited_to=projects' ) r = self.get(collection_url) self.assertValidRoleAssignmentListResponse( r, resource_url=collection_url ) # Assert that the user does not have non-inherited role on root project self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user has inherited role on root project self.assertRoleAssignmentInListResponse(r, inher_up_entity) # Assert that the user does not have non-inherited role on leaf project non_inher_up_entity = self.build_role_assignment_entity( project_id=leaf_id, user_id=self.user['id'], role_id=non_inherited_role_id, ) self.assertRoleAssignmentNotInListResponse(r, non_inher_up_entity) # Assert that the user does not have inherited role on leaf project inher_up_entity['scope']['project']['id'] = leaf_id self.assertRoleAssignmentNotInListResponse(r, inher_up_entity) class ImpliedRolesTests( test_v3.RestfulTestCase, test_v3.AssignmentTestMixin, unit.TestCase ): def _create_role(self): """Call ``POST /roles``.""" ref = unit.new_role_ref() r = self.post('/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) def test_list_implied_roles_none(self): self.prior = self._create_role() url = '/roles/%s/implies' % (self.prior['id']) response = self.get(url).json["role_inference"] self.head(url, expected_status=http.client.OK) 
self.assertEqual(self.prior['id'], response['prior_role']['id']) self.assertEqual(0, len(response['implies'])) def _create_implied_role(self, prior, implied): self.put( '/roles/{}/implies/{}'.format(prior['id'], implied['id']), expected_status=http.client.CREATED, ) def _delete_implied_role(self, prior, implied): self.delete('/roles/{}/implies/{}'.format(prior['id'], implied['id'])) def _setup_prior_two_implied(self): self.prior = self._create_role() self.implied1 = self._create_role() self._create_implied_role(self.prior, self.implied1) self.implied2 = self._create_role() self._create_implied_role(self.prior, self.implied2) def _assert_expected_implied_role_response( self, expected_prior_id, expected_implied_ids ): r = self.get('/roles/%s/implies' % expected_prior_id) response = r.json role_inference = response['role_inference'] self.assertEqual(expected_prior_id, role_inference['prior_role']['id']) prior_link = '/v3/roles/' + expected_prior_id + '/implies' self.assertThat( response['links']['self'], matchers.EndsWith(prior_link) ) actual_implied_ids = [ implied['id'] for implied in role_inference['implies'] ] self.assertCountEqual(expected_implied_ids, actual_implied_ids) self.assertIsNotNone(role_inference['prior_role']['links']['self']) for implied in role_inference['implies']: self.assertIsNotNone(implied['links']['self']) def _assert_expected_role_inference_rule_response( self, expected_prior_id, expected_implied_id ): url = '/roles/{}/implies/{}'.format( expected_prior_id, expected_implied_id ) response = self.get(url).json self.assertThat( response['links']['self'], matchers.EndsWith('/v3%s' % url) ) role_inference = response['role_inference'] prior_role = role_inference['prior_role'] self.assertEqual(expected_prior_id, prior_role['id']) self.assertIsNotNone(prior_role['name']) self.assertThat( prior_role['links']['self'], matchers.EndsWith('/v3/roles/%s' % expected_prior_id), ) implied_role = role_inference['implies'] self.assertEqual(expected_implied_id, 
implied_role['id']) self.assertIsNotNone(implied_role['name']) self.assertThat( implied_role['links']['self'], matchers.EndsWith('/v3/roles/%s' % expected_implied_id), ) def _assert_two_roles_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id'], self.implied2['id']] ) self._assert_expected_role_inference_rule_response( self.prior['id'], self.implied1['id'] ) self._assert_expected_role_inference_rule_response( self.prior['id'], self.implied2['id'] ) def _assert_one_role_implied(self): self._assert_expected_implied_role_response( self.prior['id'], [self.implied1['id']] ) self.get( '/roles/{}/implies/{}'.format( self.prior['id'], self.implied2['id'] ), expected_status=http.client.NOT_FOUND, ) def _assert_two_rules_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(2, len(rules[0]['implies'])) implied_ids = [implied['id'] for implied in rules[0]['implies']] implied_names = [implied['name'] for implied in rules[0]['implies']] self.assertIn(self.implied1['id'], implied_ids) self.assertIn(self.implied2['id'], implied_ids) self.assertIn(self.implied1['name'], implied_names) self.assertIn(self.implied2['name'], implied_names) def _assert_one_rule_defined(self): r = self.get('/role_inferences/') rules = r.result['role_inferences'] self.assertEqual(self.prior['id'], rules[0]['prior_role']['id']) self.assertEqual(self.implied1['id'], rules[0]['implies'][0]['id']) self.assertEqual(self.implied1['name'], rules[0]['implies'][0]['name']) self.assertEqual(1, len(rules[0]['implies'])) def test_list_all_rules(self): self._setup_prior_two_implied() self._assert_two_rules_defined() self._delete_implied_role(self.prior, self.implied2) self._assert_one_rule_defined() def test_CRD_implied_roles(self): self._setup_prior_two_implied() self._assert_two_roles_implied() self._delete_implied_role(self.prior, self.implied2) 
self._assert_one_role_implied() def _create_three_roles(self): self.role_list = [] for _ in range(3): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) self.role_list.append(role) def _create_test_domain_user_project(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) return domain, user, project def _assign_top_role_to_user_on_project(self, user, project): PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], project['id'], self.role_list[0]['id'] ) def _build_effective_role_assignments_url(self, user): return '/role_assignments?effective&user.id={user_id}'.format( user_id=user['id'] ) def _assert_all_roles_in_assignment(self, response, user): # Now use the list role assignments api to check that all three roles # appear in the collection self.assertValidRoleAssignmentListResponse( response, expected_length=len(self.role_list), resource_url=self._build_effective_role_assignments_url(user), ) def _assert_initial_assignment_in_effective(self, response, user, project): # The initial assignment should be there (the link url will be # generated and checked automatically since it matches the assignment) entity = self.build_role_assignment_entity( project_id=project['id'], user_id=user['id'], role_id=self.role_list[0]['id'], ) self.assertRoleAssignmentInListResponse(response, entity) def _assert_effective_role_for_implied_has_prior_in_links( self, response, user, project, prior_index, implied_index ): # An effective role for an implied role will have the prior role # assignment in the links prior_link = '/prior_roles/{prior}/implies/{implied}'.format( prior=self.role_list[prior_index]['id'], implied=self.role_list[implied_index]['id'], ) link = self.build_role_assignment_link( project_id=project['id'], 
user_id=user['id'], role_id=self.role_list[prior_index]['id'], ) entity = self.build_role_assignment_entity( link=link, project_id=project['id'], user_id=user['id'], role_id=self.role_list[implied_index]['id'], prior_link=prior_link, ) self.assertRoleAssignmentInListResponse(response, entity) def test_list_role_assignments_with_implied_roles(self): """Call ``GET /role_assignments`` with implied role grant. Test Plan: - Create a domain with a user and a project - Create 3 roles - Role 0 implies role 1 and role 1 implies role 2 - Assign the top role to the project - Issue the URL to check effective roles on project - this should return all 3 roles. - Check the links of the 3 roles indicate the prior role where appropriate """ (domain, user, project) = self._create_test_domain_user_project() self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(user, project) response = self.get(self._build_effective_role_assignments_url(user)) r = response self._assert_all_roles_in_assignment(r, user) self._assert_initial_assignment_in_effective(response, user, project) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 0, 1 ) self._assert_effective_role_for_implied_has_prior_in_links( response, user, project, 1, 2 ) def _create_named_role(self, name): role = unit.new_role_ref() role['name'] = name PROVIDERS.role_api.create_role(role['id'], role) return role def test_root_role_as_implied_role_forbidden(self): """Test root role is forbidden to be set as an implied role. Create 2 roles that are prohibited from being an implied role. Create 1 additional role which should be accepted as an implied role. Assure the prohibited role names cannot be set as an implied role. Assure the accepted role name which is not a member of the prohibited implied role list can be successfully set an implied role. 
""" prohibited_name1 = 'root1' prohibited_name2 = 'root2' accepted_name1 = 'implied1' prohibited_names = [prohibited_name1, prohibited_name2] self.config_fixture.config( group='assignment', prohibited_implied_role=prohibited_names ) prior_role = self._create_role() prohibited_role1 = self._create_named_role(prohibited_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role1['id'], ) self.put(url, expected_status=http.client.FORBIDDEN) prohibited_role2 = self._create_named_role(prohibited_name2) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=prohibited_role2['id'], ) self.put(url, expected_status=http.client.FORBIDDEN) accepted_role1 = self._create_named_role(accepted_name1) url = '/roles/{prior_role_id}/implies/{implied_role_id}'.format( prior_role_id=prior_role['id'], implied_role_id=accepted_role1['id'], ) self.put(url, expected_status=http.client.CREATED) def test_trusts_from_implied_role(self): self._create_three_roles() self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id'], ) r = 
self.v3_create_token(auth_data) token = r.result['token'] self.assertThat( token['roles'], matchers.HasLength(len(self.role_list)) ) for role in token['roles']: self.assertIn(role, self.role_list) for role in self.role_list: self.assertIn(role, token['roles']) def test_trusts_from_domain_specific_implied_role(self): self._create_three_roles() # Overwrite the first role with a domain specific role role = unit.new_role_ref(domain_id=self.domain_id) self.role_list[0] = PROVIDERS.role_api.create_role(role['id'], role) self._create_implied_role(self.role_list[0], self.role_list[1]) self._create_implied_role(self.role_list[1], self.role_list[2]) self._assign_top_role_to_user_on_project(self.user, self.project) # Create a trustee and assign the prior role to her trustee = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) ref = unit.new_trust_ref( trustor_user_id=self.user['id'], trustee_user_id=trustee['id'], project_id=self.project['id'], role_ids=[self.role_list[0]['id']], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = r.result['trust'] # Only the role that was specified is in the trust, NOT implied roles self.assertEqual(self.role_list[0]['id'], trust['roles'][0]['id']) self.assertThat(trust['roles'], matchers.HasLength(1)) # Authenticate as the trustee auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id'], ) r = self.v3_create_token(auth_data) token = r.result['token'] # The token should have the roles implies by the domain specific role, # but not the domain specific role itself. 
self.assertThat( token['roles'], matchers.HasLength(len(self.role_list) - 1) ) for role in token['roles']: self.assertIn(role, self.role_list) for role in [self.role_list[1], self.role_list[2]]: self.assertIn(role, token['roles']) self.assertNotIn(self.role_list[0], token['roles']) def test_global_role_cannot_imply_domain_specific_role(self): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) domain_role_ref = unit.new_role_ref(domain_id=domain['id']) domain_role = PROVIDERS.role_api.create_role( domain_role_ref['id'], domain_role_ref ) global_role_ref = unit.new_role_ref() global_role = PROVIDERS.role_api.create_role( global_role_ref['id'], global_role_ref ) self.put( '/roles/{}/implies/{}'.format( global_role['id'], domain_role['id'] ), expected_status=http.client.FORBIDDEN, ) class DomainSpecificRoleTests(test_v3.RestfulTestCase, unit.TestCase): def setUp(self): def create_role(domain_id=None): """Call ``POST /roles``.""" ref = unit.new_role_ref(domain_id=domain_id) r = self.post('/roles', body={'role': ref}) return self.assertValidRoleResponse(r, ref) super().setUp() self.domainA = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainB['id'], self.domainB) self.global_role1 = create_role() self.global_role2 = create_role() # Since there maybe other global roles already created, let's count # them, so we can ensure we can check subsequent list responses # are correct r = self.get('/roles') self.existing_global_roles = len(r.result['roles']) # And now create some domain specific roles self.domainA_role1 = create_role(domain_id=self.domainA['id']) self.domainA_role2 = create_role(domain_id=self.domainA['id']) self.domainB_role = create_role(domain_id=self.domainB['id']) def test_get_and_list_domain_specific_roles(self): # Check we can get a domain specific role r = self.get('/roles/%s' % 
self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) # If we list without specifying a domain, we should only get global # roles back. r = self.get('/roles') self.assertValidRoleListResponse( r, expected_length=self.existing_global_roles ) self.assertRoleInListResponse(r, self.global_role1) self.assertRoleInListResponse(r, self.global_role2) self.assertRoleNotInListResponse(r, self.domainA_role1) self.assertRoleNotInListResponse(r, self.domainA_role2) self.assertRoleNotInListResponse(r, self.domainB_role) # Now list those in domainA, making sure that's all we get back r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=2) self.assertRoleInListResponse(r, self.domainA_role1) self.assertRoleInListResponse(r, self.domainA_role2) def test_update_domain_specific_roles(self): self.domainA_role1['name'] = uuid.uuid4().hex self.patch( '/roles/{role_id}'.format(role_id=self.domainA_role1['id']), body={'role': self.domainA_role1}, ) r = self.get('/roles/%s' % self.domainA_role1['id']) self.assertValidRoleResponse(r, self.domainA_role1) def test_delete_domain_specific_roles(self): # Check delete only removes that one domain role self.delete( '/roles/{role_id}'.format(role_id=self.domainA_role1['id']) ) self.get( '/roles/%s' % self.domainA_role1['id'], expected_status=http.client.NOT_FOUND, ) # Now re-list those in domainA, making sure there's only one left r = self.get('/roles?domain_id=%s' % self.domainA['id']) self.assertValidRoleListResponse(r, expected_length=1) self.assertRoleInListResponse(r, self.domainA_role2) def test_same_domain_assignment(self): user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domainA['id'] ) projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'], ) def 
test_cross_domain_assignment_valid(self): user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domainB['id'] ) projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) # Positive: a role on domainA can be assigned to a user from domainB # but only for use on a project from domainA PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'], ) def test_cross_domain_assignment_invalid(self): user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domainB['id'] ) projectB = unit.new_project_ref(domain_id=self.domainB['id']) PROVIDERS.resource_api.create_project(projectB['id'], projectB) # Negative: a role on domainA can be assigned to a user from domainB # only for a project from domainA self.assertRaises( exception.DomainSpecificRoleMismatch, PROVIDERS.assignment_api.create_grant, self.domainA_role1['id'], user_id=user['id'], project_id=projectB['id'], ) def test_cross_domain_implied_roles_authentication(self): # Create a user in domainB user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domainB['id'] ) # Create project in domainA projectA = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) # Now we create an implied rule from a role in domainA to a # role in domainB self.put( '/roles/%s/implies/%s' % (self.domainA_role1['id'], self.domainB_role['id']), expected_status=http.client.CREATED, ) # A role in domainA can be assigned to a user from domainB # only for a project from domainA PROVIDERS.assignment_api.create_grant( self.domainA_role1['id'], user_id=user['id'], project_id=projectA['id'], ) # The role assignments should return an empty list since domain roles # can only be used to imply another roles assignments = PROVIDERS.assignment_api.list_role_assignments( user_id=user['id'], effective=True ) self.assertEqual([], assignments) # This also means 
we can't authenticate using the existing assignment auth_body = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=projectA['id'], ) self.post( '/auth/tokens', body=auth_body, expected_status=http.client.UNAUTHORIZED, ) class ListUserProjectsTestCase(test_v3.RestfulTestCase): """Test for /users//projects.""" def load_sample_data(self): # do not load base class's data, keep it focused on the tests self.auths = [] self.domains = [] self.projects = [] self.roles = [] self.users = [] root_domain = unit.new_domain_ref( id=resource_base.NULL_DOMAIN_ID, name=resource_base.NULL_DOMAIN_ID ) self.resource_api.create_domain( resource_base.NULL_DOMAIN_ID, root_domain ) # Create 3 sets of domain, roles, projects, and users to demonstrate # the right user's data is loaded and only projects they can access # are returned. for _ in range(3): domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) PROVIDERS.assignment_api.create_grant( role['id'], user_id=user['id'], domain_id=domain['id'] ) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) PROVIDERS.assignment_api.create_grant( role['id'], user_id=user['id'], project_id=project['id'] ) auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) self.auths.append(auth) self.domains.append(domain) self.projects.append(project) self.roles.append(role) self.users.append(user) def test_list_head_all(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] url = '/users/%s/projects' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], 
projects_result[0]['id']) self.head(url, auth=auth, expected_status=http.client.OK) def test_list_enabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] # There are no disabled projects url = '/users/%s/projects?enabled=True' % user['id'] result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_disabled(self): for i in range(len(self.users)): user = self.users[i] auth = self.auths[i] project = self.projects[i] # There are no disabled projects url = '/users/%s/projects?enabled=False' % user['id'] result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # disable this one and check again project['enabled'] = False PROVIDERS.resource_api.update_project(project['id'], project) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) def test_list_by_domain_id(self): for i in range(len(self.users)): user = self.users[i] domain = self.domains[i] auth = self.auths[i] # Try looking for projects with a non-existent domain_id url = '/users/{}/projects?domain_id={}'.format( user['id'], uuid.uuid4().hex, ) result = self.get(url, auth=auth) self.assertEqual(0, len(result.json['projects'])) # Now try a valid one url = '/users/{}/projects?domain_id={}'.format( user['id'], domain['id'], ) result = self.get(url, auth=auth) projects_result = result.json['projects'] self.assertEqual(1, len(projects_result)) self.assertEqual(self.projects[i]['id'], projects_result[0]['id']) # FIXME(lbragstad): These tests contain system-level API calls, which means # they will log a warning message if they are called with a project-scoped # token, regardless of the role assignment on the project. 
We need to fix # them by using a proper system-scoped admin token to make the call instead # of a project scoped token. class UserSystemRoleAssignmentTestCase( test_v3.RestfulTestCase, SystemRoleAssignmentMixin ): def test_assign_system_role_to_user(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # validate the role assignment self.head(member_url) # list system roles collection_url = '/system/users/{user_id}/roles'.format( user_id=self.user['id'] ) roles = self.get(collection_url).json_body['roles'] self.assertEqual(len(roles), 1) self.assertEqual(roles[0]['id'], system_role_id) self.head(collection_url, expected_status=http.client.OK) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertValidRoleAssignmentListResponse(response) def test_list_role_assignments_for_user_returns_all_assignments(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # the response should contain one role assignment for the system role # and one for a role that was setup during setUp(). 
response = self.get( '/role_assignments?user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=2) def test_list_system_roles_for_user_returns_none_without_assignment(self): # list system roles for user collection_url = '/system/users/{user_id}/roles'.format( user_id=self.user['id'] ) response = self.get(collection_url) # assert that the user doesn't have any system role assignments, which # is denoted by an empty list self.assertEqual(response.json_body['roles'], []) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['role_assignments']), 0) self.assertValidRoleAssignmentListResponse(response) def test_list_system_roles_for_user_does_not_return_project_roles(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # list project role assignments and save the role id of that # assignment, this assignment was created during setUp response = self.get( '/projects/%(project_id)s/users/%(user_id)s/roles' % {'project_id': self.project['id'], 'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['roles']), 1) project_role_id = response.json_body['roles'][0]['id'] # list system role assignments collection_url = '/system/users/{user_id}/roles'.format( user_id=self.user['id'] ) response = self.get(collection_url) # assert the project role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], project_role_id) # make sure the role_assignment API filters correctly based on system # scope response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['role_assignments']), 1) 
system_assignment = response.json_body['role_assignments'][0] self.assertEqual(system_assignment['role']['id'], system_role_id) self.assertTrue(system_assignment['scope']['system']['all']) # make sure the role_assignment API doesn't include the system role # assignment when we filter based on project path = ( '/role_assignments?scope.project.id=%(project_id)s&' 'user.id=%(user_id)s' ) % {'project_id': self.project['id'], 'user_id': self.user['id']} response = self.get(path) self.assertEqual(len(response.json_body['role_assignments']), 1) project_assignment = response.json_body['role_assignments'][0] self.assertEqual(project_assignment['role']['id'], project_role_id) def test_list_system_roles_for_user_does_not_return_domain_roles(self): system_role_id = self._create_new_role() domain_role_id = self._create_new_role() # assign a role to the user on a domain domain_member_url = ( '/domains/%(domain_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'domain_id': self.user['domain_id'], 'user_id': self.user['id'], 'role_id': domain_role_id, } ) self.put(domain_member_url) # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # list domain role assignments response = self.get( '/domains/%(domain_id)s/users/%(user_id)s/roles' % {'domain_id': self.user['domain_id'], 'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['roles']), 1) # list system role assignments collection_url = '/system/users/{user_id}/roles'.format( user_id=self.user['id'] ) response = self.get(collection_url) # assert the domain role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], domain_role_id) # make sure the role_assignment API filters correctly based on system # scope response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) 
self.assertEqual(len(response.json_body['role_assignments']), 1) system_assignment = response.json_body['role_assignments'][0] self.assertEqual(system_assignment['role']['id'], system_role_id) self.assertTrue(system_assignment['scope']['system']['all']) # make sure the role_assignment API doesn't include the system role # assignment when we filter based on domain path = ( '/role_assignments?scope.domain.id=%(domain_id)s&' 'user.id=%(user_id)s' ) % {'domain_id': self.user['domain_id'], 'user_id': self.user['id']} response = self.get(path) self.assertEqual(len(response.json_body['role_assignments']), 1) domain_assignment = response.json_body['role_assignments'][0] self.assertEqual(domain_assignment['role']['id'], domain_role_id) def test_check_user_has_system_role_when_assignment_exists(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # check the user has the system role assignment self.head(member_url) def test_check_user_does_not_have_system_role_without_assignment(self): system_role_id = self._create_new_role() # check the user does't have the system role assignment member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.head(member_url, expected_status=http.client.NOT_FOUND) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['role_assignments']), 0) self.assertValidRoleAssignmentListResponse(response) def test_unassign_system_role_from_user(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # ensure the user has the role assignment self.head(member_url) response = 
self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertEqual(len(response.json_body['role_assignments']), 1) self.assertValidRoleAssignmentListResponse(response) # remove the system role assignment from the user self.delete(member_url) # ensure the user doesn't have any system role assignments collection_url = '/system/users/{user_id}/roles'.format( user_id=self.user['id'] ) response = self.get(collection_url) self.assertEqual(len(response.json_body['roles']), 0) response = self.get( '/role_assignments?scope.system=all&user.id=%(user_id)s' % {'user_id': self.user['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_query_for_system_scope_and_domain_scope_fails(self): # When asking for assignments and providing query parameters, we # shouldn't be able to ask for two different types of scope. This is # also true for project + domain scope. path = ( '/role_assignments?scope.system=all' '&scope.domain.id=%(domain_id)s' ) % {'domain_id': self.domain_id} self.get(path, expected_status=http.client.BAD_REQUEST) def test_query_for_system_scope_and_project_scope_fails(self): # When asking for assignments and providing query parameters, we # shouldn't be able to ask for two different types of scope. This is # also true for project + domain scope. path = ( '/role_assignments?scope.system=all' '&scope.project.id=%(project_id)s' ) % {'project_id': self.project_id} self.get(path, expected_status=http.client.BAD_REQUEST) def test_query_for_role_id_does_not_return_system_user_roles(self): system_role_id = self._create_new_role() # assign the user a role on the system member_url = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role_id, ) self.put(member_url) # Make sure we only get one role assignment back since the system role # assignment shouldn't be returned. 
path = ( '/role_assignments?role.id=%(role_id)s&user.id=%(user_id)s' ) % {'role_id': self.role_id, 'user_id': self.user['id']} response = self.get(path) self.assertValidRoleAssignmentListResponse(response, expected_length=1) # FIXME(lbragstad): These tests contain system-level API calls, which means # they will log a warning message if they are called with a project-scoped # token, regardless of the role assignment on the project. We need to fix # them by using a proper system-scoped admin token to make the call instead # of a project scoped token. class GroupSystemRoleAssignmentTestCase( test_v3.RestfulTestCase, SystemRoleAssignmentMixin ): def test_assign_system_role_to_group(self): system_role_id = self._create_new_role() group = self._create_group() # assign the role to the group globally member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) # validate the role assignment self.head(member_url) # list global roles collection_url = '/system/groups/{group_id}/roles'.format( group_id=group['id'] ) roles = self.get(collection_url).json_body['roles'] self.assertEqual(len(roles), 1) self.assertEqual(roles[0]['id'], system_role_id) self.head(collection_url, expected_status=http.client.OK) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( response.json_body['role_assignments'][0]['role']['id'], system_role_id, ) def test_assign_system_role_to_non_existant_group_fails(self): system_role_id = self._create_new_role() group_id = uuid.uuid4().hex # assign the role to the group globally member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group_id, role_id=system_role_id, ) self.put(member_url, expected_status=http.client.NOT_FOUND) def test_list_role_assignments_for_group_returns_all_assignments(self): system_role_id = 
self._create_new_role() group = self._create_group() # assign the role to the group globally and on a single project member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' ) % { 'project_id': self.project_id, 'group_id': group['id'], 'role_id': system_role_id, } self.put(member_url) # make sure both assignments exist in the response, there should be two response = self.get( '/role_assignments?group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=2) def test_list_system_roles_for_group_returns_none_without_assignment(self): group = self._create_group() # list global roles for group collection_url = '/system/groups/{group_id}/roles'.format( group_id=group['id'] ) response = self.get(collection_url) # assert that the group doesn't have any system role assignments, which # is denoted by an empty list self.assertEqual(response.json_body['roles'], []) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_list_system_roles_for_group_does_not_return_project_roles(self): system_role_id = self._create_new_role() project_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system and a role on a project member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' ) % { 'project_id': self.project_id, 'group_id': group['id'], 'role_id': project_role_id, } self.put(member_url) # list system role assignments collection_url = '/system/groups/{group_id}/roles'.format( group_id=group['id'] ) response = self.get(collection_url) # 
assert the project role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], project_role_id) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) def test_list_system_roles_for_group_does_not_return_domain_roles(self): system_role_id = self._create_new_role() domain_role_id = self._create_new_role() group = self._create_group() # assign a role to the group on a domain domain_member_url = ( '/domains/%(domain_id)s/groups/%(group_id)s/' 'roles/%(role_id)s' % { 'domain_id': group['domain_id'], 'group_id': group['id'], 'role_id': domain_role_id, } ) self.put(domain_member_url) # assign the group a role on the system member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) # list domain role assignments response = self.get( '/domains/%(domain_id)s/groups/%(group_id)s/roles' % {'domain_id': group['domain_id'], 'group_id': group['id']} ) self.assertEqual(len(response.json_body['roles']), 1) # list system role assignments collection_url = '/system/groups/{group_id}/roles'.format( group_id=group['id'] ) response = self.get(collection_url) # assert the domain role assignment is not in the system role # assignments for role in response.json_body['roles']: self.assertNotEqual(role['id'], domain_role_id) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) def test_check_group_has_system_role_when_assignment_exists(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) # check the group 
has the system role assignment self.head(member_url) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=1) self.assertEqual( response.json_body['role_assignments'][0]['role']['id'], system_role_id, ) def test_check_group_does_not_have_system_role_without_assignment(self): system_role_id = self._create_new_role() group = self._create_group() # check the group does't have the system role assignment member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.head(member_url, expected_status=http.client.NOT_FOUND) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def test_unassign_system_role_from_group(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) # ensure the group has the role assignment self.head(member_url) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertEqual(len(response.json_body['role_assignments']), 1) self.assertValidRoleAssignmentListResponse(response) # remove the system role assignment from the group self.delete(member_url) # ensure the group doesn't have any system role assignments collection_url = '/system/groups/{group_id}/roles'.format( group_id=group['id'] ) response = self.get(collection_url) self.assertEqual(len(response.json_body['roles']), 0) response = self.get( '/role_assignments?scope.system=all&group.id=%(group_id)s' % {'group_id': group['id']} ) self.assertValidRoleAssignmentListResponse(response, expected_length=0) def 
test_query_for_role_id_does_not_return_system_group_roles(self): system_role_id = self._create_new_role() group = self._create_group() # assign the group a role on the system member_url = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=system_role_id, ) self.put(member_url) # assign the group a role on the system member_url = ( '/projects/%(project_id)s/groups/%(group_id)s/roles/%(role_id)s' % { 'project_id': self.project_id, 'group_id': group['id'], 'role_id': self.role_id, } ) self.put(member_url) # Make sure we only get one role assignment back since the system role # assignment shouldn't be returned. path = ( '/role_assignments?role.id=%(role_id)s&group.id=%(group_id)s' ) % {'role_id': self.role_id, 'group_id': group['id']} response = self.get(path) self.assertValidRoleAssignmentListResponse(response, expected_length=1) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_v3_auth.py0000664000175000017500000075616700000000000022455 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import http.client import itertools import operator import re from unittest import mock from urllib import parse import uuid from cryptography.hazmat.primitives.serialization import Encoding import fixtures import freezegun from oslo_serialization import jsonutils as json from oslo_utils import fixture from oslo_utils import timeutils from testtools import matchers from testtools import testcase from keystone import auth from keystone.auth.plugins import totp from keystone.common import authorization from keystone.common import provider_api from keystone.common.rbac_enforcer import policy from keystone.common import utils import keystone.conf from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone.identity.backends import resource_options as ro from keystone.tests.common import auth as common_auth from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestMFARules(test_v3.RestfulTestCase): def config_overrides(self): super().config_overrides() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) def assertValidErrorResponse(self, r): resp = r.result if r.headers.get(authorization.AUTH_RECEIPT_HEADER): self.assertIsNotNone(resp.get('receipt')) self.assertIsNotNone(resp.get('receipt').get('methods')) else: self.assertIsNotNone(resp.get('error')) self.assertIsNotNone(resp['error'].get('code')) self.assertIsNotNone(resp['error'].get('title')) self.assertIsNotNone(resp['error'].get('message')) self.assertEqual(int(resp['error']['code']), r.status_code) def _create_totp_cred(self): totp_cred = unit.new_totp_credential(self.user_id, self.project_id) 
PROVIDERS.credential_api.create_credential(uuid.uuid4().hex, totp_cred) def cleanup(testcase): totp_creds = testcase.credential_api.list_credentials_for_user( testcase.user['id'], type='totp' ) for cred in totp_creds: testcase.credential_api.delete_credential(cred['id']) self.addCleanup(cleanup, testcase=self) return totp_cred def auth_plugin_config_override(self, methods=None, **method_classes): methods = ['totp', 'token', 'password'] super().auth_plugin_config_override(methods) def _update_user_with_MFA_rules(self, rule_list, rules_enabled=True): user = self.user.copy() # Do not update password user.pop('password') user['options'][ro.MFA_RULES_OPT.option_name] = rule_list user['options'][ro.MFA_ENABLED_OPT.option_name] = rules_enabled PROVIDERS.identity_api.update_user(user['id'], user) def test_MFA_single_method_rules_requirements_met_succeeds(self): # ensure that a simple password works if a password-only rules exists rule_list = [['password'], ['password', 'totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ) ) def test_MFA_multi_method_rules_requirements_met_succeeds(self): # validate that multiple auth-methods function if all are specified # and the rules requires it rule_list = [['password', 'totp']] totp_cred = self._create_totp_cred() self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. 
This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): auth_req = self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, passcode=totp._generate_totp_passcodes(totp_cred['blob'])[0], ) self.v3_create_token(auth_req) def test_MFA_single_method_rules_requirements_not_met_fails(self): # if a rule matching a single auth type is specified and is not matched # the result should be unauthorized rule_list = [['totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ), expected_status=http.client.UNAUTHORIZED, ) def test_MFA_multi_method_rules_requirements_not_met_fails(self): # if multiple rules are specified and only one is passed, # unauthorized is expected rule_list = [['password', 'totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. 
time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ), expected_status=http.client.UNAUTHORIZED, ) def test_MFA_rules_bogus_non_existing_auth_method_succeeds(self): # Bogus auth methods are thrown out from rules. rule_list = [['password'], ['BoGusAuThMeTh0dHandl3r']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ) ) def test_MFA_rules_disabled_MFA_succeeeds(self): # ensure that if MFA is "disableD" authentication succeeds, even if # not enough auth methods are specified rule_list = [['password', 'totp']] self._update_user_with_MFA_rules( rule_list=rule_list, rules_enabled=False ) time = timeutils.utcnow() + datetime.timedelta(seconds=5) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. 
with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ) ) def test_MFA_rules_all_bogus_rules_results_in_default_behavior(self): # if all the rules are bogus, the result is the same as the default # behavior, any single password method is sufficient rule_list = [ [uuid.uuid4().hex, uuid.uuid4().hex], ['BoGus'], ['NonExistantMethod'], ] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): self.v3_create_token( self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ) ) def test_MFA_rules_rescope_works_without_token_method_in_rules(self): rule_list = [['password', 'totp']] totp_cred = self._create_totp_cred() self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. 
time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): auth_data = self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, passcode=totp._generate_totp_passcodes(totp_cred['blob'])[0], ) r = self.v3_create_token(auth_data) auth_data = self.build_authentication_request( token=r.headers.get('X-Subject-Token'), project_id=self.project_id, ) self.v3_create_token(auth_data) def test_MFA_requirements_makes_correct_receipt_for_password(self): # if multiple rules are specified and only one is passed, # unauthorized is expected rule_list = [['password', 'totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): response = self.admin_request( method='POST', path='/v3/auth/tokens', body=self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ), expected_status=http.client.UNAUTHORIZED, ) self.assertIsNotNone( response.headers.get(authorization.AUTH_RECEIPT_HEADER) ) resp_data = response.result # NOTE(adriant): We convert to sets to avoid any potential sorting # related failures since order isn't important, just content. 
self.assertEqual( {'password'}, set(resp_data.get('receipt').get('methods')) ) self.assertEqual( {frozenset(r) for r in rule_list}, {frozenset(r) for r in resp_data.get('required_auth_methods')}, ) def test_MFA_requirements_makes_correct_receipt_for_totp(self): # if multiple rules are specified and only one is passed, # unauthorized is expected totp_cred = self._create_totp_cred() rule_list = [['password', 'totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): response = self.admin_request( method='POST', path='/v3/auth/tokens', body=self.build_authentication_request( user_id=self.user_id, user_domain_id=self.domain_id, project_id=self.project_id, passcode=totp._generate_totp_passcodes(totp_cred['blob'])[ 0 ], ), expected_status=http.client.UNAUTHORIZED, ) self.assertIsNotNone( response.headers.get(authorization.AUTH_RECEIPT_HEADER) ) resp_data = response.result # NOTE(adriant): We convert to sets to avoid any potential sorting # related failures since order isn't important, just content. self.assertEqual( {'totp'}, set(resp_data.get('receipt').get('methods')) ) self.assertEqual( {frozenset(r) for r in rule_list}, {frozenset(r) for r in resp_data.get('required_auth_methods')}, ) def test_MFA_requirements_makes_correct_receipt_for_pass_and_totp(self): # if multiple rules are specified and only one is passed, # unauthorized is expected totp_cred = self._create_totp_cred() rule_list = [['password', 'totp', 'token']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. 
This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): response = self.admin_request( method='POST', path='/v3/auth/tokens', body=self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, passcode=totp._generate_totp_passcodes(totp_cred['blob'])[ 0 ], ), expected_status=http.client.UNAUTHORIZED, ) self.assertIsNotNone( response.headers.get(authorization.AUTH_RECEIPT_HEADER) ) resp_data = response.result # NOTE(adriant): We convert to sets to avoid any potential sorting # related failures since order isn't important, just content. self.assertEqual( {'password', 'totp'}, set(resp_data.get('receipt').get('methods')) ) self.assertEqual( {frozenset(r) for r in rule_list}, {frozenset(r) for r in resp_data.get('required_auth_methods')}, ) def test_MFA_requirements_returns_correct_required_auth_methods(self): # if multiple rules are specified and only one is passed, # unauthorized is expected rule_list = [ ['password', 'totp', 'token'], ['password', 'totp'], ['token', 'totp'], ['BoGusAuThMeTh0dHandl3r'], ] expect_rule_list = rule_list = [ ['password', 'totp', 'token'], ['password', 'totp'], ] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. 
time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): response = self.admin_request( method='POST', path='/v3/auth/tokens', body=self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ), expected_status=http.client.UNAUTHORIZED, ) self.assertIsNotNone( response.headers.get(authorization.AUTH_RECEIPT_HEADER) ) resp_data = response.result # NOTE(adriant): We convert to sets to avoid any potential sorting # related failures since order isn't important, just content. self.assertEqual( {'password'}, set(resp_data.get('receipt').get('methods')) ) self.assertEqual( {frozenset(r) for r in expect_rule_list}, {frozenset(r) for r in resp_data.get('required_auth_methods')}, ) def test_MFA_consuming_receipt_with_totp(self): # if multiple rules are specified and only one is passed, # unauthorized is expected totp_cred = self._create_totp_cred() rule_list = [['password', 'totp']] self._update_user_with_MFA_rules(rule_list=rule_list) # NOTE(notmorgan): Step forward in time to ensure we're not causing # issues with revocation events that occur at the same time as the # token issuance. This is a bug with the limited resolution that # tokens and revocation events have. time = timeutils.utcnow() + datetime.timedelta(seconds=5) with freezegun.freeze_time(time): response = self.admin_request( method='POST', path='/v3/auth/tokens', body=self.build_authentication_request( user_id=self.user_id, password=self.user['password'], user_domain_id=self.domain_id, project_id=self.project_id, ), expected_status=http.client.UNAUTHORIZED, ) self.assertIsNotNone( response.headers.get(authorization.AUTH_RECEIPT_HEADER) ) receipt = response.headers.get(authorization.AUTH_RECEIPT_HEADER) resp_data = response.result # NOTE(adriant): We convert to sets to avoid any potential sorting # related failures since order isn't important, just content. 
            self.assertEqual(
                {'password'}, set(resp_data.get('receipt').get('methods'))
            )
            self.assertEqual(
                {frozenset(r) for r in rule_list},
                {frozenset(r) for r in resp_data.get('required_auth_methods')},
            )

        time = timeutils.utcnow() + datetime.timedelta(seconds=5)
        with freezegun.freeze_time(time):
            # Second request: present the receipt plus the missing TOTP
            # factor; together they satisfy the rule and the request should
            # now succeed (no expected_status override).
            response = self.admin_request(
                method='POST',
                path='/v3/auth/tokens',
                headers={authorization.AUTH_RECEIPT_HEADER: receipt},
                body=self.build_authentication_request(
                    user_id=self.user_id,
                    user_domain_id=self.domain_id,
                    project_id=self.project_id,
                    passcode=totp._generate_totp_passcodes(totp_cred['blob'])[
                        0
                    ],
                ),
            )

    def test_MFA_consuming_receipt_not_found(self):
        # An unknown receipt header must be rejected with 401.
        time = timeutils.utcnow() + datetime.timedelta(seconds=5)
        with freezegun.freeze_time(time):
            response = self.admin_request(
                method='POST',
                path='/v3/auth/tokens',
                headers={authorization.AUTH_RECEIPT_HEADER: "bogus-receipt"},
                body=self.build_authentication_request(
                    user_id=self.user_id,
                    user_domain_id=self.domain_id,
                    project_id=self.project_id,
                ),
                expected_status=http.client.UNAUTHORIZED,
            )
            self.assertEqual(401, response.result['error']['code'])


class TestAuthInfo(common_auth.AuthTestMixin, testcase.TestCase):
    # Unit-style tests for auth.core.AuthInfo request parsing/validation;
    # no live server is involved, only the request-body validator.

    def setUp(self):
        super().setUp()
        auth.core.load_auth_methods()

    def test_unsupported_auth_method(self):
        auth_data = {'methods': ['abc']}
        auth_data['abc'] = {'test': 'test'}
        auth_data = {'identity': auth_data}
        self.assertRaises(
            exception.AuthMethodNotSupported,
            auth.core.AuthInfo.create,
            auth_data,
        )

    def test_missing_auth_method_data(self):
        # 'password' is listed in methods but has no payload.
        auth_data = {'methods': ['password']}
        auth_data = {'identity': auth_data}
        self.assertRaises(
            exception.ValidationError, auth.core.AuthInfo.create, auth_data
        )

    def test_project_name_no_domain(self):
        # A project name without a domain qualifier is ambiguous.
        auth_data = self.build_authentication_request(
            username='test', password='test', project_name='abc'
        )['auth']
        self.assertRaises(
            exception.ValidationError, auth.core.AuthInfo.create, auth_data
        )

    def test_both_project_and_domain_in_scope(self):
        # Requesting project scope and domain scope at once is invalid.
        auth_data = self.build_authentication_request(
            user_id='test',
            password='test',
            project_name='test',
            domain_name='test',
        )['auth']
        self.assertRaises(
            exception.ValidationError, auth.core.AuthInfo.create, auth_data
        )

    def test_get_method_names_duplicates(self):
        auth_data = self.build_authentication_request(
            token='test', user_id='test', password='test'
        )['auth']
        auth_data['identity']['methods'] = [
            'password',
            'token',
            'password',
            'password',
        ]
        auth_info = auth.core.AuthInfo.create(auth_data)
        # Duplicates are collapsed, preserving first-appearance order.
        self.assertEqual(['password', 'token'], auth_info.get_method_names())

    def test_get_method_data_invalid_method(self):
        auth_data = self.build_authentication_request(
            user_id='test', password='test'
        )['auth']
        auth_info = auth.core.AuthInfo.create(auth_data)
        # A random method name was never part of the request.
        method_name = uuid.uuid4().hex
        self.assertRaises(
            exception.ValidationError, auth_info.get_method_data, method_name
        )


class TokenAPITests:
    # Why is this not just setUp? Because TokenAPITests is not a test class
    # itself. If TokenAPITests became a subclass of the testcase, it would get
    # called by the enumerate-tests-in-file code. The way the functions get
    # resolved in Python for multiple inheritance means that a setUp in this
    # would get skipped by the testrunner.
    def doSetUp(self):
        # Issue an initial unscoped token for self.user and stash both the
        # parsed body and the raw token id for use by individual tests.
        # (Named doSetUp, not setUp — see the class-level comment.)
        r = self.v3_create_token(
            self.build_authentication_request(
                username=self.user['name'],
                user_domain_id=self.domain_id,
                password=self.user['password'],
            )
        )
        self.v3_token_data = r.result
        self.v3_token = r.headers.get('X-Subject-Token')
        self.headers = {'X-Subject-Token': r.headers.get('X-Subject-Token')}

    def _get_unscoped_token(self):
        # Authenticate without a scope and return the new token id.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidUnscopedTokenResponse(r)
        return r.headers.get('X-Subject-Token')

    def _get_domain_scoped_token(self):
        # Authenticate scoped to self.domain_id and return the token id.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            domain_id=self.domain_id,
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidDomainScopedTokenResponse(r)
        return r.headers.get('X-Subject-Token')

    def _get_project_scoped_token(self):
        # Authenticate scoped to self.project_id and return the token id.
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project_id,
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(r)
        return r.headers.get('X-Subject-Token')

    def _get_trust_scoped_token(self, trustee_user, trust):
        # Authenticate as the trustee against the given trust and return the
        # resulting (project-scoped) token id.
        auth_data = self.build_authentication_request(
            user_id=trustee_user['id'],
            password=trustee_user['password'],
            trust_id=trust['id'],
        )
        r = self.post('/auth/tokens', body=auth_data)
        self.assertValidProjectScopedTokenResponse(r)
        return r.headers.get('X-Subject-Token')

    def _create_trust(self, impersonation=False):
        # Create a trustee user
        trustee_user = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domain_id
        )
        ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=trustee_user['id'],
            project_id=self.project_id,
            impersonation=impersonation,
            role_ids=[self.role_id],
        )
        # Create a trust
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)
        return (trustee_user, trust)

    def _validate_token(
        self, token, expected_status=http.client.OK, allow_expired=False
    ):
        # GET /v3/auth/tokens with the subject token; allow_expired adds
        # ?allow_expired=1 so an expired token may still validate.
        path = '/v3/auth/tokens'
        if allow_expired:
            path += '?allow_expired=1'
        return self.admin_request(
            path=path,
            headers={
                'X-Auth-Token': self.get_admin_token(),
                'X-Subject-Token': token,
            },
            method='GET',
            expected_status=expected_status,
        )

    def _revoke_token(self, token, expected_status=http.client.NO_CONTENT):
        # DELETE /auth/tokens revokes the subject token.
        return self.delete(
            '/auth/tokens',
            headers={'x-subject-token': token},
            expected_status=expected_status,
        )

    def _set_user_enabled(self, user, enabled=True):
        # Toggle the user's enabled flag directly via the identity API.
        user['enabled'] = enabled
        PROVIDERS.identity_api.update_user(user['id'], user)

    def _create_project_and_set_as_default_project(self):
        # create a new project
        ref = unit.new_project_ref(domain_id=self.domain_id)
        r = self.post('/projects', body={'project': ref})
        project = self.assertValidProjectResponse(r, ref)

        # grant the user a role on the project
        self.put(
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s'
            % {
                'user_id': self.user['id'],
                'project_id': project['id'],
                'role_id': self.role['id'],
            }
        )

        # make the new project the user's default project
        body = {'user': {'default_project_id': project['id']}}
        r = self.patch(
            '/users/{user_id}'.format(user_id=self.user['id']), body=body
        )
        self.assertValidUserResponse(r)
        return project

    def test_auth_with_token_as_different_user_fails(self):
        # get the token for a user. This is self.user which is different from
        # self.default_domain_user.
        token = self.get_scoped_token()
        # try both password and token methods with different identities and it
        # should fail
        auth_data = self.build_authentication_request(
            token=token,
            user_id=self.default_domain_user['id'],
            password=self.default_domain_user['password'],
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_token_for_user_without_password_fails(self):
        user = unit.new_user_ref(domain_id=self.domain['id'])
        del user['password']  # can't have a password for this test
        user = PROVIDERS.identity_api.create_user(user)

        auth_data = self.build_authentication_request(
            user_id=user['id'], password='password'
        )

        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_unscoped_token_by_authenticating_with_unscoped_token(self):
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)
        token_id = r.headers.get('X-Subject-Token')

        # Exchange the unscoped token for another unscoped token.
        auth_data = self.build_authentication_request(token=token_id)
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)

    def test_create_unscoped_token_with_user_id(self):
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)

    def test_create_unscoped_token_with_user_domain_id(self):
        auth_data = self.build_authentication_request(
            username=self.user['name'],
            user_domain_id=self.domain['id'],
            password=self.user['password'],
        )
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)

    def test_create_unscoped_token_with_user_domain_name(self):
        auth_data = self.build_authentication_request(
            username=self.user['name'],
            user_domain_name=self.domain['name'],
            password=self.user['password'],
        )
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)

    def test_validate_unscoped_token(self):
        unscoped_token = self._get_unscoped_token()
        r = self._validate_token(unscoped_token)
        self.assertValidUnscopedTokenResponse(r)

    def test_validate_expired_unscoped_token_returns_not_found(self):
        # NOTE(lbragstad): We set token expiration to 10 seconds so that we can
        # use the context manager of freezegun without sqlite issues.
        self.config_fixture.config(group='token', expiration=10)
        time = timeutils.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            unscoped_token = self._get_unscoped_token()
            # Tick past the 10 second expiration window.
            frozen_datetime.tick(delta=datetime.timedelta(seconds=15))
            self._validate_token(
                unscoped_token, expected_status=http.client.NOT_FOUND
            )

    def test_revoke_unscoped_token(self):
        unscoped_token = self._get_unscoped_token()
        r = self._validate_token(unscoped_token)
        self.assertValidUnscopedTokenResponse(r)
        self._revoke_token(unscoped_token)
        self._validate_token(
            unscoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_create_explicit_unscoped_token(self):
        self._create_project_and_set_as_default_project()

        # explicitly ask for an unscoped token
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            unscoped="unscoped",
        )
        r = self.post('/auth/tokens', body=auth_data, noauth=True)
        self.assertValidUnscopedTokenResponse(r)

    def test_disabled_users_default_project_result_in_unscoped_token(self):
        # create a disabled project to work with
        project = self.create_new_default_project_for_user(
            self.user['id'], self.domain_id, enable_project=False
        )

        # assign a role to user for the new project
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user['id'], project['id'], self.role_id
        )

        # attempt to authenticate without requesting a project
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.v3_create_token(auth_data)
        self.assertValidUnscopedTokenResponse(r)

    def test_disabled_default_project_domain_result_in_unscoped_token(self):
domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) domain = self.assertValidDomainResponse(r, domain_ref) project = self.create_new_default_project_for_user( self.user['id'], domain['id'] ) # assign a role to user for the new project PROVIDERS.assignment_api.add_role_to_user_and_project( self.user['id'], project['id'], self.role_id ) # now disable the project domain body = {'domain': {'enabled': False}} r = self.patch( '/domains/{domain_id}'.format(domain_id=domain['id']), body=body ) self.assertValidDomainResponse(r) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_unscoped_token_is_invalid_after_disabling_user(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid r = self._validate_token(unscoped_token) self.assertValidUnscopedTokenResponse(r) # Disable the user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self._validate_token( unscoped_token, expected_status=http.client.NOT_FOUND ) def test_unscoped_token_is_invalid_after_enabling_disabled_user(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid r = self._validate_token(unscoped_token) self.assertValidUnscopedTokenResponse(r) # Disable the user self._set_user_enabled(self.user, enabled=False) # Ensure validating a token for a disabled user fails self._validate_token( unscoped_token, expected_status=http.client.NOT_FOUND ) # Enable the user self._set_user_enabled(self.user) # Ensure validating a token for a re-enabled user fails self._validate_token( unscoped_token, expected_status=http.client.NOT_FOUND ) def test_unscoped_token_is_invalid_after_disabling_user_domain(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid r = 
self._validate_token(unscoped_token) self.assertValidUnscopedTokenResponse(r) # Disable the user's domain self.domain['enabled'] = False PROVIDERS.resource_api.update_domain(self.domain['id'], self.domain) # Ensure validating a token for a disabled user fails self._validate_token( unscoped_token, expected_status=http.client.NOT_FOUND ) def test_unscoped_token_is_invalid_after_changing_user_password(self): unscoped_token = self._get_unscoped_token() # Make sure the token is valid r = self._validate_token(unscoped_token) self.assertValidUnscopedTokenResponse(r) # Change user's password self.user['password'] = 'Password1' PROVIDERS.identity_api.update_user(self.user['id'], self.user) # Ensure updating user's password revokes existing user's tokens self._validate_token( unscoped_token, expected_status=http.client.NOT_FOUND ) def test_create_system_token_with_user_id(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) auth_request_body = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) def test_create_system_token_with_username(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) def test_create_system_token_fails_without_system_assignment(self): auth_request_body = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], system=True, ) self.v3_create_token( auth_request_body, expected_status=http.client.UNAUTHORIZED ) def 
test_system_token_is_invalid_after_disabling_user(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) token = response.headers.get('X-Subject-Token') self._validate_token(token) # NOTE(lbragstad): This would make a good test for groups, but # apparently it's not possible to disable a group. user_ref = {'user': {'enabled': False}} self.patch( '/users/{user_id}'.format(user_id=self.user['id']), body=user_ref ) self.admin_request( path='/v3/auth/tokens', headers={'X-Auth-Token': token, 'X-Subject-Token': token}, method='GET', expected_status=http.client.UNAUTHORIZED, ) self.admin_request( path='/v3/auth/tokens', headers={'X-Auth-Token': token, 'X-Subject-Token': token}, method='HEAD', expected_status=http.client.UNAUTHORIZED, ) def test_create_system_token_via_system_group_assignment(self): ref = { 'group': unit.new_group_ref( domain_id=CONF.identity.default_domain_id ) } group = self.post('/groups', body=ref).json_body['group'] path = '/system/groups/{group_id}/roles/{role_id}'.format( group_id=group['id'], role_id=self.role_id, ) self.put(path=path) path = '/groups/{group_id}/users/{user_id}'.format( group_id=group['id'], user_id=self.user['id'], ) self.put(path=path) auth_request_body = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) token = response.headers.get('X-Subject-Token') self._validate_token(token) def test_revoke_system_token(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) 
self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) token = response.headers.get('X-Subject-Token') self._validate_token(token) self._revoke_token(token) self._validate_token(token, expected_status=http.client.NOT_FOUND) def test_system_token_is_invalid_after_deleting_system_role(self): ref = {'role': unit.new_role_ref()} system_role = self.post('/roles', body=ref).json_body['role'] path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role['id'], ) self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) token = response.headers.get('X-Subject-Token') self._validate_token(token) self.delete('/roles/{role_id}'.format(role_id=system_role['id'])) self._validate_token(token, expected_status=http.client.NOT_FOUND) def test_rescoping_a_system_token_for_a_project_token_fails(self): ref = {'role': unit.new_role_ref()} system_role = self.post('/roles', body=ref).json_body['role'] path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role['id'], ) self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) system_token = response.headers.get('X-Subject-Token') auth_request_body = self.build_authentication_request( token=system_token, project_id=self.project_id ) self.v3_create_token( auth_request_body, 
expected_status=http.client.FORBIDDEN ) def test_rescoping_a_system_token_for_a_domain_token_fails(self): ref = {'role': unit.new_role_ref()} system_role = self.post('/roles', body=ref).json_body['role'] path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=system_role['id'], ) self.put(path=path) auth_request_body = self.build_authentication_request( username=self.user['name'], password=self.user['password'], user_domain_id=self.domain['id'], system=True, ) response = self.v3_create_token(auth_request_body) self.assertValidSystemScopedTokenResponse(response) system_token = response.headers.get('X-Subject-Token') auth_request_body = self.build_authentication_request( token=system_token, domain_id=CONF.identity.default_domain_id ) self.v3_create_token( auth_request_body, expected_status=http.client.FORBIDDEN ) def test_create_domain_token_scoped_with_domain_id_and_user_id(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_scoped_with_domain_id_and_username(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password'], domain_id=self.domain['id'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_scoped_with_domain_id(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) 
self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=self.domain['name'], password=self.user['password'], domain_id=self.domain['id'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_scoped_with_domain_name(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_name=self.domain['name'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_scoped_with_domain_name_and_username(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=self.domain['id'], password=self.user['password'], domain_name=self.domain['name'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_with_only_domain_name_and_username(self): # grant the user a role on the domain path = '/domains/{}/users/{}/roles/{}'.format( self.domain['id'], self.user['id'], self.role['id'], ) self.put(path=path) auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=self.domain['name'], password=self.user['password'], domain_name=self.domain['name'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_with_group_role(self): group = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group) # add user to group PROVIDERS.identity_api.add_user_to_group(self.user['id'], group['id']) # grant the domain role to group path = 
'/domains/{}/groups/{}/roles/{}'.format( self.domain['id'], group['id'], self.role['id'], ) self.put(path=path) # now get a domain-scoped token auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id'], ) r = self.v3_create_token(auth_data) self.assertValidDomainScopedTokenResponse(r) def test_create_domain_token_fails_if_domain_name_unsafe(self): """Verify authenticate to a domain with unsafe name fails.""" # Start with url name restrictions off, so we can create the unsafe # named domain self.config_fixture.config( group='resource', domain_name_url_safe='off' ) unsafe_name = 'i am not / safe' domain = unit.new_domain_ref(name=unsafe_name) PROVIDERS.resource_api.create_domain(domain['id'], domain) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) PROVIDERS.assignment_api.create_grant( role_member['id'], user_id=self.user['id'], domain_id=domain['id'] ) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_name=domain['name'], ) # Since name url restriction is off, we should be able to authenticate self.v3_create_token(auth_data) # Set the name url restriction to new, which should still allow us to # authenticate self.config_fixture.config( group='resource', project_name_url_safe='new' ) self.v3_create_token(auth_data) # Set the name url restriction to strict and we should fail to # authenticate self.config_fixture.config( group='resource', domain_name_url_safe='strict' ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_domain_token_without_grant_returns_unauthorized(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id'], ) # this fails because the user does not have a role on self.domain self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) 
    def test_validate_domain_scoped_token(self):
        """A valid domain token carries catalog, roles and domain."""
        # Grant user access to domain
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        domain_scoped_token = self._get_domain_scoped_token()

        r = self._validate_token(domain_scoped_token)
        self.assertValidDomainScopedTokenResponse(r)
        resp_json = json.loads(r.body)
        self.assertIsNotNone(resp_json['token']['catalog'])
        self.assertIsNotNone(resp_json['token']['roles'])
        self.assertIsNotNone(resp_json['token']['domain'])

    def test_validate_expired_domain_scoped_token_returns_not_found(self):
        # Grant user access to domain
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        # NOTE(lbragstad): We set token expiration to 10 seconds so that we can
        # use the context manager of freezegun without sqlite issues.
        self.config_fixture.config(group='token', expiration=10)
        time = timeutils.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            domain_scoped_token = self._get_domain_scoped_token()
            # advance past the 10 second expiration window
            frozen_datetime.tick(delta=datetime.timedelta(seconds=15))
            self._validate_token(
                domain_scoped_token, expected_status=http.client.NOT_FOUND
            )

    def test_domain_scoped_token_is_invalid_after_disabling_user(self):
        # Grant user access to domain
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        domain_scoped_token = self._get_domain_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(domain_scoped_token)
        self.assertValidDomainScopedTokenResponse(r)
        # Disable user
        self._set_user_enabled(self.user, enabled=False)
        # Ensure validating a token for a disabled user fails
        self._validate_token(
            domain_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_domain_scoped_token_is_invalid_after_deleting_grant(self):
        # Grant user access to domain
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        domain_scoped_token = self._get_domain_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(domain_scoped_token)
        self.assertValidDomainScopedTokenResponse(r)
        # Delete access to domain
        PROVIDERS.assignment_api.delete_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        # Ensure the token is no longer valid once the grant is deleted
        self._validate_token(
            domain_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_domain_scoped_token_invalid_after_disabling_domain(self):
        # Grant user access to domain
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        domain_scoped_token = self._get_domain_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(domain_scoped_token)
        self.assertValidDomainScopedTokenResponse(r)
        # Disable domain
        self.domain['enabled'] = False
        PROVIDERS.resource_api.update_domain(self.domain['id'], self.domain)
        # Ensure validating a token for a disabled domain fails
        self._validate_token(
            domain_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_create_project_scoped_token_with_project_id_and_user_id(self):
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(r)

    def test_validate_project_scoped_token(self):
        project_scoped_token = self._get_project_scoped_token()
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

    def test_validate_expired_project_scoped_token_returns_not_found(self):
        # NOTE(lbragstad): We set token expiration to 10 seconds so that we can
        # use the context manager of freezegun without sqlite issues.
        self.config_fixture.config(group='token', expiration=10)
        time = timeutils.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            project_scoped_token = self._get_project_scoped_token()
            # advance past the 10 second expiration window
            frozen_datetime.tick(delta=datetime.timedelta(seconds=15))
            self._validate_token(
                project_scoped_token, expected_status=http.client.NOT_FOUND
            )

    def test_revoke_project_scoped_token(self):
        project_scoped_token = self._get_project_scoped_token()
        # the token is valid before revocation...
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        # ...and not found afterwards
        self._revoke_token(project_scoped_token)
        self._validate_token(
            project_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_project_scoped_token_is_scoped_to_default_project(self):
        project = self._create_project_and_set_as_default_project()

        # attempt to authenticate without requesting a project
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.v3_create_token(auth_data)

        # ensure the project id in the token matches the default project id
        self.assertValidProjectScopedTokenResponse(r)
        self.assertEqual(project['id'], r.result['token']['project']['id'])

    def test_project_scoped_token_no_catalog_is_scoped_to_default_project(
        self,
    ):
        project = self._create_project_and_set_as_default_project()

        # attempt to authenticate without requesting a project or catalog
        auth_data = self.build_authentication_request(
            user_id=self.user['id'], password=self.user['password']
        )
        r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)

        # ensure the project id in the token matches the default project id
        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
        self.assertEqual(project['id'], r.result['token']['project']['id'])

    def test_implicit_project_id_scoped_token_with_user_id_no_catalog(self):
        self._create_project_and_set_as_default_project()

        # create a project scoped token that isn't scoped to the default
        # project
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.post('/auth/tokens?nocatalog', body=auth_data, noauth=True)

        # ensure the project id in the token matches the one we asked for
        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)
        self.assertEqual(
            self.project['id'], r.result['token']['project']['id']
        )

    def test_project_scoped_token_catalog_attributes(self):
        """The token catalog mirrors the fixture service and endpoint."""
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.v3_create_token(auth_data)

        catalog = r.result['token']['catalog']
        self.assertEqual(1, len(catalog))
        catalog = catalog[0]

        self.assertEqual(self.service['id'], catalog['id'])
        self.assertEqual(self.service['name'], catalog['name'])
        self.assertEqual(self.service['type'], catalog['type'])

        endpoint = catalog['endpoints']
        self.assertEqual(1, len(endpoint))
        endpoint = endpoint[0]

        self.assertEqual(self.endpoint['id'], endpoint['id'])
        self.assertEqual(self.endpoint['interface'], endpoint['interface'])
        self.assertEqual(self.endpoint['region_id'], endpoint['region_id'])
        self.assertEqual(self.endpoint['url'], endpoint['url'])

    def test_project_scoped_token_catalog_excludes_disabled_endpoint(self):
        # Create a disabled endpoint
        disabled_endpoint_ref = copy.copy(self.endpoint)
        disabled_endpoint_id = uuid.uuid4().hex
        disabled_endpoint_ref.update(
            {
                'id': disabled_endpoint_id,
                'enabled': False,
                'interface': 'internal',
            }
        )
        PROVIDERS.catalog_api.create_endpoint(
            disabled_endpoint_id, disabled_endpoint_ref
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        resp = self.v3_create_token(auth_data)

        # make sure the disabled endpoint id isn't in the list of endpoints
        endpoints = resp.result['token']['catalog'][0]['endpoints']
        endpoint_ids = [endpoint['id'] for endpoint in endpoints]
        self.assertNotIn(disabled_endpoint_id, endpoint_ids)

    def test_project_scoped_token_catalog_excludes_disabled_service(self):
        """On authenticate, get a catalog that excludes disabled services."""
        # although the endpoint associated with the service is enabled, the
        # service is disabled
        self.assertTrue(self.endpoint['enabled'])
        PROVIDERS.catalog_api.update_service(
            self.endpoint['service_id'], {'enabled': False}
        )
        service = PROVIDERS.catalog_api.get_service(
            self.endpoint['service_id']
        )
        self.assertFalse(service['enabled'])

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.v3_create_token(auth_data)
        self.assertEqual([], r.result['token']['catalog'])

    def test_scope_to_project_without_grant_returns_unauthorized(self):
        project = unit.new_project_ref(domain_id=self.domain_id)
        PROVIDERS.resource_api.create_project(project['id'], project)

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=project['id'],
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_project_scoped_token_with_username_and_domain_id(self):
        auth_data = self.build_authentication_request(
            username=self.user['name'],
            user_domain_id=self.domain['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(r)

    def test_create_project_scoped_token_with_username_and_domain_name(self):
        auth_data = self.build_authentication_request(
            username=self.user['name'],
            user_domain_name=self.domain['name'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        r = self.v3_create_token(auth_data)
        self.assertValidProjectScopedTokenResponse(r)

    def test_create_project_scoped_token_fails_if_project_name_unsafe(self):
        """Verify authenticate to a project with unsafe name fails."""
        # Start with url name restrictions off, so we can create the unsafe
        # named project
        self.config_fixture.config(
            group='resource', project_name_url_safe='off'
        )
        unsafe_name = 'i am not / safe'
        project = unit.new_project_ref(
            domain_id=test_v3.DEFAULT_DOMAIN_ID, name=unsafe_name
        )
        PROVIDERS.resource_api.create_project(project['id'], project)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user['id'], project['id'], role_member['id']
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_name=project['name'],
            project_domain_id=test_v3.DEFAULT_DOMAIN_ID,
        )
        # Since name url restriction is off, we should be able to authenticate
        self.v3_create_token(auth_data)
        # Set the name url restriction to new, which should still allow us to
        # authenticate
        self.config_fixture.config(
            group='resource', project_name_url_safe='new'
        )
        self.v3_create_token(auth_data)
        # Set the name url restriction to strict and we should fail to
        # authenticate
        self.config_fixture.config(
            group='resource', project_name_url_safe='strict'
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_project_scoped_token_fails_if_domain_name_unsafe(self):
        """Verify authenticate to a project using unsafe domain name fails."""
        # Start with url name restrictions off, so we can create the unsafe
        # named domain
        self.config_fixture.config(
            group='resource', domain_name_url_safe='off'
        )
        unsafe_name = 'i am not / safe'
        domain = unit.new_domain_ref(name=unsafe_name)
        PROVIDERS.resource_api.create_domain(domain['id'], domain)
        # Add a (safely named) project to that domain
        project = unit.new_project_ref(domain_id=domain['id'])
        PROVIDERS.resource_api.create_project(project['id'], project)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.create_grant(
            role_member['id'],
            user_id=self.user['id'],
            project_id=project['id'],
        )
        # An auth request via project name, but specifying domain by name
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_name=project['name'],
            project_domain_name=domain['name'],
        )
        # Since name url restriction is off, we should be able to authenticate
        self.v3_create_token(auth_data)
        # Set the name url restriction to new, which should still allow us to
        # authenticate
        # NOTE(review): this sets project_name_url_safe rather than
        # domain_name_url_safe even though the strict step below toggles the
        # domain option -- confirm intent.
        self.config_fixture.config(
            group='resource', project_name_url_safe='new'
        )
        self.v3_create_token(auth_data)
        # Set the name url restriction to strict and we should fail to
        # authenticate
        self.config_fixture.config(
            group='resource', domain_name_url_safe='strict'
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_project_token_with_same_domain_and_project_name(self):
        """Authenticate to a project with the same name as its domain."""
        domain = unit.new_project_ref(is_domain=True)
        domain = PROVIDERS.resource_api.create_project(domain['id'], domain)
        project = unit.new_project_ref(
            domain_id=domain['id'], name=domain['name']
        )
        PROVIDERS.resource_api.create_project(project['id'], project)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user['id'], project['id'], role_member['id']
        )

        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_name=project['name'],
            project_domain_name=domain['name'],
        )
        r = self.v3_create_token(auth_data)
        # the token must be scoped to the project, not the like-named domain
        self.assertEqual(project['id'], r.result['token']['project']['id'])

    def test_create_project_token_fails_with_project_acting_as_domain(self):
        domain = unit.new_project_ref(is_domain=True)
        domain = PROVIDERS.resource_api.create_project(domain['id'], domain)
        role_member = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role_member['id'], role_member)
        PROVIDERS.assignment_api.create_grant(
            role_member['id'], user_id=self.user['id'], domain_id=domain['id']
        )
        # authentication will fail because the project name is incorrect
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_name=domain['name'],
            project_domain_name=domain['name'],
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_project_token_with_disabled_project_domain_fails(self):
        # create a disabled domain
        domain = unit.new_domain_ref()
        domain = PROVIDERS.resource_api.create_domain(domain['id'], domain)
        # create a project in the domain
        project = unit.new_project_ref(domain_id=domain['id'])
        PROVIDERS.resource_api.create_project(project['id'], project)
        # assign some role to self.user for the project in the domain
        PROVIDERS.assignment_api.add_role_to_user_and_project(
            self.user['id'], project['id'], self.role_id
        )
        # Disable the domain
        domain['enabled'] = False
        PROVIDERS.resource_api.update_domain(domain['id'], domain)
        # user should not be able to auth with project_id
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=project['id'],
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )
        # user should not be able to auth with project_name & domain
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_name=project['name'],
            project_domain_id=domain['id'],
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_create_project_token_with_default_domain_as_project(self):
        # Authenticating with the default domain's ID as a project ID must
        # be rejected
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=test_v3.DEFAULT_DOMAIN_ID,
        )
        self.v3_create_token(
            auth_data, expected_status=http.client.UNAUTHORIZED
        )

    def test_project_scoped_token_is_invalid_after_disabling_user(self):
        project_scoped_token = self._get_project_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        # Disable the user
        self._set_user_enabled(self.user, enabled=False)
        # Ensure validating a token for a disabled user fails
        self._validate_token(
            project_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_project_scoped_token_invalid_after_changing_user_password(self):
        project_scoped_token = self._get_project_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        # Update user's password
        self.user['password'] = 'Password1'
        PROVIDERS.identity_api.update_user(self.user['id'], self.user)
        # Ensure updating user's password revokes existing tokens
        self._validate_token(
            project_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_project_scoped_token_invalid_after_disabling_project(self):
        project_scoped_token = self._get_project_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        # Disable project
        self.project['enabled'] = False
        PROVIDERS.resource_api.update_project(self.project['id'], self.project)
        # Ensure validating a token for a disabled project fails
        self._validate_token(
            project_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_project_scoped_token_is_invalid_after_deleting_grant(self):
        # disable caching so that user grant deletion is not hidden
        # by token caching
        self.config_fixture.config(group='cache', enabled=False)
        # Grant user access to project
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            project_id=self.project['id'],
        )
        project_scoped_token = self._get_project_scoped_token()
        # Make sure the token is valid
        r = self._validate_token(project_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        # Delete access to project
        PROVIDERS.assignment_api.delete_grant(
            self.role['id'],
            user_id=self.user['id'],
            project_id=self.project['id'],
        )
# Ensure the token has been revoked self._validate_token( project_scoped_token, expected_status=http.client.NOT_FOUND ) def test_no_access_to_default_project_result_in_unscoped_token(self): # create a disabled project to work with self.create_new_default_project_for_user( self.user['id'], self.domain_id ) # attempt to authenticate without requesting a project auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) r = self.v3_create_token(auth_data) self.assertValidUnscopedTokenResponse(r) def test_rescope_unscoped_token_with_trust(self): trustee_user, trust = self._create_trust() self._get_trust_scoped_token(trustee_user, trust) def test_validate_a_trust_scoped_token(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token r = self._validate_token(trust_scoped_token) self.assertValidProjectScopedTokenResponse(r) def test_validate_expired_trust_scoped_token_returns_not_found(self): # NOTE(lbragstad): We set token expiration to 10 seconds so that we can # use the context manager of freezegun without sqlite issues. 
        self.config_fixture.config(group='token', expiration=10)
        time = timeutils.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            trustee_user, trust = self._create_trust()
            trust_scoped_token = self._get_trust_scoped_token(
                trustee_user, trust
            )
            # advance past the 10 second expiration window
            frozen_datetime.tick(delta=datetime.timedelta(seconds=15))
            self._validate_token(
                trust_scoped_token, expected_status=http.client.NOT_FOUND
            )

    def test_validate_a_trust_scoped_token_impersonated(self):
        trustee_user, trust = self._create_trust(impersonation=True)
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

    def test_revoke_trust_scoped_token(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)
        self._revoke_token(trust_scoped_token)
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_scoped_token_is_invalid_after_disabling_trustee(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

        # Disable trustee
        trustee_update_ref = dict(enabled=False)
        PROVIDERS.identity_api.update_user(
            trustee_user['id'], trustee_update_ref
        )
        # Ensure validating a token for a disabled user fails
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_token_is_invalid_when_trustee_domain_disabled(self):
        # create a new domain with new user in that domain
        new_domain_ref = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(
            new_domain_ref['id'], new_domain_ref
        )
        trustee_ref = unit.create_user(
            PROVIDERS.identity_api, domain_id=new_domain_ref['id']
        )

        new_project_ref = unit.new_project_ref(domain_id=self.domain_id)
        PROVIDERS.resource_api.create_project(
            new_project_ref['id'], new_project_ref
        )

        # grant the trustor access to the new project
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user_id,
            project_id=new_project_ref['id'],
        )

        trust_ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=trustee_ref['id'],
            expires=dict(minutes=1),
            project_id=new_project_ref['id'],
            impersonation=True,
            role_ids=[self.role['id']],
        )

        resp = self.post('/OS-TRUST/trusts', body={'trust': trust_ref})
        self.assertValidTrustResponse(resp, trust_ref)
        trust_id = resp.json_body['trust']['id']

        # get a project-scoped token using the trust
        trust_auth_data = self.build_authentication_request(
            user_id=trustee_ref['id'],
            password=trustee_ref['password'],
            trust_id=trust_id,
        )
        trust_scoped_token = self.get_requested_token(trust_auth_data)

        # ensure the project-scoped token from the trust is valid
        self._validate_token(trust_scoped_token)

        disable_body = {'domain': {'enabled': False}}
        self.patch(
            '/domains/{domain_id}'.format(domain_id=new_domain_ref['id']),
            body=disable_body,
        )

        # ensure the project-scoped token from the trust is invalid
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_scoped_token_invalid_after_changing_trustee_password(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

        # Change trustee's password
        trustee_update_ref = dict(password='Password1')
        PROVIDERS.identity_api.update_user(
            trustee_user['id'], trustee_update_ref
        )
        # Ensure updating trustee's password revokes existing tokens
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_scoped_token_is_invalid_after_disabling_trustor(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

        # Disable the trustor
        trustor_update_ref = dict(enabled=False)
        PROVIDERS.identity_api.update_user(self.user['id'], trustor_update_ref)
        # Ensure validating a token for a disabled user fails
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_scoped_token_invalid_after_changing_trustor_password(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

        # Change trustor's password
        trustor_update_ref = dict(password='Password1')
        PROVIDERS.identity_api.update_user(self.user['id'], trustor_update_ref)
        # Ensure updating trustor's password revokes existing user's tokens
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_trust_scoped_token_invalid_after_disabled_trustor_domain(self):
        trustee_user, trust = self._create_trust()
        trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust)
        # Validate a trust scoped token
        r = self._validate_token(trust_scoped_token)
        self.assertValidProjectScopedTokenResponse(r)

        # Disable trustor's domain
        self.domain['enabled'] = False
        PROVIDERS.resource_api.update_domain(self.domain['id'], self.domain)

        trustor_update_ref = dict(password='Password1')
        PROVIDERS.identity_api.update_user(self.user['id'], trustor_update_ref)
        # Ensure updating trustor's password revokes existing user's tokens
        self._validate_token(
            trust_scoped_token, expected_status=http.client.NOT_FOUND
        )

    def test_default_fixture_scope_token(self):
        self.assertIsNotNone(self.get_scoped_token())

    def test_rescoping_token(self):
        expires = self.v3_token_data['token']['expires_at']
        # rescope the token
        r = self.v3_create_token(
            self.build_authentication_request(
                token=self.v3_token, project_id=self.project_id
            )
        )
        self.assertValidProjectScopedTokenResponse(r)

        # ensure token expiration stayed the same
        self.assertTimestampEqual(expires, r.result['token']['expires_at'])

    def test_check_token(self):
        self.head(
            '/auth/tokens',
            headers=self.headers,
            expected_status=http.client.OK,
        )

    def test_validate_token(self):
        r = self.get('/auth/tokens', headers=self.headers)
        self.assertValidUnscopedTokenResponse(r)

    def test_validate_missing_subject_token(self):
        self.get('/auth/tokens', expected_status=http.client.NOT_FOUND)

    def test_validate_missing_auth_token(self):
        self.admin_request(
            method='GET',
            path='/v3/projects',
            token=None,
            expected_status=http.client.UNAUTHORIZED,
        )

    def test_validate_token_nocatalog(self):
        v3_token = self.get_requested_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_id=self.project['id'],
            )
        )
        r = self.get(
            '/auth/tokens?nocatalog', headers={'X-Subject-Token': v3_token}
        )
        self.assertValidProjectScopedTokenResponse(r, require_catalog=False)

    def test_is_admin_token_by_ids(self):
        self.config_fixture.config(
            group='resource',
            admin_project_domain_name=self.domain['name'],
            admin_project_name=self.project['name'],
        )
        r = self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_id=self.project['id'],
            )
        )
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)

    def test_is_admin_token_by_names(self):
        self.config_fixture.config(
            group='resource',
            admin_project_domain_name=self.domain['name'],
            admin_project_name=self.project['name'],
        )
        r = self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_domain_name=self.domain['name'],
                project_name=self.project['name'],
            )
        )
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=True)

    def test_token_for_non_admin_project_is_not_admin(self):
        self.config_fixture.config(
            group='resource',
            admin_project_domain_name=self.domain['name'],
            admin_project_name=uuid.uuid4().hex,
        )
        r = self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_id=self.project['id'],
            )
        )
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)

    def test_token_for_non_admin_domain_same_project_name_is_not_admin(self):
        self.config_fixture.config(
            group='resource',
            admin_project_domain_name=uuid.uuid4().hex,
            admin_project_name=self.project['name'],
        )
        r = self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_id=self.project['id'],
            )
        )
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=False)

    def test_only_admin_project_set_acts_as_non_admin(self):
        # only admin_project_name is configured (no domain), so the
        # admin-project flag stays unset (None) in the response
        self.config_fixture.config(
            group='resource', admin_project_name=self.project['name']
        )
        r = self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                project_id=self.project['id'],
            )
        )
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=None)
        v3_token = r.headers.get('X-Subject-Token')
        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        self.assertValidProjectScopedTokenResponse(r, is_admin_project=None)

    def _create_role(self, domain_id=None):
        """Call ``POST /roles``."""
        ref = unit.new_role_ref(domain_id=domain_id)
        r = self.post('/roles', body={'role': ref})
        return self.assertValidRoleResponse(r, ref)

    def _create_implied_role(self, prior_id):
        # create a fresh role and make it implied by ``prior_id``
        implied = self._create_role()
        url = '/roles/{}/implies/{}'.format(prior_id, implied['id'])
        self.put(url, expected_status=http.client.CREATED)
        return implied

    def _delete_implied_role(self, prior_role_id, implied_role_id):
        url = f'/roles/{prior_role_id}/implies/{implied_role_id}'
        self.delete(url)

    def _get_scoped_token_roles(self, is_domain=False):
        # fetch a scoped token and return the list of roles it carries
        if is_domain:
            v3_token = self.get_domain_scoped_token()
        else:
            v3_token = self.get_scoped_token()

        r = self.get('/auth/tokens', headers={'X-Subject-Token': v3_token})
        v3_token_data = r.result
        token_roles = v3_token_data['token']['roles']
        return token_roles

    def _create_implied_role_shows_in_v3_token(self, is_domain):
        # each implied role added to the prior role should appear in a
        # freshly issued token
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(1, len(token_roles))

        prior = token_roles[0]['id']
        implied1 = self._create_implied_role(prior)

        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(2, len(token_roles))

        implied2 = self._create_implied_role(prior)
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(3, len(token_roles))

        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)
        self.assertIn(implied1['id'], token_role_ids)
        self.assertIn(implied2['id'], token_role_ids)

    def test_create_implied_role_shows_in_v3_project_token(self):
        # regardless of the default chosen, this should always
        # test with the option set.
        # NOTE(review): this config call (and the identical ones below) sets
        # no option values -- presumably vestigial from a removed token
        # option; confirm whether it can be dropped.
        self.config_fixture.config(group='token')
        self._create_implied_role_shows_in_v3_token(False)

    def test_create_implied_role_shows_in_v3_domain_token(self):
        self.config_fixture.config(group='token')
        PROVIDERS.assignment_api.create_grant(
            self.role['id'],
            user_id=self.user['id'],
            domain_id=self.domain['id'],
        )
        self._create_implied_role_shows_in_v3_token(True)

    def test_create_implied_role_shows_in_v3_system_token(self):
        self.config_fixture.config(group='token')
        PROVIDERS.assignment_api.create_system_grant_for_user(
            self.user['id'], self.role['id']
        )

        token_id = self.get_system_scoped_token()
        r = self.get('/auth/tokens', headers={'X-Subject-Token': token_id})
        token_roles = r.result['token']['roles']

        prior = token_roles[0]['id']
        self._create_implied_role(prior)

        # re-validating the same token now reflects the implied role
        r = self.get('/auth/tokens', headers={'X-Subject-Token': token_id})
        token_roles = r.result['token']['roles']
        self.assertEqual(2, len(token_roles))

    def test_group_assigned_implied_role_shows_in_v3_token(self):
        self.config_fixture.config(group='token')
        is_domain = False
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(1, len(token_roles))

        new_role = self._create_role()
        prior = new_role['id']

        new_group_ref = unit.new_group_ref(domain_id=self.domain['id'])
        new_group = PROVIDERS.identity_api.create_group(new_group_ref)
        PROVIDERS.assignment_api.create_grant(
            prior, group_id=new_group['id'], project_id=self.project['id']
        )

        # the group grant has no effect until the user joins the group
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(1, len(token_roles))

        PROVIDERS.identity_api.add_user_to_group(
            self.user['id'], new_group['id']
        )

        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(2, len(token_roles))

        implied1 = self._create_implied_role(prior)
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(3, len(token_roles))

        implied2 = self._create_implied_role(prior)
        token_roles = self._get_scoped_token_roles(is_domain)
        self.assertEqual(4, len(token_roles))

        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)
        self.assertIn(implied1['id'], token_role_ids)
        self.assertIn(implied2['id'], token_role_ids)

    def test_multiple_implied_roles_show_in_v3_token(self):
        self.config_fixture.config(group='token')
        token_roles = self._get_scoped_token_roles()
        self.assertEqual(1, len(token_roles))

        prior = token_roles[0]['id']
        implied1 = self._create_implied_role(prior)
        implied2 = self._create_implied_role(prior)
        implied3 = self._create_implied_role(prior)

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(4, len(token_roles))

        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)
        self.assertIn(implied1['id'], token_role_ids)
        self.assertIn(implied2['id'], token_role_ids)
        self.assertIn(implied3['id'], token_role_ids)

    def test_chained_implied_role_shows_in_v3_token(self):
        # prior -> implied1 -> implied2 -> implied3; all four expand into
        # the token
        self.config_fixture.config(group='token')
        token_roles = self._get_scoped_token_roles()
        self.assertEqual(1, len(token_roles))

        prior = token_roles[0]['id']
        implied1 = self._create_implied_role(prior)
        implied2 = self._create_implied_role(implied1['id'])
        implied3 = self._create_implied_role(implied2['id'])

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(4, len(token_roles))

        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)
        self.assertIn(implied1['id'], token_role_ids)
        self.assertIn(implied2['id'], token_role_ids)
        self.assertIn(implied3['id'], token_role_ids)

    def test_implied_role_disabled_by_config(self):
        # NOTE(review): despite the test name, the config call below disables
        # nothing (no option values set) and the assertion expects all 4
        # expanded roles -- looks stale relative to a removed
        # infer-roles-style option; confirm against project history.
        self.config_fixture.config(group='token')
        token_roles = self._get_scoped_token_roles()
        self.assertEqual(1, len(token_roles))

        prior = token_roles[0]['id']
        implied1 = self._create_implied_role(prior)
        implied2 = self._create_implied_role(implied1['id'])
        self._create_implied_role(implied2['id'])

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(4, len(token_roles))
        token_role_ids = [role['id'] for role in token_roles]
        self.assertIn(prior, token_role_ids)

    def test_delete_implied_role_do_not_show_in_v3_token(self):
        self.config_fixture.config(group='token')
        token_roles = self._get_scoped_token_roles()
        prior = token_roles[0]['id']
        implied = self._create_implied_role(prior)

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(2, len(token_roles))
        self._delete_implied_role(prior, implied['id'])

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(1, len(token_roles))

    def test_unrelated_implied_roles_do_not_change_v3_token(self):
        self.config_fixture.config(group='token')
        token_roles = self._get_scoped_token_roles()
        prior = token_roles[0]['id']
        implied = self._create_implied_role(prior)

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(2, len(token_roles))

        # an implication rooted at a role the user does not hold is inert
        unrelated = self._create_role()
        url = '/roles/{}/implies/{}'.format(unrelated['id'], implied['id'])
        self.put(url, expected_status=http.client.CREATED)

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(2, len(token_roles))

        self._delete_implied_role(unrelated['id'], implied['id'])
        token_roles = self._get_scoped_token_roles()
        self.assertEqual(2, len(token_roles))

    def test_domain_specific_roles_do_not_show_v3_token(self):
        self.config_fixture.config(group='token')
        initial_token_roles = self._get_scoped_token_roles()

        new_role = self._create_role(domain_id=self.domain_id)
        PROVIDERS.assignment_api.create_grant(
            new_role['id'],
            user_id=self.user['id'],
            project_id=self.project['id'],
        )
        implied = self._create_implied_role(new_role['id'])

        token_roles = self._get_scoped_token_roles()
        self.assertEqual(len(initial_token_roles) + 1, len(token_roles))
        # The implied role from the domain specific role should be in the
        # token, but not the domain specific role itself.
token_role_ids = [role['id'] for role in token_roles] self.assertIn(implied['id'], token_role_ids) self.assertNotIn(new_role['id'], token_role_ids) def test_remove_all_roles_from_scope_result_in_404(self): # create a new user new_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) # give the new user a role on a project path = '/projects/{}/users/{}/roles/{}'.format( self.project['id'], new_user['id'], self.role['id'], ) self.put(path=path) # authenticate as the new user and get a project-scoped token auth_data = self.build_authentication_request( user_id=new_user['id'], password=new_user['password'], project_id=self.project['id'], ) subject_token_id = self.v3_create_token(auth_data).headers.get( 'X-Subject-Token' ) # make sure the project-scoped token is valid headers = {'X-Subject-Token': subject_token_id} r = self.get('/auth/tokens', headers=headers) self.assertValidProjectScopedTokenResponse(r) # remove the roles from the user for the given scope path = '/projects/{}/users/{}/roles/{}'.format( self.project['id'], new_user['id'], self.role['id'], ) self.delete(path=path) # token validation should now result in 404 self.get( '/auth/tokens', headers=headers, expected_status=http.client.NOT_FOUND, ) def test_create_token_with_nonexistant_user_id_fails(self): auth_data = self.build_authentication_request( user_id=uuid.uuid4().hex, password=self.user['password'] ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_token_with_nonexistant_username_fails(self): auth_data = self.build_authentication_request( username=uuid.uuid4().hex, user_domain_id=self.domain['id'], password=self.user['password'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_token_with_nonexistant_domain_id_fails(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_id=uuid.uuid4().hex, password=self.user['password'], ) self.v3_create_token( 
auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_token_with_nonexistant_domain_name_fails(self): auth_data = self.build_authentication_request( username=self.user['name'], user_domain_name=uuid.uuid4().hex, password=self.user['password'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_token_with_wrong_password_fails(self): auth_data = self.build_authentication_request( user_id=self.user['id'], password=uuid.uuid4().hex ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_user_and_group_roles_scoped_token(self): """Test correct roles are returned in scoped token. Test Plan: - Create a domain, with 1 project, 2 users (user1 and user2) and 2 groups (group1 and group2) - Make user1 a member of group1, user2 a member of group2 - Create 8 roles, assigning them to each of the 8 combinations of users/groups on domain/project - Get a project scoped token for user1, checking that the right two roles are returned (one directly assigned, one by virtue of group membership) - Repeat this for a domain scoped token - Make user1 also a member of group2 - Get another scoped token making sure the additional role shows up - User2 is just here as a spoiler, to make sure we don't get any roles uniquely assigned to it returned in any of our tokens """ domainA = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domainA['id'], domainA) projectA = unit.new_project_ref(domain_id=domainA['id']) PROVIDERS.resource_api.create_project(projectA['id'], projectA) user1 = unit.create_user( PROVIDERS.identity_api, domain_id=domainA['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=domainA['id'] ) group1 = unit.new_group_ref(domain_id=domainA['id']) group1 = PROVIDERS.identity_api.create_group(group1) group2 = unit.new_group_ref(domain_id=domainA['id']) group2 = PROVIDERS.identity_api.create_group(group2) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) 
PROVIDERS.identity_api.add_user_to_group(user2['id'], group2['id']) # Now create all the roles and assign them role_list = [] for _ in range(8): role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) role_list.append(role) PROVIDERS.assignment_api.create_grant( role_list[0]['id'], user_id=user1['id'], domain_id=domainA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[1]['id'], user_id=user1['id'], project_id=projectA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[2]['id'], user_id=user2['id'], domain_id=domainA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[3]['id'], user_id=user2['id'], project_id=projectA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[4]['id'], group_id=group1['id'], domain_id=domainA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[5]['id'], group_id=group1['id'], project_id=projectA['id'], ) PROVIDERS.assignment_api.create_grant( role_list[6]['id'], group_id=group2['id'], domain_id=domainA['id'] ) PROVIDERS.assignment_api.create_grant( role_list[7]['id'], group_id=group2['id'], project_id=projectA['id'], ) # First, get a project scoped token - which should # contain the direct user role and the one by virtue # of group membership auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], project_id=projectA['id'], ) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(2, len(token['roles'])) self.assertIn(role_list[1]['id'], roles_ids) self.assertIn(role_list[5]['id'], roles_ids) # Now the same thing for a domain scoped token auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], domain_id=domainA['id'], ) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(2, 
len(token['roles'])) self.assertIn(role_list[0]['id'], roles_ids) self.assertIn(role_list[4]['id'], roles_ids) # Finally, add user1 to the 2nd group, and get a new # scoped token - the extra role should now be included # by virtue of the 2nd group PROVIDERS.identity_api.add_user_to_group(user1['id'], group2['id']) auth_data = self.build_authentication_request( user_id=user1['id'], password=user1['password'], project_id=projectA['id'], ) r = self.v3_create_token(auth_data) token = self.assertValidScopedTokenResponse(r) roles_ids = [] for ref in token['roles']: roles_ids.append(ref['id']) self.assertEqual(3, len(token['roles'])) self.assertIn(role_list[1]['id'], roles_ids) self.assertIn(role_list[5]['id'], roles_ids) self.assertIn(role_list[7]['id'], roles_ids) def test_auth_token_cross_domain_group_and_project(self): """Verify getting a token in cross domain group/project roles.""" # create domain, project and group and grant roles to user domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) project1 = unit.new_project_ref(domain_id=domain1['id']) PROVIDERS.resource_api.create_project(project1['id'], project1) user_foo = unit.create_user( PROVIDERS.identity_api, domain_id=test_v3.DEFAULT_DOMAIN_ID ) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) role_admin = unit.new_role_ref() PROVIDERS.role_api.create_role(role_admin['id'], role_admin) role_foo_domain1 = unit.new_role_ref() PROVIDERS.role_api.create_role( role_foo_domain1['id'], role_foo_domain1 ) role_group_domain1 = unit.new_role_ref() PROVIDERS.role_api.create_role( role_group_domain1['id'], role_group_domain1 ) new_group = unit.new_group_ref(domain_id=domain1['id']) new_group = PROVIDERS.identity_api.create_group(new_group) PROVIDERS.identity_api.add_user_to_group( user_foo['id'], new_group['id'] ) PROVIDERS.assignment_api.create_grant( user_id=user_foo['id'], project_id=project1['id'], role_id=role_member['id'], ) 
PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], project_id=project1['id'], role_id=role_admin['id'], ) PROVIDERS.assignment_api.create_grant( user_id=user_foo['id'], domain_id=domain1['id'], role_id=role_foo_domain1['id'], ) PROVIDERS.assignment_api.create_grant( group_id=new_group['id'], domain_id=domain1['id'], role_id=role_group_domain1['id'], ) # Get a scoped token for the project auth_data = self.build_authentication_request( username=user_foo['name'], user_domain_id=test_v3.DEFAULT_DOMAIN_ID, password=user_foo['password'], project_name=project1['name'], project_domain_id=domain1['id'], ) r = self.v3_create_token(auth_data) scoped_token = self.assertValidScopedTokenResponse(r) project = scoped_token["project"] roles_ids = [] for ref in scoped_token['roles']: roles_ids.append(ref['id']) self.assertEqual(project1['id'], project["id"]) self.assertIn(role_member['id'], roles_ids) self.assertIn(role_admin['id'], roles_ids) self.assertNotIn(role_foo_domain1['id'], roles_ids) self.assertNotIn(role_group_domain1['id'], roles_ids) def test_remote_user_no_realm(self): app = self.loadapp() auth_contexts = [] # NOTE(morgan): This __init__ is used to inject the auth context into # the auth_contexts list so that we can perform introspection. This way # we do not need to try and mock out anything deep within keystone's # auth pipeline. Note that we are using MockPatch to ensure we undo # the mock after the fact. 
def new_init(self, *args, **kwargs): super(auth.core.AuthContext, self).__init__(*args, **kwargs) auth_contexts.append(self) self.useFixture( fixtures.MockPatch( 'keystone.auth.core.AuthContext.__init__', new_init ) ) with app.test_client() as c: c.environ_base.update( self.build_external_auth_environ( self.default_domain_user['name'] ) ) auth_req = self.build_authentication_request() c.post('/v3/auth/tokens', json=auth_req) self.assertEqual( self.default_domain_user['id'], auth_contexts[-1]['user_id'] ) # Now test to make sure the user name can, itself, contain the # '@' character. user = {'name': 'myname@mydivision'} PROVIDERS.identity_api.update_user( self.default_domain_user['id'], user ) with app.test_client() as c: c.environ_base.update( self.build_external_auth_environ(user['name']) ) auth_req = self.build_authentication_request() c.post('/v3/auth/tokens', json=auth_req) self.assertEqual( self.default_domain_user['id'], auth_contexts[-1]['user_id'] ) self.assertEqual( self.default_domain_user['id'], auth_contexts[-1]['user_id'] ) def test_remote_user_no_domain(self): app = self.loadapp() with app.test_client() as c: c.environ_base.update( self.build_external_auth_environ(self.user['name']) ) auth_request = self.build_authentication_request() c.post( '/v3/auth/tokens', json=auth_request, expected_status_code=http.client.UNAUTHORIZED, ) def test_remote_user_and_password(self): # both REMOTE_USER and password methods must pass. # note that they do not have to match app = self.loadapp() with app.test_client() as c: auth_data = self.build_authentication_request( user_domain_id=self.default_domain_user['domain_id'], username=self.default_domain_user['name'], password=self.default_domain_user['password'], ) c.post('/v3/auth/tokens', json=auth_data) def test_remote_user_and_explicit_external(self): # both REMOTE_USER and password methods must pass. 
# note that they do not have to match auth_data = self.build_authentication_request( user_domain_id=self.domain['id'], username=self.user['name'], password=self.user['password'], ) auth_data['auth']['identity']['methods'] = ["password", "external"] auth_data['auth']['identity']['external'] = {} app = self.loadapp() with app.test_client() as c: c.post( '/v3/auth/tokens', json=auth_data, expected_status_code=http.client.UNAUTHORIZED, ) def test_remote_user_bad_password(self): # both REMOTE_USER and password methods must pass. app = self.loadapp() auth_data = self.build_authentication_request( user_domain_id=self.domain['id'], username=self.user['name'], password='badpassword', ) with app.test_client() as c: c.post( '/v3/auth/tokens', json=auth_data, expected_status_code=http.client.UNAUTHORIZED, ) def test_fetch_expired_allow_expired(self): self.config_fixture.config( group='token', expiration=10, allow_expired_window=20 ) time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: token = self._get_project_scoped_token() # initially it validates because it's within time frozen_datetime.tick(delta=datetime.timedelta(seconds=2)) self._validate_token(token) # after passing expiry time validation fails frozen_datetime.tick(delta=datetime.timedelta(seconds=12)) self._validate_token(token, expected_status=http.client.NOT_FOUND) # but if we pass allow_expired it validates self._validate_token(token, allow_expired=True) # and then if we're passed the allow_expired_window it will fail # anyway raises expired when now > expiration + window frozen_datetime.tick(delta=datetime.timedelta(seconds=22)) self._validate_token( token, allow_expired=True, expected_status=http.client.NOT_FOUND, ) def test_system_scoped_token_works_with_domain_specific_drivers(self): self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) PROVIDERS.assignment_api.create_system_grant_for_user( self.user['id'], self.role['id'] ) token_id = 
self.get_system_scoped_token() headers = {'X-Auth-Token': token_id} app = self.loadapp() with app.test_client() as c: c.get('/v3/users', headers=headers) def test_fetch_expired_allow_expired_in_expired_window(self): self.config_fixture.config( group='token', expiration=10, allow_expired_window=20 ) time = timeutils.utcnow() with freezegun.freeze_time(time): token = self._get_project_scoped_token() tick = datetime.timedelta(seconds=15) with freezegun.freeze_time(time + tick): # after passing expiry time validation fails self._validate_token(token, expected_status=http.client.NOT_FOUND) # but if we pass allow_expired it validates r = self._validate_token(token, allow_expired=True) self.assertValidProjectScopedTokenResponse(r) def _create_project_user(self): new_domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain( new_domain_ref['id'], new_domain_ref ) new_project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project( new_project_ref['id'], new_project_ref ) new_user = unit.create_user( PROVIDERS.identity_api, domain_id=new_domain_ref['id'], project_id=new_project_ref['id'], ) PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=new_user['id'], project_id=new_project_ref['id'], ) return new_user, new_domain_ref, new_project_ref def _create_certificates( self, root_dn=None, server_dn=None, client_dn=None ): root_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='root', ) if root_dn: root_subj = unit.update_dn(root_subj, root_dn) root_cert, root_key = unit.create_certificate(root_subj) keystone_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='keystone.local', ) if server_dn: keystone_subj = unit.update_dn(keystone_subj, server_dn) ks_cert, ks_key = 
unit.create_certificate( keystone_subj, ca=root_cert, ca_key=root_key ) client_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='client', ) if client_dn: client_subj = unit.update_dn(client_subj, client_dn) client_cert, client_key = unit.create_certificate( client_subj, ca=root_cert, ca_key=root_key ) return root_cert, root_key, ks_cert, ks_key, client_cert, client_key def _get_cert_content(self, cert): return cert.public_bytes(Encoding.PEM).decode('ascii') def _get_oauth2_access_token( self, client_id, client_cert_content, expected_status=http.client.OK ): headers = { 'Content-Type': 'application/x-www-form-urlencoded', } data = {'grant_type': 'client_credentials', 'client_id': client_id} extra_environ = {'SSL_CLIENT_CERT': client_cert_content} data = parse.urlencode(data).encode() resp = self.post( '/OS-OAUTH2/token', headers=headers, noauth=True, convert=False, body=data, environ=extra_environ, expected_status=expected_status, ) return resp def _create_mapping(self): mapping = { 'id': 'oauth2_mapping', 'rules': [ { 'local': [ { 'user': { 'name': '{0}', 'id': '{1}', 'email': '{2}', 'domain': {'name': '{3}', 'id': '{4}'}, } } ], 'remote': [ {'type': 'SSL_CLIENT_SUBJECT_DN_CN'}, {'type': 'SSL_CLIENT_SUBJECT_DN_UID'}, {'type': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS'}, {'type': 'SSL_CLIENT_SUBJECT_DN_O'}, {'type': 'SSL_CLIENT_SUBJECT_DN_DC'}, { 'type': 'SSL_CLIENT_ISSUER_DN_CN', 'any_one_of': ['root'], }, ], } ], } PROVIDERS.federation_api.create_mapping(mapping['id'], mapping) def test_verify_oauth2_token_project_scope_ok(self): cache_on_issue = CONF.token.cache_on_issue caching = CONF.token.caching self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( root_dn=unit.create_dn(common_name='root'), client_dn=unit.create_dn( common_name=user['name'], user_id=user['id'], 
email_address=user['email'], organization_name=user_domain['name'], domain_component=user_domain['id'], ), ) cert_content = self._get_cert_content(client_cert) CONF.token.cache_on_issue = False CONF.token.caching = False resp = self._get_oauth2_access_token(user['id'], cert_content) json_resp = json.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, expected_status=http.client.OK, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) CONF.token.cache_on_issue = cache_on_issue CONF.token.caching = caching class TokenDataTests: """Test the data in specific token types.""" def test_unscoped_token_format(self): # ensure the unscoped token response contains the appropriate data r = self.get('/auth/tokens', headers=self.headers) self.assertValidUnscopedTokenResponse(r) def test_domain_scoped_token_format(self): # ensure the domain scoped token response contains the appropriate data PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=self.default_domain_user['id'], domain_id=self.domain['id'], ) domain_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], domain_id=self.domain['id'], ) ) self.headers['X-Subject-Token'] = domain_scoped_token r = self.get('/auth/tokens', headers=self.headers) self.assertValidDomainScopedTokenResponse(r) def 
test_project_scoped_token_format(self): # ensure project scoped token responses contains the appropriate data project_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.default_domain_project['id'], ) ) self.headers['X-Subject-Token'] = project_scoped_token r = self.get('/auth/tokens', headers=self.headers) self.assertValidProjectScopedTokenResponse(r) def test_extra_data_in_unscoped_token_fails_validation(self): # ensure unscoped token response contains the appropriate data r = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data r.result['token']['extra'] = str(uuid.uuid4().hex) self.assertRaises( exception.SchemaValidationError, self.assertValidUnscopedTokenResponse, r, ) def test_extra_data_in_domain_scoped_token_fails_validation(self): # ensure domain scoped token response contains the appropriate data PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=self.default_domain_user['id'], domain_id=self.domain['id'], ) domain_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], domain_id=self.domain['id'], ) ) self.headers['X-Subject-Token'] = domain_scoped_token r = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data r.result['token']['extra'] = str(uuid.uuid4().hex) self.assertRaises( exception.SchemaValidationError, self.assertValidDomainScopedTokenResponse, r, ) def test_extra_data_in_project_scoped_token_fails_validation(self): # ensure project scoped token responses contains the appropriate data project_scoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], project_id=self.default_domain_project['id'], ) ) 
self.headers['X-Subject-Token'] = project_scoped_token resp = self.get('/auth/tokens', headers=self.headers) # populate the response result with some extra data resp.result['token']['extra'] = str(uuid.uuid4().hex) self.assertRaises( exception.SchemaValidationError, self.assertValidProjectScopedTokenResponse, resp, ) class AllowRescopeScopedTokenDisabledTests(test_v3.RestfulTestCase): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', allow_rescope_scoped_token=False ) def test_rescoping_v3_to_v3_disabled(self): self.v3_create_token( self.build_authentication_request( token=self.get_scoped_token(), project_id=self.project_id ), expected_status=http.client.FORBIDDEN, ) def test_rescoped_domain_token_disabled(self): self.domainA = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainA['id'], self.domainA) PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=self.user['id'], domain_id=self.domainA['id'], ) unscoped_token = self.get_requested_token( self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) ) # Get a domain-scoped token from the unscoped token domain_scoped_token = self.get_requested_token( self.build_authentication_request( token=unscoped_token, domain_id=self.domainA['id'] ) ) self.v3_create_token( self.build_authentication_request( token=domain_scoped_token, project_id=self.project_id ), expected_status=http.client.FORBIDDEN, ) class TestFernetTokenAPIs( test_v3.RestfulTestCase, TokenAPITests, TokenDataTests ): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', provider='fernet', cache_on_issue=True ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) def setUp(self): super().setUp() self.doSetUp() def _make_auth_request(self, auth_data): token = super()._make_auth_request(auth_data) self.assertLess(len(token), 255) return 
token def test_validate_tampered_unscoped_token_fails(self): unscoped_token = self._get_unscoped_token() tampered_token = ( unscoped_token[:50] + uuid.uuid4().hex + unscoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_validate_tampered_project_scoped_token_fails(self): project_scoped_token = self._get_project_scoped_token() tampered_token = ( project_scoped_token[:50] + uuid.uuid4().hex + project_scoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_validate_tampered_trust_scoped_token_fails(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Get a trust scoped token tampered_token = ( trust_scoped_token[:50] + uuid.uuid4().hex + trust_scoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_trust_scoped_token_is_invalid_after_disabling_trustor(self): # NOTE(amakarov): have to override this test for non-persistent tokens # as TokenNotFound exception makes no sense for those. 
trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token r = self._validate_token(trust_scoped_token) self.assertValidProjectScopedTokenResponse(r) # Disable the trustor trustor_update_ref = dict(enabled=False) PROVIDERS.identity_api.update_user(self.user['id'], trustor_update_ref) # Ensure validating a token for a disabled user fails self._validate_token( trust_scoped_token, expected_status=http.client.FORBIDDEN ) class TestJWSTokenAPIs(test_v3.RestfulTestCase, TokenAPITests, TokenDataTests): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', provider='jws', cache_on_issue=True ) self.useFixture(ksfixtures.JWSKeyRepository(self.config_fixture)) def setUp(self): super().setUp() self.doSetUp() def _make_auth_request(self, auth_data): token = super()._make_auth_request(auth_data) self.assertLess(len(token), 350) return token def test_validate_tampered_unscoped_token_fails(self): unscoped_token = self._get_unscoped_token() tampered_token = ( unscoped_token[:50] + uuid.uuid4().hex + unscoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_validate_tampered_project_scoped_token_fails(self): project_scoped_token = self._get_project_scoped_token() tampered_token = ( project_scoped_token[:50] + uuid.uuid4().hex + project_scoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_validate_tampered_trust_scoped_token_fails(self): trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Get a trust scoped token tampered_token = ( trust_scoped_token[:50] + uuid.uuid4().hex + trust_scoped_token[50 + 32 :] ) self._validate_token( tampered_token, expected_status=http.client.NOT_FOUND ) def test_trust_scoped_token_is_invalid_after_disabling_trustor(self): # NOTE(amakarov): have 
to override this test for non-persistent tokens # as TokenNotFound exception makes no sense for those. trustee_user, trust = self._create_trust() trust_scoped_token = self._get_trust_scoped_token(trustee_user, trust) # Validate a trust scoped token r = self._validate_token(trust_scoped_token) self.assertValidProjectScopedTokenResponse(r) # Disable the trustor trustor_update_ref = dict(enabled=False) PROVIDERS.identity_api.update_user(self.user['id'], trustor_update_ref) # Ensure validating a token for a disabled user fails self._validate_token( trust_scoped_token, expected_status=http.client.FORBIDDEN ) class TestTokenRevokeById(test_v3.RestfulTestCase): """Test token revocation on the v3 Identity API.""" def config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', provider='fernet', revoke_by_id=False ) self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) def setUp(self): """Setup for Token Revoking Test Cases. 
        As well as the usual housekeeping, create a set of domains,
        users, groups, roles and projects for the subsequent tests:

        - Two domains: A & B
        - Three users (1, 2 and 3)
        - Three groups (1, 2 and 3)
        - Two roles (1 and 2)
        - DomainA owns user1, domainB owns user2 and user3
        - DomainA owns group1 and group2, domainB owns group3
        - User1 and user2 are members of group1
        - User3 is a member of group2
        - Two projects: A & B, both in domainA
        - Group1 has role1 on Project A and B, meaning that user1 and user2
          will get these roles by virtue of membership
        - User1, 2 and 3 have role1 assigned to projectA
        - Group1 has role1 on Project A and B, meaning that user1 and user2
          will get role1 (duplicated) by virtue of membership
        - User1 has role2 assigned to domainA

        """
        super().setUp()

        # Start by creating a couple of domains and projects
        self.domainA = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(self.domainA['id'], self.domainA)
        self.domainB = unit.new_domain_ref()
        PROVIDERS.resource_api.create_domain(self.domainB['id'], self.domainB)
        self.projectA = unit.new_project_ref(domain_id=self.domainA['id'])
        PROVIDERS.resource_api.create_project(
            self.projectA['id'], self.projectA
        )
        self.projectB = unit.new_project_ref(domain_id=self.domainA['id'])
        PROVIDERS.resource_api.create_project(
            self.projectB['id'], self.projectB
        )

        # Now create some users
        self.user1 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainA['id']
        )
        self.user2 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainB['id']
        )
        self.user3 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainB['id']
        )

        # ...and the groups (create_group returns the created ref, so the
        # local dicts are replaced with the provider-returned ones)
        self.group1 = unit.new_group_ref(domain_id=self.domainA['id'])
        self.group1 = PROVIDERS.identity_api.create_group(self.group1)
        self.group2 = unit.new_group_ref(domain_id=self.domainA['id'])
        self.group2 = PROVIDERS.identity_api.create_group(self.group2)
        self.group3 = unit.new_group_ref(domain_id=self.domainB['id'])
        self.group3 = PROVIDERS.identity_api.create_group(self.group3)

        PROVIDERS.identity_api.add_user_to_group(
            self.user1['id'], self.group1['id']
        )
        PROVIDERS.identity_api.add_user_to_group(
            self.user2['id'], self.group1['id']
        )
        PROVIDERS.identity_api.add_user_to_group(
            self.user3['id'], self.group2['id']
        )

        self.role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(self.role1['id'], self.role1)
        self.role2 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(self.role2['id'], self.role2)

        # role2 for user1 on domainA; role1 directly for users 1-3 on
        # projectA, plus role1 for group1 on projectA
        PROVIDERS.assignment_api.create_grant(
            self.role2['id'],
            user_id=self.user1['id'],
            domain_id=self.domainA['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            user_id=self.user1['id'],
            project_id=self.projectA['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            user_id=self.user2['id'],
            project_id=self.projectA['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            user_id=self.user3['id'],
            project_id=self.projectA['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            group_id=self.group1['id'],
            project_id=self.projectA['id'],
        )

    def test_unscoped_token_remains_valid_after_role_assignment(self):
        """Granting a new role must not revoke existing tokens."""
        unscoped_token = self.get_requested_token(
            self.build_authentication_request(
                user_id=self.user1['id'], password=self.user1['password']
            )
        )

        scoped_token = self.get_requested_token(
            self.build_authentication_request(
                token=unscoped_token, project_id=self.projectA['id']
            )
        )

        # confirm both tokens are valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': unscoped_token},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': scoped_token},
            expected_status=http.client.OK,
        )

        # create a new role
        role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role['id'], role)

        # assign a new role
        self.put(
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s'
            % {
                'project_id': self.projectA['id'],
                'user_id': self.user1['id'],
                'role_id': role['id'],
            }
        )

        # both tokens should remain valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': unscoped_token},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': scoped_token},
            expected_status=http.client.OK,
        )

    def test_deleting_user_grant_revokes_token(self):
        """Test deleting a user grant revokes token.

        Test Plan:

        - Get a token for user, scoped to Project
        - Delete the grant user has on Project
        - Check token is no longer valid

        """
        auth_data = self.build_authentication_request(
            user_id=self.user['id'],
            password=self.user['password'],
            project_id=self.project['id'],
        )
        token = self.get_requested_token(auth_data)
        # Confirm token is valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )
        # Delete the grant, which should invalidate the token
        grant_url = (
            '/projects/%(project_id)s/users/%(user_id)s/'
            'roles/%(role_id)s'
            % {
                'project_id': self.project['id'],
                'user_id': self.user['id'],
                'role_id': self.role['id'],
            }
        )
        self.delete(grant_url)
        # The revoked token is passed as X-Auth-Token here (token=...), so
        # the request itself is rejected with 401.
        self.head(
            '/auth/tokens',
            token=token,
            expected_status=http.client.UNAUTHORIZED,
        )

    def role_data_fixtures(self):
        """Create extra projects/users/assignments for role-revocation tests.

        Adds projectC, users 4-6 and the additional grants described in
        test_deleting_role_revokes_token's docstring.
        """
        self.projectC = unit.new_project_ref(domain_id=self.domainA['id'])
        PROVIDERS.resource_api.create_project(
            self.projectC['id'], self.projectC
        )
        self.user4 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainB['id']
        )
        self.user5 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainA['id']
        )
        self.user6 = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domainA['id']
        )
        PROVIDERS.identity_api.add_user_to_group(
            self.user5['id'], self.group1['id']
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            group_id=self.group1['id'],
            project_id=self.projectB['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role2['id'],
            user_id=self.user4['id'],
            project_id=self.projectC['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            user_id=self.user6['id'],
            project_id=self.projectA['id'],
        )
        PROVIDERS.assignment_api.create_grant(
            self.role1['id'],
            user_id=self.user6['id'],
            domain_id=self.domainA['id'],
        )

    def test_deleting_role_revokes_token(self):
        """Test deleting a role revokes token.

        Add some additional test data, namely:

        - A third project (project C)
        - Three additional users - user4 owned by domainB and user5 and 6
          owned by domainA (different domain ownership should not affect
          the test results, just provided to broaden test coverage)
        - User5 is a member of group1
        - Group1 gets an additional assignment - role1 on projectB as
          well as its existing role1 on projectA
        - User4 has role2 on Project C
        - User6 has role1 on projectA and domainA
        - This allows us to create 5 tokens by virtue of different types
          of role assignment:
          - user1, scoped to ProjectA by virtue of user role1 assignment
          - user5, scoped to ProjectB by virtue of group role1 assignment
          - user4, scoped to ProjectC by virtue of user role2 assignment
          - user6, scoped to ProjectA by virtue of user role1 assignment
          - user6, scoped to DomainA by virtue of user role1 assignment
        - role1 is then deleted
        - Check the tokens on Project A and B, and DomainA are revoked, but
          not the one for Project C

        """
        self.role_data_fixtures()

        # Now we are ready to start issuing requests
        auth_data = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            project_id=self.projectA['id'],
        )
        tokenA = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user5['id'],
            password=self.user5['password'],
            project_id=self.projectB['id'],
        )
        tokenB = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user4['id'],
            password=self.user4['password'],
            project_id=self.projectC['id'],
        )
        tokenC = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user6['id'],
            password=self.user6['password'],
            project_id=self.projectA['id'],
        )
        tokenD = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user6['id'],
            password=self.user6['password'],
            domain_id=self.domainA['id'],
        )
        tokenE = self.get_requested_token(auth_data)

        # Confirm tokens are valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenA},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenB},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenC},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenD},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenE},
            expected_status=http.client.OK,
        )

        # Delete the role, which should invalidate the tokens
        role_url = '/roles/%s' % self.role1['id']
        self.delete(role_url)

        # Check the tokens that used role1 is invalid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenA},
            expected_status=http.client.NOT_FOUND,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenB},
            expected_status=http.client.NOT_FOUND,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenD},
            expected_status=http.client.NOT_FOUND,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenE},
            expected_status=http.client.NOT_FOUND,
        )

        # ...but the one using role2 is still valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': tokenC},
            expected_status=http.client.OK,
        )

    def test_domain_user_role_assignment_maintains_token(self):
        """Test user-domain role assignment maintains existing token.
        Test Plan:

        - Get a token for user1, scoped to ProjectA
        - Create a grant for user1 on DomainB
        - Check token is still valid

        """
        auth_data = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            project_id=self.projectA['id'],
        )
        token = self.get_requested_token(auth_data)
        # Confirm token is valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )
        # Assign a role, which should not affect the token
        grant_url = (
            '/domains/%(domain_id)s/users/%(user_id)s/'
            'roles/%(role_id)s'
            % {
                'domain_id': self.domainB['id'],
                'user_id': self.user1['id'],
                'role_id': self.role1['id'],
            }
        )
        self.put(grant_url)
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )

    def test_disabling_project_revokes_token(self):
        """Disabling a project must revoke tokens scoped to it."""
        token = self.get_requested_token(
            self.build_authentication_request(
                user_id=self.user3['id'],
                password=self.user3['password'],
                project_id=self.projectA['id'],
            )
        )

        # confirm token is valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )

        # disable the project, which should invalidate the token
        self.patch(
            '/projects/{project_id}'.format(project_id=self.projectA['id']),
            body={'project': {'enabled': False}},
        )

        # user should no longer have access to the project
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.NOT_FOUND,
        )
        self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user3['id'],
                password=self.user3['password'],
                project_id=self.projectA['id'],
            ),
            expected_status=http.client.UNAUTHORIZED,
        )

    def test_deleting_project_revokes_token(self):
        """Deleting a project must revoke tokens scoped to it."""
        token = self.get_requested_token(
            self.build_authentication_request(
                user_id=self.user3['id'],
                password=self.user3['password'],
                project_id=self.projectA['id'],
            )
        )

        # confirm token is valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )

        # delete the project, which should invalidate the token
        self.delete(
            '/projects/{project_id}'.format(project_id=self.projectA['id'])
        )

        # user should no longer have access to the project
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.NOT_FOUND,
        )
        self.v3_create_token(
            self.build_authentication_request(
                user_id=self.user3['id'],
                password=self.user3['password'],
                project_id=self.projectA['id'],
            ),
            expected_status=http.client.UNAUTHORIZED,
        )

    def test_deleting_group_grant_revokes_tokens(self):
        """Test deleting a group grant revokes tokens.

        Test Plan:

        - Get a token for user1, scoped to ProjectA
        - Get a token for user2, scoped to ProjectA
        - Get a token for user3, scoped to ProjectA
        - Delete the grant group1 has on ProjectA
        - Check tokens for user1 & user2 are no longer valid, since user1
          and user2 are members of group1
        - Check token for user3 is still valid (user3 is not in group1 and
          keeps a direct role1 assignment on projectA)

        """
        auth_data = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            project_id=self.projectA['id'],
        )
        token1 = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user2['id'],
            password=self.user2['password'],
            project_id=self.projectA['id'],
        )
        token2 = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user3['id'],
            password=self.user3['password'],
            project_id=self.projectA['id'],
        )
        token3 = self.get_requested_token(auth_data)
        # Confirm tokens are valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token1},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token2},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token3},
            expected_status=http.client.OK,
        )
        # Delete the group grant, which should invalidate the
        # tokens for user1 and user2
        grant_url = (
            '/projects/%(project_id)s/groups/%(group_id)s/'
            'roles/%(role_id)s'
            % {
                'project_id': self.projectA['id'],
                'group_id': self.group1['id'],
                'role_id': self.role1['id'],
            }
        )
        self.delete(grant_url)
        # Also drop user1's and user2's direct role1 assignments on projectA
        # so that the group grant was their only remaining authorization.
        PROVIDERS.assignment_api.delete_grant(
            role_id=self.role1['id'],
            project_id=self.projectA['id'],
            user_id=self.user1['id'],
        )
        PROVIDERS.assignment_api.delete_grant(
            role_id=self.role1['id'],
            project_id=self.projectA['id'],
            user_id=self.user2['id'],
        )
        # The revoked tokens are passed as X-Auth-Token (token=...), so the
        # requests themselves fail with 401.
        self.head(
            '/auth/tokens',
            token=token1,
            expected_status=http.client.UNAUTHORIZED,
        )
        self.head(
            '/auth/tokens',
            token=token2,
            expected_status=http.client.UNAUTHORIZED,
        )
        # User3's token remains valid: user3 is not a member of group1 and
        # still holds a direct role1 assignment on projectA.
        # NOTE(review): the original comment claimed this token "should be
        # invalid too", contradicting the 200 OK the code asserts - confirm
        # the intended revocation-event semantics against the revoke backend.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token3},
            expected_status=http.client.OK,
        )

    def test_domain_group_role_assignment_maintains_token(self):
        """Test domain-group role assignment maintains existing token.

        Test Plan:

        - Get a token for user1, scoped to ProjectA
        - Create a grant for group1 on DomainB
        - Check token is still valid

        """
        auth_data = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            project_id=self.projectA['id'],
        )
        token = self.get_requested_token(auth_data)
        # Confirm token is valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )
        # Create a grant on an unrelated domain, which should NOT
        # invalidate the token (the original comment incorrectly said
        # "Delete the grant" - the call below is a PUT that creates one).
        grant_url = (
            '/domains/%(domain_id)s/groups/%(group_id)s/'
            'roles/%(role_id)s'
            % {
                'domain_id': self.domainB['id'],
                'group_id': self.group1['id'],
                'role_id': self.role1['id'],
            }
        )
        self.put(grant_url)
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token},
            expected_status=http.client.OK,
        )

    def test_group_membership_changes_revokes_token(self):
        """Test add/removal to/from group revokes token.
        Test Plan:

        - Get a token for user1, scoped to ProjectA
        - Get a token for user2, scoped to ProjectA
        - Remove user1 from group1
        - Check token for user1 is no longer valid
        - Check token for user2 is still valid, even though user2 is also
          part of group1
        - Add user2 to group2
        - Check token for user2 is still valid (adding a user to a group
          does not revoke tokens; the original plan's "now no longer valid"
          contradicted the assertion below)

        """
        auth_data = self.build_authentication_request(
            user_id=self.user1['id'],
            password=self.user1['password'],
            project_id=self.projectA['id'],
        )
        token1 = self.get_requested_token(auth_data)
        auth_data = self.build_authentication_request(
            user_id=self.user2['id'],
            password=self.user2['password'],
            project_id=self.projectA['id'],
        )
        token2 = self.get_requested_token(auth_data)
        # Confirm tokens are valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token1},
            expected_status=http.client.OK,
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token2},
            expected_status=http.client.OK,
        )
        # Remove user1 from group1, which should invalidate
        # the token
        self.delete(
            '/groups/%(group_id)s/users/%(user_id)s'
            % {'group_id': self.group1['id'], 'user_id': self.user1['id']}
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token1},
            expected_status=http.client.NOT_FOUND,
        )
        # But user2's token should still be valid
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token2},
            expected_status=http.client.OK,
        )
        # Adding user2 to a group should not invalidate token
        self.put(
            '/groups/%(group_id)s/users/%(user_id)s'
            % {'group_id': self.group2['id'], 'user_id': self.user2['id']}
        )
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': token2},
            expected_status=http.client.OK,
        )

    def test_removing_role_assignment_does_not_affect_other_users(self):
        """Revoking a role from one user should not affect other users."""
        time = timeutils.utcnow()
        with freezegun.freeze_time(time) as frozen_datetime:
            # This group grant is not needed for the test
            self.delete(
                '/projects/%(p_id)s/groups/%(g_id)s/roles/%(r_id)s'
                % {
                    'p_id': self.projectA['id'],
                    'g_id': self.group1['id'],
                    'r_id': self.role1['id'],
                }
            )

            # NOTE(lbragstad): Here we advance the clock one second to pass
            # into the threshold of a new second because we just persisted a
            # revocation event for removing a role from a group on a project.
            # One thing to note about that revocation event is that it has no
            # context about the group, so even though user3 might not be in
            # group1, they could have their token revoked because the
            # revocation event is very general.
            frozen_datetime.tick(delta=datetime.timedelta(seconds=1))

            user1_token = self.get_requested_token(
                self.build_authentication_request(
                    user_id=self.user1['id'],
                    password=self.user1['password'],
                    project_id=self.projectA['id'],
                )
            )

            user3_token = self.get_requested_token(
                self.build_authentication_request(
                    user_id=self.user3['id'],
                    password=self.user3['password'],
                    project_id=self.projectA['id'],
                )
            )

            # delete relationships between user1 and projectA from setUp
            self.delete(
                '/projects/%(p_id)s/users/%(u_id)s/roles/%(r_id)s'
                % {
                    'p_id': self.projectA['id'],
                    'u_id': self.user1['id'],
                    'r_id': self.role1['id'],
                }
            )
            # authorization for the first user should now fail
            self.head(
                '/auth/tokens',
                headers={'X-Subject-Token': user1_token},
                expected_status=http.client.NOT_FOUND,
            )
            self.v3_create_token(
                self.build_authentication_request(
                    user_id=self.user1['id'],
                    password=self.user1['password'],
                    project_id=self.projectA['id'],
                ),
                expected_status=http.client.UNAUTHORIZED,
            )

            # authorization for the second user should still succeed
            self.head(
                '/auth/tokens',
                headers={'X-Subject-Token': user3_token},
                expected_status=http.client.OK,
            )
            self.v3_create_token(
                self.build_authentication_request(
                    user_id=self.user3['id'],
                    password=self.user3['password'],
                    project_id=self.projectA['id'],
                )
            )

    def test_deleting_project_deletes_grants(self):
        """Deleting a project must delete the role grants on it."""
        # This is to make it a little bit more pretty with PEP8
        role_path = (
            '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s'
        )
        role_path = role_path % {
            'user_id': self.user['id'],
            'project_id': self.projectA['id'],
            'role_id': self.role['id'],
        }

        # grant the user a role on the project
        self.put(role_path)

        # delete the project, which should remove the roles
        self.delete(
            '/projects/{project_id}'.format(project_id=self.projectA['id'])
        )

        # Make sure that we get a 404 Not Found when heading that role.
        self.head(role_path, expected_status=http.client.NOT_FOUND)

    def test_revoke_token_from_token(self):
        # Test that a scoped token can be requested from an unscoped token,
        # the scoped token can be revoked, and the unscoped token remains
        # valid.

        unscoped_token = self.get_requested_token(
            self.build_authentication_request(
                user_id=self.user1['id'], password=self.user1['password']
            )
        )

        # Get a project-scoped token from the unscoped token
        project_scoped_token = self.get_requested_token(
            self.build_authentication_request(
                token=unscoped_token, project_id=self.projectA['id']
            )
        )

        # Get a domain-scoped token from the unscoped token
        domain_scoped_token = self.get_requested_token(
            self.build_authentication_request(
                token=unscoped_token, domain_id=self.domainA['id']
            )
        )

        # revoke the project-scoped token.
        self.delete(
            '/auth/tokens', headers={'X-Subject-Token': project_scoped_token}
        )

        # The project-scoped token is invalidated.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': project_scoped_token},
            expected_status=http.client.NOT_FOUND,
        )

        # The unscoped token should still be valid.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': unscoped_token},
            expected_status=http.client.OK,
        )

        # The domain-scoped token should still be valid.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': domain_scoped_token},
            expected_status=http.client.OK,
        )

        # revoke the domain-scoped token.
        self.delete(
            '/auth/tokens', headers={'X-Subject-Token': domain_scoped_token}
        )

        # The domain-scoped token is invalid.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': domain_scoped_token},
            expected_status=http.client.NOT_FOUND,
        )

        # The unscoped token should still be valid.
        self.head(
            '/auth/tokens',
            headers={'X-Subject-Token': unscoped_token},
            expected_status=http.client.OK,
        )


class TestTokenRevokeApi(TestTokenRevokeById):
    """Test token revocation on the v3 Identity API."""

    def config_overrides(self):
        super().config_overrides()
        self.config_fixture.config(
            group='token', provider='fernet', revoke_by_id=False
        )
        self.useFixture(
            ksfixtures.KeyRepository(
                self.config_fixture,
                'fernet_tokens',
                CONF.fernet_tokens.max_active_keys,
            )
        )

    def assertValidDeletedProjectResponse(self, events_response, project_id):
        # Expect exactly one revocation event, for the given project.
        events = events_response['events']
        self.assertEqual(1, len(events))
        self.assertEqual(project_id, events[0]['project_id'])
        self.assertIsNotNone(events[0]['issued_before'])
        self.assertIsNotNone(events_response['links'])
        # Strip timestamps/links so the remainder can be compared exactly.
        del events_response['events'][0]['issued_before']
        del events_response['events'][0]['revoked_at']
        del events_response['links']
        expected_response = {'events': [{'project_id': project_id}]}
        self.assertEqual(expected_response, events_response)

    def assertValidRevokedTokenResponse(self, events_response, **kwargs):
        # Expect exactly one revocation event matching all given key/values.
        events = events_response['events']
        self.assertEqual(1, len(events))
        for k, v in kwargs.items():
            self.assertEqual(v, events[0].get(k))
        self.assertIsNotNone(events[0]['issued_before'])
        self.assertIsNotNone(events_response['links'])
        # Strip timestamps/links so the remainder can be compared exactly.
        del events_response['events'][0]['issued_before']
        del events_response['events'][0]['revoked_at']
        del events_response['links']

        expected_response = {'events': [kwargs]}
        self.assertEqual(expected_response, events_response)

    def test_revoke_token(self):
        """Revoking a token produces a matching OS-REVOKE event."""
        scoped_token = self.get_scoped_token()
        headers = {'X-Subject-Token': scoped_token}
        response = self.get('/auth/tokens', headers=headers).json_body['token']

        self.delete('/auth/tokens', headers=headers)
        self.head(
            '/auth/tokens',
            headers=headers,
            expected_status=http.client.NOT_FOUND,
        )
        events_response = self.get('/OS-REVOKE/events').json_body
        self.assertValidRevokedTokenResponse(
            events_response, audit_id=response['audit_ids'][0]
        )

    def test_get_revoke_by_id_false_returns_gone(self):
        self.get(
            '/auth/tokens/OS-PKI/revoked', expected_status=http.client.GONE
        )

    def test_head_revoke_by_id_false_returns_gone(self):
        self.head(
            '/auth/tokens/OS-PKI/revoked', expected_status=http.client.GONE
        )

    def test_revoke_by_id_true_returns_forbidden(self):
        self.config_fixture.config(group='token', revoke_by_id=True)
        self.get(
            '/auth/tokens/OS-PKI/revoked',
            expected_status=http.client.FORBIDDEN,
        )
        self.head(
            '/auth/tokens/OS-PKI/revoked',
            expected_status=http.client.FORBIDDEN,
        )

    def test_list_delete_project_shows_in_event_list(self):
        self.role_data_fixtures()
        events = self.get('/OS-REVOKE/events').json_body['events']
        self.assertEqual([], events)
        self.delete(
            '/projects/{project_id}'.format(project_id=self.projectA['id'])
        )
        events_response = self.get('/OS-REVOKE/events').json_body

        self.assertValidDeletedProjectResponse(
            events_response, self.projectA['id']
        )

    def assertEventDataInList(self, events, **kwargs):
        # Scan the event list for an event whose fields include every
        # key/value pair in kwargs.
        found = False
        for e in events:
            for key, value in kwargs.items():
                try:
                    if e[key] != value:
                        break
                except KeyError:
                    # Break the loop and present a nice error instead of
                    # KeyError
                    break

            else:
                # If the value of the event[key] matches the value of the kwarg
                # for each item in kwargs, the event was fully matched and
                # the assertTrue below should succeed.
                found = True
        self.assertTrue(
            found,
            'event with correct values not in list, expected to '
            'find event with key-value pairs. Expected: '
            '"%(expected)s" Events: "%(events)s"'
            % {
                'expected': ','.join(
                    [f"'{k}={v}'" for k, v in kwargs.items()]
                ),
                'events': events,
            },
        )

    def test_list_delete_token_shows_in_event_list(self):
        self.role_data_fixtures()
        events = self.get('/OS-REVOKE/events').json_body['events']
        self.assertEqual([], events)

        scoped_token = self.get_scoped_token()
        headers = {'X-Subject-Token': scoped_token}
        auth_req = self.build_authentication_request(token=scoped_token)
        response = self.v3_create_token(auth_req)
        token2 = response.json_body['token']
        headers2 = {'X-Subject-Token': response.headers['X-Subject-Token']}

        response = self.v3_create_token(auth_req)
        # (body intentionally unused; only the header token is kept)
        response.json_body['token']
        headers3 = {'X-Subject-Token': response.headers['X-Subject-Token']}

        self.head(
            '/auth/tokens', headers=headers, expected_status=http.client.OK
        )
        self.head(
            '/auth/tokens', headers=headers2, expected_status=http.client.OK
        )
        self.head(
            '/auth/tokens', headers=headers3, expected_status=http.client.OK
        )

        self.delete('/auth/tokens', headers=headers)
        # NOTE(ayoung): not deleting token3, as it should be deleted
        # by previous
        events_response = self.get('/OS-REVOKE/events').json_body
        events = events_response['events']
        self.assertEqual(1, len(events))
        self.assertEventDataInList(events, audit_id=token2['audit_ids'][1])
        self.head(
            '/auth/tokens',
            headers=headers,
            expected_status=http.client.NOT_FOUND,
        )
        self.head(
            '/auth/tokens', headers=headers2, expected_status=http.client.OK
        )
        self.head(
            '/auth/tokens', headers=headers3, expected_status=http.client.OK
        )

    def test_list_with_filter(self):
        self.role_data_fixtures()
        events = self.get('/OS-REVOKE/events').json_body['events']
        self.assertEqual(0, len(events))

        scoped_token = self.get_scoped_token()
        headers = {'X-Subject-Token': scoped_token}
        auth = self.build_authentication_request(token=scoped_token)
        headers2 = {'X-Subject-Token': self.get_requested_token(auth)}
        self.delete('/auth/tokens', headers=headers)
        self.delete('/auth/tokens', headers=headers2)

        events = self.get('/OS-REVOKE/events').json_body['events']
        self.assertEqual(2, len(events))

        # A 'since' filter in the future must exclude both events.
        future = utils.isotime(
            timeutils.utcnow() + datetime.timedelta(seconds=1000)
        )

        events = self.get('/OS-REVOKE/events?since=%s' % (future)).json_body[
            'events'
        ]
        self.assertEqual(0, len(events))


class TestAuthExternalDisabled(test_v3.RestfulTestCase):
    def config_overrides(self):
        super().config_overrides()
        self.config_fixture.config(group='auth', methods=['password', 'token'])

    def test_remote_user_disabled(self):
        # With the 'external' method disabled, a REMOTE_USER environment
        # alone must not authenticate.
        app = self.loadapp()
        remote_user = '{}@{}'.format(self.user['name'], self.domain['name'])
        with app.test_client() as c:
            c.environ_base.update(
                self.build_external_auth_environ(remote_user)
            )
            auth_data = self.build_authentication_request()
            c.post(
                '/v3/auth/tokens',
                json=auth_data,
                expected_status_code=http.client.UNAUTHORIZED,
            )


# FIXME(morgan): This test case must be re-worked to function under flask. It
# has been commented out until it is re-worked ensuring no issues when webob
# classes are removed.
# https://bugs.launchpad.net/keystone/+bug/1793756
# class AuthExternalDomainBehavior(object):
#     content_type = 'json'
#
#     def test_remote_user_with_realm(self):
#         api = auth.controllers.Auth()
#         remote_user = self.user['name']
#         remote_domain = self.domain['name']
#         request, auth_info, auth_context = self.build_external_auth_request(
#             remote_user, remote_domain=remote_domain, kerberos=self.kerberos)
#
#         api.authenticate(request, auth_info, auth_context)
#         self.assertEqual(self.user['id'], auth_context['user_id'])
#
#         # Now test to make sure the user name can, itself, contain the
#         # '@' character.
# user = {'name': 'myname@mydivision'} # PROVIDERS.identity_api.update_user(self.user['id'], user) # remote_user = user['name'] # request, auth_info, auth_context = self.build_external_auth_request( # remote_user, remote_domain=remote_domain, kerberos=self.kerberos) # # api.authenticate(request, auth_info, auth_context) # self.assertEqual(self.user['id'], auth_context['user_id']) # # # FIXME(morgan): This test case must be re-worked to function under flask. It # has been commented out until it is re-worked ensuring no issues when webob # classes are removed. # https://bugs.launchpad.net/keystone/+bug/1793756 # class TestAuthExternalDefaultDomain(object): # content_type = 'json' # # def config_overrides(self): # super(TestAuthExternalDefaultDomain, self).config_overrides() # self.kerberos = False # self.auth_plugin_config_override(external='DefaultDomain') # # def test_remote_user_with_default_domain(self): # api = auth.controllers.Auth() # remote_user = self.default_domain_user['name'] # request, auth_info, auth_context = self.build_external_auth_request( # remote_user, kerberos=self.kerberos) # # api.authenticate(request, auth_info, auth_context) # self.assertEqual(self.default_domain_user['id'], # auth_context['user_id']) # # # Now test to make sure the user name can, itself, contain the # # '@' character. 
# user = {'name': 'myname@mydivision'} # PROVIDERS.identity_api.update_user( # self.default_domain_user['id'], user # ) # remote_user = user['name'] # request, auth_info, auth_context = self.build_external_auth_request( # remote_user, kerberos=self.kerberos) # # api.authenticate(request, auth_info, auth_context) # self.assertEqual(self.default_domain_user['id'], # auth_context['user_id']) # class TestAuthJSONExternal(test_v3.RestfulTestCase): content_type = 'json' def auth_plugin_config_override(self, methods=None, **method_classes): self.config_fixture.config(group='auth', methods=[]) def test_remote_user_no_method(self): app = self.loadapp() with app.test_client() as c: c.environ_base.update( self.build_external_auth_environ( self.default_domain_user['name'] ) ) auth_data = self.build_authentication_request() c.post( '/v3/auth/tokens', json=auth_data, expected_status_code=http.client.UNAUTHORIZED, ) class TrustAPIBehavior(test_v3.RestfulTestCase): """Redelegation valid and secure. Redelegation is a hierarchical structure of trusts between initial trustor and a group of users allowed to impersonate trustor and act in his name. Hierarchy is created in a process of trusting already trusted permissions and organized as an adjacency list using 'redelegated_trust_id' field. Redelegation is valid if each subsequent trust in a chain passes 'not more' permissions than being redelegated. 
    Trust constraints are:
     * roles - set of roles trusted by trustor
     * expiration_time
     * allow_redelegation - a flag
     * redelegation_count - decreasing value restricting length of trust
       chain
     * remaining_uses - DISALLOWED when allow_redelegation == True

    Trust becomes invalid in case:
     * trust roles were revoked from trustor
     * one of the users in the delegation chain was disabled or deleted
     * expiration time passed
     * one of the parent trusts has become invalid
     * one of the parent trusts was deleted

    """

    def config_overrides(self):
        super().config_overrides()
        self.config_fixture.config(
            group='trust', allow_redelegation=True, max_redelegation_count=10
        )

    def setUp(self):
        super().setUp()
        # Create a trustee to delegate stuff to
        self.trustee_user = unit.create_user(
            PROVIDERS.identity_api, domain_id=self.domain_id
        )

        # trustor->trustee, redelegable, short (1 minute) expiry
        self.redelegated_trust_ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user['id'],
            project_id=self.project_id,
            impersonation=True,
            expires=dict(minutes=1),
            role_ids=[self.role_id],
            allow_redelegation=True,
        )

        # trustor->trustee, no explicit expiry
        # NOTE(review): the original comment said "(no redelegation)" even
        # though this ref sets allow_redelegation=True - the difference from
        # the ref above is only the missing 'expires'.
        self.chained_trust_ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user['id'],
            project_id=self.project_id,
            impersonation=True,
            role_ids=[self.role_id],
            allow_redelegation=True,
        )

    def _get_trust_token(self, trust):
        # Authenticate as the trustee against the given trust and return
        # the resulting trust-scoped token.
        trust_id = trust['id']
        auth_data = self.build_authentication_request(
            user_id=self.trustee_user['id'],
            password=self.trustee_user['password'],
            trust_id=trust_id,
        )
        trust_token = self.get_requested_token(auth_data)
        return trust_token

    def test_depleted_redelegation_count_error(self):
        self.redelegated_trust_ref['redelegation_count'] = 0
        r = self.post(
            '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}
        )
        trust = self.assertValidTrustResponse(r)
        trust_token = self._get_trust_token(trust)

        # Attempt to create a redelegated trust.
        self.post(
            '/OS-TRUST/trusts',
            body={'trust': self.chained_trust_ref},
            token=trust_token,
            expected_status=http.client.FORBIDDEN,
        )

    def test_modified_redelegation_count_error(self):
        r = self.post(
            '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}
        )
        trust = self.assertValidTrustResponse(r)
        trust_token = self._get_trust_token(trust)

        # Attempt to create a redelegated trust with incorrect
        # redelegation_count.
        correct = trust['redelegation_count'] - 1
        incorrect = correct - 1
        self.chained_trust_ref['redelegation_count'] = incorrect
        self.post(
            '/OS-TRUST/trusts',
            body={'trust': self.chained_trust_ref},
            token=trust_token,
            expected_status=http.client.FORBIDDEN,
        )

    def test_max_redelegation_count_constraint(self):
        incorrect = CONF.trust.max_redelegation_count + 1
        self.redelegated_trust_ref['redelegation_count'] = incorrect
        self.post(
            '/OS-TRUST/trusts',
            body={'trust': self.redelegated_trust_ref},
            expected_status=http.client.FORBIDDEN,
        )

    def test_redelegation_expiry(self):
        r = self.post(
            '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}
        )
        trust = self.assertValidTrustResponse(r)
        trust_token = self._get_trust_token(trust)

        # Attempt to create a redelegated trust supposed to last longer
        # than the parent trust: let's give it 10 minutes (>1 minute).
        too_long_live_chained_trust_ref = unit.new_trust_ref(
            trustor_user_id=self.user_id,
            trustee_user_id=self.trustee_user['id'],
            project_id=self.project_id,
            impersonation=True,
            expires=dict(minutes=10),
            role_ids=[self.role_id],
        )
        self.post(
            '/OS-TRUST/trusts',
            body={'trust': too_long_live_chained_trust_ref},
            token=trust_token,
            expected_status=http.client.FORBIDDEN,
        )

    def test_redelegation_remaining_uses(self):
        r = self.post(
            '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}
        )
        trust = self.assertValidTrustResponse(r)
        trust_token = self._get_trust_token(trust)

        # Attempt to create a redelegated trust with remaining_uses defined.
        # It must fail according to specification: remaining_uses must be
        # omitted for trust redelegation. Any number here.
        self.chained_trust_ref['remaining_uses'] = 5
        self.post(
            '/OS-TRUST/trusts',
            body={'trust': self.chained_trust_ref},
            token=trust_token,
            expected_status=http.client.BAD_REQUEST,
        )

    def test_roles_subset(self):
        # Build second role
        role = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role['id'], role)
        # assign a new role to the user
        PROVIDERS.assignment_api.create_grant(
            role_id=role['id'],
            user_id=self.user_id,
            project_id=self.project_id,
        )

        # Create first trust with extended set of roles
        ref = self.redelegated_trust_ref
        ref['expires_at'] = (
            timeutils.utcnow().replace(year=2032).strftime(unit.TIME_FORMAT)
        )
        ref['roles'].append({'id': role['id']})
        r = self.post('/OS-TRUST/trusts', body={'trust': ref})
        trust = self.assertValidTrustResponse(r)
        # Trust created with exact set of roles (checked by role id)
        role_id_set = {r['id'] for r in ref['roles']}
        trust_role_id_set = {r['id'] for r in trust['roles']}
        self.assertEqual(role_id_set, trust_role_id_set)

        trust_token = self._get_trust_token(trust)

        # Chain second trust with roles subset
        self.chained_trust_ref['expires_at'] = (
            timeutils.utcnow().replace(year=2028).strftime(unit.TIME_FORMAT)
        )
        r = self.post(
            '/OS-TRUST/trusts',
            body={'trust': self.chained_trust_ref},
            token=trust_token,
        )
        trust2 = self.assertValidTrustResponse(r)
        # First trust contains roles superset
        # Second trust contains roles subset
        role_id_set1 = {r['id'] for r in trust['roles']}
        role_id_set2 = {r['id'] for r in trust2['roles']}
        self.assertThat(role_id_set1, matchers.GreaterThan(role_id_set2))

    def test_trust_with_implied_roles(self):
        # Create some roles
        role1 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role1['id'], role1)
        role2 = unit.new_role_ref()
        PROVIDERS.role_api.create_role(role2['id'], role2)

        # Implication
        PROVIDERS.role_api.create_implied_role(role1['id'], role2['id'])

        # Assign new roles to the user (with role2 implied)
PROVIDERS.assignment_api.create_grant( role_id=role1['id'], user_id=self.user_id, project_id=self.project_id, ) # Create trust ref = self.redelegated_trust_ref ref['roles'] = [{'id': role1['id']}, {'id': role2['id']}] resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # Trust created with exact set of roles (checked by role id) role_ids = [r['id'] for r in ref['roles']] trust_role_ids = [r['id'] for r in trust['roles']] # Compare requested roles with roles in response self.assertEqual(role_ids, trust_role_ids) # Get a trust-scoped token auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) resp = self.post('/auth/tokens', body=auth_data) trust_token_role_ids = [r['id'] for r in resp.json['token']['roles']] # Compare requested roles with roles given in token data self.assertEqual(sorted(role_ids), sorted(trust_token_role_ids)) def test_redelegate_with_role_by_name(self): # For role by name testing ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_names=[self.role['name']], allow_redelegation=True, ) ref['expires_at'] = ( timeutils.utcnow().replace(year=2032).strftime(unit.TIME_FORMAT) ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # Ensure we can get a token with this trust trust_token = self._get_trust_token(trust) # Chain second trust with roles subset ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, role_names=[self.role['name']], allow_redelegation=True, ) ref['expires_at'] = ( timeutils.utcnow().replace(year=2028).strftime(unit.TIME_FORMAT) ) r = self.post( '/OS-TRUST/trusts', body={'trust': ref}, token=trust_token ) trust = 
self.assertValidTrustResponse(r) # Ensure we can get a token with this trust self._get_trust_token(trust) def test_redelegate_new_role_fails(self): r = self.post( '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref} ) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Build second trust with a role not in parent's roles role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # assign a new role to the user PROVIDERS.assignment_api.create_grant( role_id=role['id'], user_id=self.user_id, project_id=self.project_id, ) # Try to chain a trust with the role not from parent trust self.chained_trust_ref['roles'] = [{'id': role['id']}] # Bypass policy enforcement with mock.patch.object(policy, 'enforce', return_value=True): self.post( '/OS-TRUST/trusts', body={'trust': self.chained_trust_ref}, token=trust_token, expected_status=http.client.FORBIDDEN, ) def test_redelegation_terminator(self): self.redelegated_trust_ref['expires_at'] = ( timeutils.utcnow().replace(year=2032).strftime(unit.TIME_FORMAT) ) r = self.post( '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref} ) trust = self.assertValidTrustResponse(r) trust_token = self._get_trust_token(trust) # Build second trust - the terminator self.chained_trust_ref['expires_at'] = ( timeutils.utcnow().replace(year=2028).strftime(unit.TIME_FORMAT) ) ref = dict( self.chained_trust_ref, redelegation_count=1, allow_redelegation=False, ) r = self.post( '/OS-TRUST/trusts', body={'trust': ref}, token=trust_token ) trust = self.assertValidTrustResponse(r) # Check that allow_redelegation == False caused redelegation_count # to be set to 0, while allow_redelegation is removed self.assertNotIn('allow_redelegation', trust) self.assertEqual(0, trust['redelegation_count']) trust_token = self._get_trust_token(trust) # Build third trust, same as second self.post( '/OS-TRUST/trusts', body={'trust': ref}, token=trust_token, expected_status=http.client.FORBIDDEN, ) def 
test_redelegation_without_impersonation(self): # Update trust to not allow impersonation self.redelegated_trust_ref['impersonation'] = False # Create trust resp = self.post( '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref}, expected_status=http.client.CREATED, ) trust = self.assertValidTrustResponse(resp) # Get trusted token without impersonation auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) trust_token = self.get_requested_token(auth_data) # Create second user for redelegation trustee_user_2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) # Trust for redelegation trust_ref_2 = unit.new_trust_ref( trustor_user_id=self.trustee_user['id'], trustee_user_id=trustee_user_2['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], allow_redelegation=False, ) # Creating a second trust should not be allowed since trustor does not # have the role to delegate thus returning 404 NOT FOUND. 
resp = self.post( '/OS-TRUST/trusts', body={'trust': trust_ref_2}, token=trust_token, expected_status=http.client.NOT_FOUND, ) def test_create_unscoped_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) self.assertValidTrustResponse(r, ref) def test_create_trust_no_roles(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.FORBIDDEN, ) def _initialize_test_consume_trust(self, count): # Make sure remaining_uses is decremented as we consume the trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, remaining_uses=count, role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) # make sure the trust exists trust = self.assertValidTrustResponse(r, ref) r = self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']) ) # get a token for the trustee auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], ) r = self.v3_create_token(auth_data) token = r.headers.get('X-Subject-Token') # get a trust token, consume one use auth_data = self.build_authentication_request( token=token, trust_id=trust['id'] ) r = self.v3_create_token(auth_data) return trust def test_authenticate_without_trust_dict_returns_bad_request(self): # Authenticate for a token to use in the request token = self.v3_create_token( self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], ) ).headers.get('X-Subject-Token') auth_data = { 'auth': { 'identity': {'methods': ['token'], 'token': {'id': token}}, # We don't need a trust to execute this test, the # OS-TRUST:trust key of the request body just has to be a # string 
instead of a dictionary in order to throw a 500 when # it should a 400 Bad Request. 'scope': {'OS-TRUST:trust': ''}, } } self.admin_request( method='POST', path='/v3/auth/tokens', body=auth_data, expected_status=http.client.BAD_REQUEST, ) def test_consume_trust_once(self): trust = self._initialize_test_consume_trust(2) # check decremented value r = self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']) ) trust = r.result.get('trust') self.assertIsNotNone(trust) self.assertEqual(1, trust['remaining_uses']) self.assertEqual(self.role['name'], trust['roles'][0]['name']) self.assertEqual(self.role['id'], trust['roles'][0]['id']) def test_create_one_time_use_trust(self): trust = self._initialize_test_consume_trust(1) # No more uses, the trust is made unavailable self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']), expected_status=http.client.NOT_FOUND, ) # this time we can't get a trust token auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_create_unlimited_use_trust(self): # by default trusts are unlimited in terms of tokens that can be # generated from them, this test creates such a trust explicitly ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, remaining_uses=None, role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) r = self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']) ) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], ) r = self.v3_create_token(auth_data) token = r.headers.get('X-Subject-Token') auth_data = self.build_authentication_request( token=token, trust_id=trust['id'] ) r = self.v3_create_token(auth_data) r = 
self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']) ) trust = r.result.get('trust') self.assertIsNone(trust['remaining_uses']) def test_impersonation_token_cannot_create_new_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) trust_token = self.get_requested_token(auth_data) # Build second trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, token=trust_token, expected_status=http.client.FORBIDDEN, ) def test_trust_deleted_grant(self): # create a new role role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) grant_url = ( '/projects/%(project_id)s/users/%(user_id)s/' 'roles/%(role_id)s' % { 'project_id': self.project_id, 'user_id': self.user_id, 'role_id': role['id'], } ) # assign a new role self.put(grant_url) # create a trust that delegates the new role ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[role['id']], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # delete the grant self.delete(grant_url) # attempt to get a trust token with the deleted grant # and ensure it's unauthorized auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) r = self.v3_create_token( 
auth_data, expected_status=http.client.FORBIDDEN ) def test_trust_chained(self): """Test that a trust token can't be used to execute another trust. To do this, we create an A->B->C hierarchy of trusts, then attempt to execute the trusts in series (C->B->A). """ # create a sub-trustee user sub_trustee_user = unit.create_user( PROVIDERS.identity_api, domain_id=test_v3.DEFAULT_DOMAIN_ID ) sub_trustee_user_id = sub_trustee_user['id'] # create a new role role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # assign the new role to trustee self.put( '/projects/%(project_id)s/users/%(user_id)s/roles/%(role_id)s' % { 'project_id': self.project_id, 'user_id': self.trustee_user['id'], 'role_id': role['id'], } ) # create a trust from trustor -> trustee ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust1 = self.assertValidTrustResponse(r) # authenticate as trustee so we can create a second trust auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], project_id=self.project_id, ) token = self.get_requested_token(auth_data) # create a trust from trustee -> sub-trustee ref = unit.new_trust_ref( trustor_user_id=self.trustee_user['id'], trustee_user_id=sub_trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[role['id']], ) r = self.post('/OS-TRUST/trusts', token=token, body={'trust': ref}) trust2 = self.assertValidTrustResponse(r) # authenticate as sub-trustee and get a trust token auth_data = self.build_authentication_request( user_id=sub_trustee_user['id'], password=sub_trustee_user['password'], trust_id=trust2['id'], ) trust_token = self.get_requested_token(auth_data) # attempt to get the second trust using a trust token auth_data = 
self.build_authentication_request( token=trust_token, trust_id=trust1['id'] ) r = self.v3_create_token( auth_data, expected_status=http.client.FORBIDDEN ) def assertTrustTokensRevoked(self, trust_id): revocation_response = self.get('/OS-REVOKE/events') revocation_events = revocation_response.json_body['events'] found = False for event in revocation_events: if event.get('OS-TRUST:trust_id') == trust_id: found = True self.assertTrue( found, 'event with trust_id %s not found in list' % trust_id ) def test_delete_trust_revokes_tokens(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) trust_id = trust['id'] auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust_id, ) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r, self.trustee_user) trust_token = r.headers['X-Subject-Token'] self.delete(f'/OS-TRUST/trusts/{trust_id}') headers = {'X-Subject-Token': trust_token} self.head( '/auth/tokens', headers=headers, expected_status=http.client.NOT_FOUND, ) self.assertTrustTokensRevoked(trust_id) def disable_user(self, user): user['enabled'] = False PROVIDERS.identity_api.update_user(user['id'], user) def test_trust_get_token_fails_if_trustor_disabled(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token(auth_data) 
self.disable_user(self.user) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token(auth_data, expected_status=http.client.FORBIDDEN) def test_trust_get_token_fails_if_trustee_disabled(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token(auth_data) self.disable_user(self.trustee_user) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_delete_trust(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) self.delete('/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id'])) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_change_password_invalidates_trust_tokens(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = 
self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r, self.user) trust_token = r.headers.get('X-Subject-Token') self.get( '/OS-TRUST/trusts?trustor_user_id=%s' % self.user_id, token=trust_token, ) self.assertValidUserResponse( self.patch( '/users/%s' % self.trustee_user['id'], body={'user': {'password': uuid.uuid4().hex}}, ) ) self.get( '/OS-TRUST/trusts?trustor_user_id=%s' % self.user_id, expected_status=http.client.UNAUTHORIZED, token=trust_token, ) def test_trustee_can_do_role_ops(self): resp = self.post( '/OS-TRUST/trusts', body={'trust': self.redelegated_trust_ref} ) trust = self.assertValidTrustResponse(resp) trust_token = self._get_trust_token(trust) resp = self.get( '/OS-TRUST/trusts/{trust_id}/roles'.format(trust_id=trust['id']), token=trust_token, ) self.assertValidRoleListResponse(resp, self.role) self.head( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {'trust_id': trust['id'], 'role_id': self.role['id']}, token=trust_token, expected_status=http.client.OK, ) resp = self.get( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {'trust_id': trust['id'], 'role_id': self.role['id']}, token=trust_token, ) self.assertValidRoleResponse(resp, self.role) def test_do_not_consume_remaining_uses_when_get_token_fails(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user['id'], project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], remaining_uses=3, ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) new_trust = r.result.get('trust') trust_id = new_trust.get('id') # Pass in another user's ID as the trustee, the result being a failed # token authenticate and the remaining_uses of the trust should not be # decremented. 
auth_data = self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], trust_id=trust_id, ) self.v3_create_token(auth_data, expected_status=http.client.FORBIDDEN) r = self.get('/OS-TRUST/trusts/%s' % trust_id) self.assertEqual(3, r.result.get('trust').get('remaining_uses')) class TestTrustChain(test_v3.RestfulTestCase): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='trust', allow_redelegation=True, max_redelegation_count=10 ) def setUp(self): super().setUp() """Create a trust chain using redelegation. A trust chain is a series of trusts that are redelegated. For example, self.user_list consists of userA, userB, and userC. The first trust in the trust chain is going to be established between self.user and userA, call it trustA. Then, userA is going to obtain a trust scoped token using trustA, and with that token create a trust between userA and userB called trustB. This pattern will continue with userB creating a trust with userC. 
So the trust chain should look something like: trustA -> trustB -> trustC Where: self.user is trusting userA with trustA userA is trusting userB with trustB userB is trusting userC with trustC """ self.user_list = list() self.trust_chain = list() for _ in range(3): user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.user_list.append(user) # trustor->trustee redelegation with impersonation trustee = self.user_list[0] trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=trustee['id'], project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], allow_redelegation=True, redelegation_count=3, ) # Create a trust between self.user and the first user in the list r = self.post('/OS-TRUST/trusts', body={'trust': trust_ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id'], ) # Generate a trusted token for the first user trust_token = self.get_requested_token(auth_data) self.trust_chain.append(trust) # Loop through the user to create a chain of redelegated trust. 
for next_trustee in self.user_list[1:]: trust_ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=next_trustee['id'], project_id=self.project_id, impersonation=True, role_ids=[self.role_id], allow_redelegation=True, ) r = self.post( '/OS-TRUST/trusts', body={'trust': trust_ref}, token=trust_token, ) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=next_trustee['id'], password=next_trustee['password'], trust_id=trust['id'], ) trust_token = self.get_requested_token(auth_data) self.trust_chain.append(trust) trustee = self.user_list[-1] trust = self.trust_chain[-1] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'], trust_id=trust['id'], ) self.last_token = self.get_requested_token(auth_data) def assert_user_authenticate(self, user): auth_data = self.build_authentication_request( user_id=user['id'], password=user['password'] ) r = self.v3_create_token(auth_data) self.assertValidTokenResponse(r) def assert_trust_tokens_revoked(self, trust_id): trustee = self.user_list[0] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'] ) r = self.v3_create_token(auth_data) self.assertValidTokenResponse(r) revocation_response = self.get('/OS-REVOKE/events') revocation_events = revocation_response.json_body['events'] found = False for event in revocation_events: if event.get('OS-TRUST:trust_id') == trust_id: found = True self.assertTrue( found, 'event with trust_id %s not found in list' % trust_id ) def test_delete_trust_cascade(self): self.assert_user_authenticate(self.user_list[0]) self.delete( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': self.trust_chain[0]['id']} ) headers = {'X-Subject-Token': self.last_token} self.head( '/auth/tokens', headers=headers, expected_status=http.client.NOT_FOUND, ) self.assert_trust_tokens_revoked(self.trust_chain[0]['id']) def test_delete_broken_chain(self): 
self.assert_user_authenticate(self.user_list[0]) self.delete( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': self.trust_chain[0]['id']} ) # Verify the two remaining trust have been deleted for i in range(len(self.user_list) - 1): auth_data = self.build_authentication_request( user_id=self.user_list[i]['id'], password=self.user_list[i]['password'], ) auth_token = self.get_requested_token(auth_data) # Assert chained trust have been deleted self.get( '/OS-TRUST/trusts/%(trust_id)s' % {'trust_id': self.trust_chain[i + 1]['id']}, token=auth_token, expected_status=http.client.NOT_FOUND, ) def test_trustor_roles_revoked(self): self.assert_user_authenticate(self.user_list[0]) PROVIDERS.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, self.role_id ) # Verify that users are not allowed to authenticate with trust for i in range(len(self.user_list[1:])): trustee = self.user_list[i] auth_data = self.build_authentication_request( user_id=trustee['id'], password=trustee['password'] ) # Attempt to authenticate with trust token = self.get_requested_token(auth_data) auth_data = self.build_authentication_request( token=token, trust_id=self.trust_chain[i - 1]['id'] ) # Trustee has no delegated roles self.v3_create_token( auth_data, expected_status=http.client.FORBIDDEN ) def test_intermediate_user_disabled(self): self.assert_user_authenticate(self.user_list[0]) disabled = self.user_list[0] disabled['enabled'] = False PROVIDERS.identity_api.update_user(disabled['id'], disabled) # Bypass policy enforcement with mock.patch.object(policy, 'enforce', return_value=True): headers = {'X-Subject-Token': self.last_token} self.head( '/auth/tokens', headers=headers, expected_status=http.client.FORBIDDEN, ) def test_intermediate_user_deleted(self): self.assert_user_authenticate(self.user_list[0]) PROVIDERS.identity_api.delete_user(self.user_list[0]['id']) # Bypass policy enforcement # Delete trustee will invalidate the trust. 
with mock.patch.object(policy, 'enforce', return_value=True):
            headers = {'X-Subject-Token': self.last_token}
            self.head(
                '/auth/tokens',
                headers=headers,
                expected_status=http.client.NOT_FOUND,
            )


class TestAuthContext(unit.TestCase):
    def setUp(self):
        super().setUp()
        self.auth_context = auth.core.AuthContext()

    def test_pick_lowest_expires_at(self):
        expires_at_1 = utils.isotime(timeutils.utcnow())
        expires_at_2 = utils.isotime(
            timeutils.utcnow() + datetime.timedelta(seconds=10)
        )
        # make sure auth_context picks the lowest value
        self.auth_context['expires_at'] = expires_at_1
        self.auth_context['expires_at'] = expires_at_2
        self.assertEqual(expires_at_1, self.auth_context['expires_at'])

    def test_identity_attribute_conflict(self):
        for identity_attr in auth.core.AuthContext.IDENTITY_ATTRIBUTES:
            self.auth_context[identity_attr] = uuid.uuid4().hex
            if identity_attr == 'expires_at':
                # 'expires_at' is a special case. Will test it in a separate
                # test case.
                continue
            self.assertRaises(
                exception.Unauthorized,
                operator.setitem,
                self.auth_context,
                identity_attr,
                uuid.uuid4().hex,
            )

    def test_identity_attribute_conflict_with_none_value(self):
        for identity_attr in auth.core.AuthContext.IDENTITY_ATTRIBUTES:
            self.auth_context[identity_attr] = None
            if identity_attr == 'expires_at':
                # 'expires_at' is a special case and is tested above.
                self.auth_context['expires_at'] = uuid.uuid4().hex
                continue
            self.assertRaises(
                exception.Unauthorized,
                operator.setitem,
                self.auth_context,
                identity_attr,
                uuid.uuid4().hex,
            )

    def test_non_identity_attribute_conflict_override(self):
        # for attributes Keystone doesn't know about, make sure they can be
        # freely manipulated
        attr_name = uuid.uuid4().hex
        attr_val_1 = uuid.uuid4().hex
        attr_val_2 = uuid.uuid4().hex
        self.auth_context[attr_name] = attr_val_1
        self.auth_context[attr_name] = attr_val_2
        self.assertEqual(attr_val_2, self.auth_context[attr_name])


class TestAuthSpecificData(test_v3.RestfulTestCase):
    def test_get_catalog_with_project_scoped_token(self):
        """Call ``GET /auth/catalog`` with a project-scoped token."""
        r = self.get('/auth/catalog', expected_status=http.client.OK)
        self.assertValidCatalogResponse(r)

    def test_head_catalog_with_project_scoped_token(self):
        """Call ``HEAD /auth/catalog`` with a project-scoped token."""
        self.head('/auth/catalog', expected_status=http.client.OK)

    def test_get_catalog_with_domain_scoped_token(self):
        """Call ``GET /auth/catalog`` with a domain-scoped token."""
        # grant a domain role to a user
        self.put(
            path='/domains/%s/users/%s/roles/%s'
            % (self.domain['id'], self.user['id'], self.role['id'])
        )
        self.get(
            '/auth/catalog',
            auth=self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                domain_id=self.domain['id'],
            ),
            expected_status=http.client.FORBIDDEN,
        )

    def test_head_catalog_with_domain_scoped_token(self):
        """Call ``HEAD /auth/catalog`` with a domain-scoped token."""
        # grant a domain role to a user
        self.put(
            path='/domains/%s/users/%s/roles/%s'
            % (self.domain['id'], self.user['id'], self.role['id'])
        )
        self.head(
            '/auth/catalog',
            auth=self.build_authentication_request(
                user_id=self.user['id'],
                password=self.user['password'],
                domain_id=self.domain['id'],
            ),
            expected_status=http.client.FORBIDDEN,
        )

    def test_get_catalog_with_unscoped_token(self):
        """Call ``GET /auth/catalog`` with an unscoped token."""
self.get( '/auth/catalog', auth=self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], ), expected_status=http.client.FORBIDDEN, ) def test_head_catalog_with_unscoped_token(self): """Call ``HEAD /auth/catalog`` with an unscoped token.""" self.head( '/auth/catalog', auth=self.build_authentication_request( user_id=self.default_domain_user['id'], password=self.default_domain_user['password'], ), expected_status=http.client.FORBIDDEN, ) def test_get_catalog_no_token(self): """Call ``GET /auth/catalog`` without a token.""" self.get( '/auth/catalog', noauth=True, expected_status=http.client.UNAUTHORIZED, ) def test_head_catalog_no_token(self): """Call ``HEAD /auth/catalog`` without a token.""" self.head( '/auth/catalog', noauth=True, expected_status=http.client.UNAUTHORIZED, ) def test_get_projects_with_project_scoped_token(self): r = self.get('/auth/projects', expected_status=http.client.OK) self.assertThat(r.json['projects'], matchers.HasLength(1)) self.assertValidProjectListResponse(r) def test_head_projects_with_project_scoped_token(self): self.head('/auth/projects', expected_status=http.client.OK) def test_get_projects_matches_federated_get_projects(self): # create at least one addition project to make sure it doesn't end up # in the response, since the user doesn't have any authorization on it ref = unit.new_project_ref(domain_id=CONF.identity.default_domain_id) r = self.post('/projects', body={'project': ref}) unauthorized_project_id = r.json['project']['id'] r = self.get('/auth/projects', expected_status=http.client.OK) self.assertThat(r.json['projects'], matchers.HasLength(1)) for project in r.json['projects']: self.assertNotEqual(unauthorized_project_id, project['id']) expected_project_id = r.json['projects'][0]['id'] # call GET /v3/OS-FEDERATION/projects r = self.get('/OS-FEDERATION/projects', expected_status=http.client.OK) # make sure the response is the same 
self.assertThat(r.json['projects'], matchers.HasLength(1)) for project in r.json['projects']: self.assertEqual(expected_project_id, project['id']) def test_get_domains_matches_federated_get_domains(self): # create at least one addition domain to make sure it doesn't end up # in the response, since the user doesn't have any authorization on it ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': ref}) unauthorized_domain_id = r.json['domain']['id'] ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': ref}) authorized_domain_id = r.json['domain']['id'] path = '/domains/{domain_id}/users/{user_id}/roles/{role_id}'.format( domain_id=authorized_domain_id, user_id=self.user_id, role_id=self.role_id, ) self.put(path, expected_status=http.client.NO_CONTENT) r = self.get('/auth/domains', expected_status=http.client.OK) self.assertThat(r.json['domains'], matchers.HasLength(1)) self.assertEqual(authorized_domain_id, r.json['domains'][0]['id']) self.assertNotEqual(unauthorized_domain_id, r.json['domains'][0]['id']) # call GET /v3/OS-FEDERATION/domains r = self.get('/OS-FEDERATION/domains', expected_status=http.client.OK) # make sure the response is the same self.assertThat(r.json['domains'], matchers.HasLength(1)) self.assertEqual(authorized_domain_id, r.json['domains'][0]['id']) self.assertNotEqual(unauthorized_domain_id, r.json['domains'][0]['id']) def test_get_domains_with_project_scoped_token(self): self.put( path='/domains/%s/users/%s/roles/%s' % (self.domain['id'], self.user['id'], self.role['id']) ) r = self.get('/auth/domains', expected_status=http.client.OK) self.assertThat(r.json['domains'], matchers.HasLength(1)) self.assertValidDomainListResponse(r) def test_head_domains_with_project_scoped_token(self): self.put( path='/domains/%s/users/%s/roles/%s' % (self.domain['id'], self.user['id'], self.role['id']) ) self.head('/auth/domains', expected_status=http.client.OK) def test_get_system_roles_with_unscoped_token(self): path = 
'/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) unscoped_request = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) r = self.post('/auth/tokens', body=unscoped_request) unscoped_token = r.headers.get('X-Subject-Token') self.assertValidUnscopedTokenResponse(r) response = self.get('/auth/system', token=unscoped_token) self.assertTrue(response.json_body['system'][0]['all']) self.head( '/auth/system', token=unscoped_token, expected_status=http.client.OK, ) def test_get_system_roles_returns_empty_list_without_system_roles(self): # A user without a system role assignment shouldn't expect an empty # list when calling /v3/auth/system regardless of calling the API with # an unscoped token or a project-scoped token. unscoped_request = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'] ) r = self.post('/auth/tokens', body=unscoped_request) unscoped_token = r.headers.get('X-Subject-Token') self.assertValidUnscopedTokenResponse(r) response = self.get('/auth/system', token=unscoped_token) self.assertEqual(response.json_body['system'], []) self.head( '/auth/system', token=unscoped_token, expected_status=http.client.OK, ) project_scoped_request = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project_id, ) r = self.post('/auth/tokens', body=project_scoped_request) project_scoped_token = r.headers.get('X-Subject-Token') self.assertValidProjectScopedTokenResponse(r) response = self.get('/auth/system', token=project_scoped_token) self.assertEqual(response.json_body['system'], []) self.head( '/auth/system', token=project_scoped_token, expected_status=http.client.OK, ) def test_get_system_roles_with_project_scoped_token(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) self.put( 
path='/domains/%s/users/%s/roles/%s' % (self.domain['id'], self.user['id'], self.role['id']) ) domain_scoped_request = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain['id'], ) r = self.post('/auth/tokens', body=domain_scoped_request) domain_scoped_token = r.headers.get('X-Subject-Token') self.assertValidDomainScopedTokenResponse(r) response = self.get('/auth/system', token=domain_scoped_token) self.assertTrue(response.json_body['system'][0]['all']) self.head( '/auth/system', token=domain_scoped_token, expected_status=http.client.OK, ) def test_get_system_roles_with_domain_scoped_token(self): path = '/system/users/{user_id}/roles/{role_id}'.format( user_id=self.user['id'], role_id=self.role_id, ) self.put(path=path) project_scoped_request = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project_id, ) r = self.post('/auth/tokens', body=project_scoped_request) project_scoped_token = r.headers.get('X-Subject-Token') self.assertValidProjectScopedTokenResponse(r) response = self.get('/auth/system', token=project_scoped_token) self.assertTrue(response.json_body['system'][0]['all']) self.head( '/auth/system', token=project_scoped_token, expected_status=http.client.OK, ) class TestTrustAuthFernetTokenProvider(TrustAPIBehavior, TestTrustChain): def config_overrides(self): super().config_overrides() self.config_fixture.config( group='token', provider='fernet', revoke_by_id=False ) self.config_fixture.config(group='trust') self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) class TestAuthTOTP(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) ref = unit.new_totp_credential( user_id=self.default_domain_user['id'], project_id=self.default_domain_project['id'], ) 
self.secret = ref['blob'] r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) self.addCleanup(self.cleanup) def auth_plugin_config_override(self): methods = ['totp', 'token', 'password'] super().auth_plugin_config_override(methods) def _make_credentials( self, cred_type, count=1, user_id=None, project_id=None, blob=None ): user_id = user_id or self.default_domain_user['id'] project_id = project_id or self.default_domain_project['id'] creds = [] for __ in range(count): if cred_type == 'totp': ref = unit.new_totp_credential( user_id=user_id, project_id=project_id, blob=blob ) else: ref = unit.new_credential_ref( user_id=user_id, project_id=project_id ) resp = self.post('/credentials', body={'credential': ref}) creds.append(resp.json['credential']) return creds def _make_auth_data_by_id(self, passcode, user_id=None): return self.build_authentication_request( user_id=user_id or self.default_domain_user['id'], passcode=passcode, project_id=self.project['id'], ) def _make_auth_data_by_name(self, passcode, username, user_domain_id): return self.build_authentication_request( username=username, user_domain_id=user_domain_id, passcode=passcode, project_id=self.project['id'], ) def cleanup(self): totp_creds = PROVIDERS.credential_api.list_credentials_for_user( self.default_domain_user['id'], type='totp' ) other_creds = PROVIDERS.credential_api.list_credentials_for_user( self.default_domain_user['id'], type='other' ) for cred in itertools.chain(other_creds, totp_creds): self.delete( '/credentials/%s' % cred['id'], expected_status=http.client.NO_CONTENT, ) def test_with_a_valid_passcode(self): creds = self._make_credentials('totp') secret = creds[-1]['blob'] # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. 
self.useFixture(fixture.TimeFixture()) auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_an_expired_passcode(self): creds = self._make_credentials('totp') secret = creds[-1]['blob'] past = timeutils.utcnow() - datetime.timedelta(minutes=2) with freezegun.freeze_time(past): auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) # Stop the clock otherwise there is a chance of accidental success due # to getting a different TOTP between the call here and the call in the # auth plugin. self.useFixture(fixture.TimeFixture()) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_an_expired_passcode_no_previous_windows(self): self.config_fixture.config(group='totp', included_previous_windows=0) creds = self._make_credentials('totp') secret = creds[-1]['blob'] past = timeutils.utcnow() - datetime.timedelta(seconds=30) with freezegun.freeze_time(past): auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) # Stop the clock otherwise there is a chance of accidental success due # to getting a different TOTP between the call here and the call in the # auth plugin. self.useFixture(fixture.TimeFixture()) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_passcode_no_previous_windows(self): self.config_fixture.config(group='totp', included_previous_windows=0) creds = self._make_credentials('totp') secret = creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. 
self.useFixture(fixture.TimeFixture()) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_passcode_in_previous_windows_default(self): """Confirm previous window default of 1 works.""" creds = self._make_credentials('totp') secret = creds[-1]['blob'] past = timeutils.utcnow() - datetime.timedelta(seconds=30) with freezegun.freeze_time(past): auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. self.useFixture(fixture.TimeFixture()) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_passcode_in_previous_windows_extended(self): self.config_fixture.config(group='totp', included_previous_windows=4) creds = self._make_credentials('totp') secret = creds[-1]['blob'] past = timeutils.utcnow() - datetime.timedelta(minutes=2) self.useFixture(fixture.TimeFixture(past)) auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. 
self.useFixture(fixture.TimeFixture()) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_an_invalid_passcode_and_user_credentials(self): self._make_credentials('totp') auth_data = self._make_auth_data_by_id('000000') self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_an_invalid_passcode_with_no_user_credentials(self): auth_data = self._make_auth_data_by_id('000000') self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_a_corrupt_totp_credential(self): self._make_credentials('totp', count=1, blob='0') auth_data = self._make_auth_data_by_id('000000') self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_multiple_credentials(self): self._make_credentials('other', 3) creds = self._make_credentials('totp', count=3) secret = creds[-1]['blob'] # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. self.useFixture(fixture.TimeFixture()) auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_multiple_users(self): # make some credentials for the existing user self._make_credentials('totp', count=3) # create a new user and their credentials user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=user['id'], project_id=self.project['id'] ) creds = self._make_credentials('totp', count=1, user_id=user['id']) secret = creds[-1]['blob'] # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. 
self.useFixture(fixture.TimeFixture()) auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0], user_id=user['id'] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_with_multiple_users_and_invalid_credentials(self): """Prevent logging in with someone else's credentials. It's very easy to forget to limit the credentials query by user. Let's just test it for a sanity check. """ # make some credentials for the existing user self._make_credentials('totp', count=3) # create a new user and their credentials new_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=new_user['id'], project_id=self.project['id'], ) user2_creds = self._make_credentials( 'totp', count=1, user_id=new_user['id'] ) user_id = self.default_domain_user['id'] # user1 secret = user2_creds[-1]['blob'] auth_data = self._make_auth_data_by_id( totp._generate_totp_passcodes(secret)[0], user_id=user_id ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_with_username_and_domain_id(self): creds = self._make_credentials('totp') secret = creds[-1]['blob'] # Stop the clock otherwise there is a chance of auth failure due to # getting a different TOTP between the call here and the call in the # auth plugin. 
self.useFixture(fixture.TimeFixture()) auth_data = self._make_auth_data_by_name( totp._generate_totp_passcodes(secret)[0], username=self.default_domain_user['name'], user_domain_id=self.default_domain_user['domain_id'], ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_generated_passcode_is_correct_format(self): secret = self._make_credentials('totp')[-1]['blob'] passcode = totp._generate_totp_passcodes(secret)[0] reg = re.compile(r'^-?[0-9]+$') self.assertTrue(reg.match(passcode)) class TestFetchRevocationList(test_v3.RestfulTestCase): """Test fetch token revocation list on the v3 Identity API.""" def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', revoke_by_id=True) def test_get_ids_no_tokens_returns_forbidden(self): # NOTE(vishakha): Since this API is deprecated and isn't supported. # Returning a 403 till API is removed. If API is removed a 410 # can be returned. self.get( '/auth/tokens/OS-PKI/revoked', expected_status=http.client.FORBIDDEN, ) def test_head_ids_no_tokens_returns_forbidden(self): # NOTE(vishakha): Since this API is deprecated and isn't supported. # Returning a 403 till API is removed. If API is removed a 410 # can be returned. 
self.head( '/auth/tokens/OS-PKI/revoked', expected_status=http.client.FORBIDDEN, ) class ApplicationCredentialAuth(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.app_cred_api = PROVIDERS.application_credential_api def config_overrides(self): super().config_overrides() self.auth_plugin_config_override( methods=['application_credential', 'password', 'token'] ) def _make_app_cred(self, expires=None, access_rules=None): roles = [{'id': self.role_id}] data = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'user_id': self.user['id'], 'project_id': self.project['id'], 'description': uuid.uuid4().hex, 'roles': roles, } if expires: data['expires_at'] = expires if access_rules: data['access_rules'] = access_rules return data def _validate_token( self, token, headers=None, expected_status=http.client.OK ): path = '/v3/auth/tokens' headers = headers or {} headers.update({'X-Auth-Token': token, 'X-Subject-Token': token}) with self.test_client() as c: resp = c.get( path, headers=headers, expected_status_code=expected_status ) return resp def test_valid_application_credential_succeeds(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_validate_application_credential_token_populates_restricted(self): self.config_fixture.config(group='token', cache_on_issue=False) app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) auth_response = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) self.assertTrue( auth_response.json['token']['application_credential']['restricted'] ) token_id = 
auth_response.headers.get('X-Subject-Token') headers = {'X-Auth-Token': token_id, 'X-Subject-Token': token_id} validate_response = self.get('/auth/tokens', headers=headers).json_body self.assertTrue( validate_response['token']['application_credential']['restricted'] ) def test_valid_application_credential_with_name_succeeds(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_name=app_cred_ref['name'], secret=app_cred_ref['secret'], user_id=self.user['id'], ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_valid_application_credential_name_and_username_succeeds(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_name=app_cred_ref['name'], secret=app_cred_ref['secret'], username=self.user['name'], user_domain_id=self.user['domain_id'], ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_application_credential_with_invalid_secret_fails(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret='badsecret' ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_unexpired_application_credential_succeeds(self): expires_at = timeutils.utcnow() + datetime.timedelta(minutes=1) app_cred = self._make_app_cred(expires=expires_at) app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_expired_application_credential_fails(self): expires_at = timeutils.utcnow() + datetime.timedelta(minutes=1) app_cred = 
self._make_app_cred(expires=expires_at) app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) future = timeutils.utcnow() + datetime.timedelta(minutes=2) with freezegun.freeze_time(future): self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_application_credential_expiration_limits_token_expiration(self): expires_at = timeutils.utcnow() + datetime.timedelta(minutes=1) app_cred = self._make_app_cred(expires=expires_at) app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) resp = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) self.assertValidTokenResponse(resp) token = resp.headers.get('X-Subject-Token') future = timeutils.utcnow() + datetime.timedelta(minutes=2) with freezegun.freeze_time(future): self._validate_token( token, expected_status=http.client.UNAUTHORIZED ) def test_application_credential_fails_when_user_deleted(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) PROVIDERS.identity_api.delete_user(self.user['id']) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.NOT_FOUND) def test_application_credential_fails_when_user_disabled(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) PROVIDERS.identity_api.update_user(self.user['id'], {'enabled': False}) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def 
test_application_credential_fails_when_project_deleted(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) PROVIDERS.resource_api.delete_project(self.project['id']) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.NOT_FOUND) def test_application_credential_fails_when_role_deleted(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) PROVIDERS.role_api.delete_role(self.role_id) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.NOT_FOUND) def test_application_credential_fails_when_role_unassigned(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) PROVIDERS.assignment_api.remove_role_from_user_and_project( self.user['id'], self.project['id'], self.role_id ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.NOT_FOUND) def test_application_credential_through_group_membership(self): user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) group1 = unit.new_group_ref(domain_id=self.domain_id) group1 = PROVIDERS.identity_api.create_group(group1) PROVIDERS.identity_api.add_user_to_group(user1['id'], group1['id']) PROVIDERS.assignment_api.create_grant( self.role_id, group_id=group1['id'], project_id=self.project_id ) app_cred = { 'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex, 'secret': uuid.uuid4().hex, 'user_id': user1['id'], 'project_id': self.project_id, 'description': uuid.uuid4().hex, 'roles': [{'id': self.role_id}], } app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = 
self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) self.v3_create_token(auth_data, expected_status=http.client.CREATED) def test_application_credential_cannot_scope(self): app_cred = self._make_app_cred() app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) new_project_ref = unit.new_project_ref(domain_id=self.domain_id) # Create a new project and assign the user a valid role on it new_project = PROVIDERS.resource_api.create_project( new_project_ref['id'], new_project_ref ) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user['id'], new_project['id'], self.role_id ) # Check that a password auth would work password_auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=new_project['id'], ) password_response = self.v3_create_token(password_auth) self.assertValidProjectScopedTokenResponse(password_response) # Should not be able to use that scope with an application credential # even though the user has a valid assignment on it app_cred_auth = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'], project_id=new_project['id'], ) self.v3_create_token( app_cred_auth, expected_status=http.client.UNAUTHORIZED ) def test_application_credential_with_access_rules(self): access_rules = [ { 'id': uuid.uuid4().hex, 'path': '/v2.1/servers', 'method': 'POST', 'service': uuid.uuid4().hex, } ] app_cred = self._make_app_cred(access_rules=access_rules) app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) resp = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) token = resp.headers.get('X-Subject-Token') headers = {'OpenStack-Identity-Access-Rules': '1.0'} self._validate_token(token, headers=headers) def 
test_application_credential_access_rules_without_header_fails(self): access_rules = [ { 'id': uuid.uuid4().hex, 'path': '/v2.1/servers', 'method': 'POST', 'service': uuid.uuid4().hex, } ] app_cred = self._make_app_cred(access_rules=access_rules) app_cred_ref = self.app_cred_api.create_application_credential( app_cred ) auth_data = self.build_authentication_request( app_cred_id=app_cred_ref['id'], secret=app_cred_ref['secret'] ) resp = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) token = resp.headers.get('X-Subject-Token') self._validate_token(token, expected_status=http.client.NOT_FOUND) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867753.0 keystone-26.0.0/keystone/tests/unit/test_v3_catalog.py0000664000175000017500000012053700000000000023110 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import http.client import uuid from testtools import matchers from keystone.common import provider_api from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class CatalogTestCase(test_v3.RestfulTestCase): """Test service & endpoint CRUD.""" # region crud tests def test_create_region_with_id(self): """Call ``PUT /regions/{region_id}`` w/o an ID in the request body.""" ref = unit.new_region_ref() region_id = ref.pop('id') r = self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http.client.CREATED, ) self.assertValidRegionResponse(r, ref) # Double-check that the region ID was kept as-is and not # populated with a UUID, as is the case with POST /v3/regions self.assertEqual(region_id, r.json['region']['id']) def test_create_region_with_matching_ids(self): """Call ``PUT /regions/{region_id}`` with an ID in the request body.""" ref = unit.new_region_ref() region_id = ref['id'] r = self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http.client.CREATED, ) self.assertValidRegionResponse(r, ref) # Double-check that the region ID was kept as-is and not # populated with a UUID, as is the case with POST /v3/regions self.assertEqual(region_id, r.json['region']['id']) def test_create_region_with_duplicate_id(self): """Call ``PUT /regions/{region_id}``.""" ref = unit.new_region_ref() region_id = ref['id'] self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http.client.CREATED, ) # Create region again with duplicate id self.put( '/regions/%s' % region_id, body={'region': ref}, expected_status=http.client.CONFLICT, ) def test_create_region(self): """Call ``POST /regions`` with an ID in the request body.""" # the ref will have an ID defined on it ref = unit.new_region_ref() r = self.post('/regions', body={'region': ref}) 
self.assertValidRegionResponse(r, ref) # we should be able to get the region, having defined the ID ourselves r = self.get('/regions/{region_id}'.format(region_id=ref['id'])) self.assertValidRegionResponse(r, ref) def test_create_region_with_empty_id(self): """Call ``POST /regions`` with an empty ID in the request body.""" ref = unit.new_region_ref(id='') r = self.post('/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) self.assertNotEmpty(r.result['region'].get('id')) def test_create_region_without_id(self): """Call ``POST /regions`` without an ID in the request body.""" ref = unit.new_region_ref() # instead of defining the ID ourselves... del ref['id'] # let the service define the ID r = self.post('/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) def test_create_region_without_description(self): """Call ``POST /regions`` without description in the request body.""" ref = unit.new_region_ref(description=None) del ref['description'] r = self.post('/regions', body={'region': ref}) # Create the description in the reference to compare to since the # response should now have a description, even though we didn't send # it with the original reference. ref['description'] = '' self.assertValidRegionResponse(r, ref) def test_create_regions_with_same_description_string(self): """Call ``POST /regions`` with duplicate descriptions.""" # NOTE(lbragstad): Make sure we can create two regions that have the # same description. 
region_desc = 'Some Region Description' ref1 = unit.new_region_ref(description=region_desc) ref2 = unit.new_region_ref(description=region_desc) resp1 = self.post('/regions', body={'region': ref1}) self.assertValidRegionResponse(resp1, ref1) resp2 = self.post('/regions', body={'region': ref2}) self.assertValidRegionResponse(resp2, ref2) def test_create_regions_without_descriptions(self): """Call ``POST /regions`` with no description.""" # NOTE(lbragstad): Make sure we can create two regions that have # no description in the request body. The description should be # populated by Catalog Manager. ref1 = unit.new_region_ref() ref2 = unit.new_region_ref() del ref1['description'] ref2['description'] = None resp1 = self.post('/regions', body={'region': ref1}) resp2 = self.post('/regions', body={'region': ref2}) # Create the descriptions in the references to compare to since the # responses should now have descriptions, even though we didn't send # a description with the original references. ref1['description'] = '' ref2['description'] = '' self.assertValidRegionResponse(resp1, ref1) self.assertValidRegionResponse(resp2, ref2) def test_create_region_with_conflicting_ids(self): """Call ``PUT /regions/{region_id}`` with conflicting region IDs.""" # the region ref is created with an ID ref = unit.new_region_ref() # but instead of using that ID, make up a new, conflicting one self.put( '/regions/%s' % uuid.uuid4().hex, body={'region': ref}, expected_status=http.client.BAD_REQUEST, ) def test_list_head_regions(self): """Call ``GET & HEAD /regions``.""" resource_url = '/regions' r = self.get(resource_url) self.assertValidRegionListResponse(r, ref=self.region) self.head(resource_url, expected_status=http.client.OK) def _create_region_with_parent_id(self, parent_id=None): ref = unit.new_region_ref(parent_region_id=parent_id) return self.post('/regions', body={'region': ref}) def test_list_regions_filtered_by_parent_region_id(self): """Call ``GET 
/regions?parent_region_id={parent_region_id}``.""" new_region = self._create_region_with_parent_id() parent_id = new_region.result['region']['id'] new_region = self._create_region_with_parent_id(parent_id) new_region = self._create_region_with_parent_id(parent_id) r = self.get('/regions?parent_region_id=%s' % parent_id) for region in r.result['regions']: self.assertEqual(parent_id, region['parent_region_id']) def test_get_head_region(self): """Call ``GET & HEAD /regions/{region_id}``.""" resource_url = f'/regions/{self.region_id}' r = self.get(resource_url) self.assertValidRegionResponse(r, self.region) self.head(resource_url, expected_status=http.client.OK) def test_update_region(self): """Call ``PATCH /regions/{region_id}``.""" region = unit.new_region_ref() del region['id'] r = self.patch( f'/regions/{self.region_id}', body={'region': region}, ) self.assertValidRegionResponse(r, region) def test_update_region_without_description_keeps_original(self): """Call ``PATCH /regions/{region_id}``.""" region_ref = unit.new_region_ref() resp = self.post('/regions', body={'region': region_ref}) region_updates = { # update with something that's not the description 'parent_region_id': self.region_id, } resp = self.patch( '/regions/%s' % region_ref['id'], body={'region': region_updates} ) # NOTE(dstanek): Keystone should keep the original description. self.assertEqual( region_ref['description'], resp.result['region']['description'] ) def test_update_region_with_null_description(self): """Call ``PATCH /regions/{region_id}``.""" region = unit.new_region_ref(description=None) del region['id'] r = self.patch( f'/regions/{self.region_id}', body={'region': region}, ) # NOTE(dstanek): Keystone should turn the provided None value into # an empty string before storing in the backend. 
region['description'] = '' self.assertValidRegionResponse(r, region) def test_delete_region(self): """Call ``DELETE /regions/{region_id}``.""" ref = unit.new_region_ref() r = self.post('/regions', body={'region': ref}) self.assertValidRegionResponse(r, ref) self.delete('/regions/{region_id}'.format(region_id=ref['id'])) # service crud tests def test_create_service(self): """Call ``POST /services``.""" ref = unit.new_service_ref() r = self.post('/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) def test_create_service_no_name(self): """Call ``POST /services``.""" ref = unit.new_service_ref() del ref['name'] r = self.post('/services', body={'service': ref}) ref['name'] = '' self.assertValidServiceResponse(r, ref) def test_create_service_no_enabled(self): """Call ``POST /services``.""" ref = unit.new_service_ref() del ref['enabled'] r = self.post('/services', body={'service': ref}) ref['enabled'] = True self.assertValidServiceResponse(r, ref) self.assertIs(True, r.result['service']['enabled']) def test_create_service_enabled_false(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled=False) r = self.post('/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) self.assertIs(False, r.result['service']['enabled']) def test_create_service_enabled_true(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled=True) r = self.post('/services', body={'service': ref}) self.assertValidServiceResponse(r, ref) self.assertIs(True, r.result['service']['enabled']) def test_create_service_enabled_str_true(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='True') self.post( '/services', body={'service': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_service_enabled_str_false(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='False') self.post( '/services', body={'service': ref}, expected_status=http.client.BAD_REQUEST, ) def 
test_create_service_enabled_str_random(self): """Call ``POST /services``.""" ref = unit.new_service_ref(enabled='puppies') self.post( '/services', body={'service': ref}, expected_status=http.client.BAD_REQUEST, ) def test_list_head_services(self): """Call ``GET & HEAD /services``.""" resource_url = '/services' r = self.get(resource_url) self.assertValidServiceListResponse(r, ref=self.service) self.head(resource_url, expected_status=http.client.OK) def _create_random_service(self): ref = unit.new_service_ref() response = self.post('/services', body={'service': ref}) return response.json['service'] def test_filter_list_services_by_type(self): """Call ``GET /services?type=``.""" target_ref = self._create_random_service() # create unrelated services self._create_random_service() self._create_random_service() response = self.get('/services?type=' + target_ref['type']) self.assertValidServiceListResponse(response, ref=target_ref) filtered_service_list = response.json['services'] self.assertEqual(1, len(filtered_service_list)) filtered_service = filtered_service_list[0] self.assertEqual(target_ref['type'], filtered_service['type']) def test_filter_list_services_by_name(self): """Call ``GET /services?name=``.""" # create unrelated services self._create_random_service() self._create_random_service() # create the desired service target_ref = self._create_random_service() response = self.get('/services?name=' + target_ref['name']) self.assertValidServiceListResponse(response, ref=target_ref) filtered_service_list = response.json['services'] self.assertEqual(1, len(filtered_service_list)) filtered_service = filtered_service_list[0] self.assertEqual(target_ref['name'], filtered_service['name']) def test_filter_list_services_by_name_with_list_limit(self): """Call ``GET /services?name=``.""" self.config_fixture.config(list_limit=1) self.test_filter_list_services_by_name() def test_get_head_service(self): """Call ``GET & HEAD /services/{service_id}``.""" resource_url = 
'/services/{service_id}'.format( service_id=self.service_id ) r = self.get(resource_url) self.assertValidServiceResponse(r, self.service) self.head(resource_url, expected_status=http.client.OK) def test_update_service(self): """Call ``PATCH /services/{service_id}``.""" service = unit.new_service_ref() del service['id'] r = self.patch( f'/services/{self.service_id}', body={'service': service}, ) self.assertValidServiceResponse(r, service) def test_delete_service(self): """Call ``DELETE /services/{service_id}``.""" self.delete(f'/services/{self.service_id}') # endpoint crud tests def test_list_head_endpoints(self): """Call ``GET & HEAD /endpoints``.""" resource_url = '/endpoints' r = self.get(resource_url) self.assertValidEndpointListResponse(r, ref=self.endpoint) self.head(resource_url, expected_status=http.client.OK) def _create_random_endpoint( self, interface='public', parent_region_id=None ): region = self._create_region_with_parent_id(parent_id=parent_region_id) service = self._create_random_service() ref = unit.new_endpoint_ref( service_id=service['id'], interface=interface, region_id=region.result['region']['id'], ) response = self.post('/endpoints', body={'endpoint': ref}) return response.json['endpoint'] def test_list_endpoints_filtered_by_interface(self): """Call ``GET /endpoints?interface={interface}``.""" ref = self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s' % ref['interface']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) def test_list_endpoints_filtered_by_service_id(self): """Call ``GET /endpoints?service_id={service_id}``.""" ref = self._create_random_endpoint() response = self.get('/endpoints?service_id=%s' % ref['service_id']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['service_id'], endpoint['service_id']) def 
test_list_endpoints_filtered_by_region_id(self): """Call ``GET /endpoints?region_id={region_id}``.""" ref = self._create_random_endpoint() response = self.get('/endpoints?region_id=%s' % ref['region_id']) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['region_id'], endpoint['region_id']) def test_list_endpoints_filtered_by_parent_region_id(self): """Call ``GET /endpoints?region_id={region_id}``. Ensure passing the parent_region_id as filter returns an empty list. """ parent_region = self._create_region_with_parent_id() parent_region_id = parent_region.result['region']['id'] self._create_random_endpoint(parent_region_id=parent_region_id) response = self.get('/endpoints?region_id=%s' % parent_region_id) self.assertEqual(0, len(response.json['endpoints'])) def test_list_endpoints_with_multiple_filters(self): """Call ``GET /endpoints?interface={interface}...``. Ensure passing different combinations of interface, region_id and service_id as filters will return the correct result. 
""" # interface and region_id specified ref = self._create_random_endpoint(interface='internal') response = self.get( '/endpoints?interface=%s®ion_id=%s' % (ref['interface'], ref['region_id']) ) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['region_id'], endpoint['region_id']) # interface and service_id specified ref = self._create_random_endpoint(interface='internal') response = self.get( '/endpoints?interface=%s&service_id=%s' % (ref['interface'], ref['service_id']) ) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['service_id'], endpoint['service_id']) # region_id and service_id specified ref = self._create_random_endpoint(interface='internal') response = self.get( '/endpoints?region_id=%s&service_id=%s' % (ref['region_id'], ref['service_id']) ) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['region_id'], endpoint['region_id']) self.assertEqual(ref['service_id'], endpoint['service_id']) # interface, region_id and service_id specified ref = self._create_random_endpoint(interface='internal') response = self.get( ('/endpoints?interface=%s®ion_id=%s&service_id=%s') % (ref['interface'], ref['region_id'], ref['service_id']) ) self.assertValidEndpointListResponse(response, ref=ref) for endpoint in response.json['endpoints']: self.assertEqual(ref['interface'], endpoint['interface']) self.assertEqual(ref['region_id'], endpoint['region_id']) self.assertEqual(ref['service_id'], endpoint['service_id']) def test_list_endpoints_with_random_filter_values(self): """Call ``GET /endpoints?interface={interface}...``. Ensure passing random values for: interface, region_id and service_id will return an empty list. 
""" self._create_random_endpoint(interface='internal') response = self.get('/endpoints?interface=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) response = self.get('/endpoints?region_id=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) response = self.get('/endpoints?service_id=%s' % uuid.uuid4().hex) self.assertEqual(0, len(response.json['endpoints'])) def test_create_endpoint_no_enabled(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) r = self.post('/endpoints', body={'endpoint': ref}) ref['enabled'] = True self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_true(self): """Call ``POST /endpoints`` with enabled: true.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, enabled=True, ) r = self.post('/endpoints', body={'endpoint': ref}) self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_false(self): """Call ``POST /endpoints`` with enabled: false.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, enabled=False, ) r = self.post('/endpoints', body={'endpoint': ref}) self.assertValidEndpointResponse(r, ref) def test_create_endpoint_enabled_str_true(self): """Call ``POST /endpoints`` with enabled: 'True'.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, enabled='True', ) self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_endpoint_enabled_str_false(self): """Call ``POST /endpoints`` with enabled: 'False'.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, enabled='False', ) self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) def 
test_create_endpoint_enabled_str_random(self): """Call ``POST /endpoints`` with enabled: 'puppies'.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, enabled='puppies', ) self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_endpoint_with_invalid_region_id(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref(service_id=self.service_id) self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_endpoint_with_region(self): """EndpointV3 creates the region before creating the endpoint. This occurs when endpoint is provided with 'region' and no 'region_id'. """ ref = unit.new_endpoint_ref_with_region( service_id=self.service_id, region=uuid.uuid4().hex ) self.post('/endpoints', body={'endpoint': ref}) # Make sure the region is created self.get('/regions/{region_id}'.format(region_id=ref["region"])) def test_create_endpoint_with_no_region(self): """EndpointV3 allows to creates the endpoint without region.""" ref = unit.new_endpoint_ref(service_id=self.service_id, region_id=None) del ref['region_id'] # cannot just be None, it needs to not exist self.post('/endpoints', body={'endpoint': ref}) def test_create_endpoint_with_empty_url(self): """Call ``POST /endpoints``.""" ref = unit.new_endpoint_ref(service_id=self.service_id, url='') self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) def test_get_head_endpoint(self): """Call ``GET & HEAD /endpoints/{endpoint_id}``.""" resource_url = '/endpoints/{endpoint_id}'.format( endpoint_id=self.endpoint_id ) r = self.get(resource_url) self.assertValidEndpointResponse(r, self.endpoint) self.head(resource_url, expected_status=http.client.OK) def test_update_endpoint(self): """Call ``PATCH /endpoints/{endpoint_id}``.""" ref = unit.new_endpoint_ref( service_id=self.service_id, interface='public', region_id=self.region_id, ) del 
ref['id'] r = self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': ref}, ) ref['enabled'] = True self.assertValidEndpointResponse(r, ref) def test_update_endpoint_enabled_true(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: True.""" r = self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': {'enabled': True}}, ) self.assertValidEndpointResponse(r, self.endpoint) def test_update_endpoint_enabled_false(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: False.""" r = self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': {'enabled': False}}, ) exp_endpoint = copy.copy(self.endpoint) exp_endpoint['enabled'] = False self.assertValidEndpointResponse(r, exp_endpoint) def test_update_endpoint_enabled_str_true(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'True'.""" self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': {'enabled': 'True'}}, expected_status=http.client.BAD_REQUEST, ) def test_update_endpoint_enabled_str_false(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'False'.""" self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': {'enabled': 'False'}}, expected_status=http.client.BAD_REQUEST, ) def test_update_endpoint_enabled_str_random(self): """Call ``PATCH /endpoints/{endpoint_id}`` with enabled: 'kitties'.""" self.patch( f'/endpoints/{self.endpoint_id}', body={'endpoint': {'enabled': 'kitties'}}, expected_status=http.client.BAD_REQUEST, ) def test_delete_endpoint(self): """Call ``DELETE /endpoints/{endpoint_id}``.""" self.delete(f'/endpoints/{self.endpoint_id}') def test_deleting_endpoint_with_space_in_url(self): # add a space to all urls (intentional "i d" to test bug) url_with_space = "http://127.0.0.1:8774 /v1.1/\\$(tenant_i d)s" # create a v3 endpoint ref ref = unit.new_endpoint_ref( service_id=self.service['id'], region_id=None, publicurl=url_with_space, internalurl=url_with_space, adminurl=url_with_space, url=url_with_space, ) # add the endpoint 
to the database PROVIDERS.catalog_api.create_endpoint(ref['id'], ref) # delete the endpoint self.delete('/endpoints/%s' % ref['id']) # make sure it's deleted (GET should return Not Found) self.get( '/endpoints/%s' % ref['id'], expected_status=http.client.NOT_FOUND ) def test_endpoint_create_with_valid_url(self): """Create endpoint with valid url should be tested,too.""" # list one valid url is enough, no need to list too much valid_url = 'http://127.0.0.1:8774/v1.1/$(project_id)s' ref = unit.new_endpoint_ref( self.service_id, interface='public', region_id=self.region_id, url=valid_url, ) self.post('/endpoints', body={'endpoint': ref}) def test_endpoint_create_with_valid_url_project_id(self): """Create endpoint with valid url should be tested,too.""" valid_url = 'http://127.0.0.1:8774/v1.1/$(project_id)s' ref = unit.new_endpoint_ref( self.service_id, interface='public', region_id=self.region_id, url=valid_url, ) self.post('/endpoints', body={'endpoint': ref}) def test_endpoint_create_with_invalid_url(self): """Test the invalid cases: substitutions is not exactly right.""" invalid_urls = [ # using a substitution that is not whitelisted - KeyError 'http://127.0.0.1:8774/v1.1/$(nonexistent)s', # invalid formatting - ValueError 'http://127.0.0.1:8774/v1.1/$(project_id)', 'http://127.0.0.1:8774/v1.1/$(project_id)t', 'http://127.0.0.1:8774/v1.1/$(project_id', # invalid type specifier - TypeError # admin_url is a string not an int 'http://127.0.0.1:8774/v1.1/$(admin_url)d', ] ref = unit.new_endpoint_ref(self.service_id) for invalid_url in invalid_urls: ref['url'] = invalid_url self.post( '/endpoints', body={'endpoint': ref}, expected_status=http.client.BAD_REQUEST, ) class TestMultiRegion(test_v3.RestfulTestCase): def test_catalog_with_multi_region_reports_all_endpoints(self): # Create two separate regions first_region = self.post( '/regions', body={'region': unit.new_region_ref()} ).json_body['region'] second_region = self.post( '/regions', body={'region': 
unit.new_region_ref()} ).json_body['region'] # Create two services with the same type but separate name. first_service = self.post( '/services', body={'service': unit.new_service_ref(type='foobar')} ).json_body['service'] second_service = self.post( '/services', body={'service': unit.new_service_ref(type='foobar')} ).json_body['service'] # Create an endpoint for each service first_endpoint = self.post( '/endpoints', body={ 'endpoint': unit.new_endpoint_ref( first_service['id'], interface='public', region_id=first_region['id'], ) }, ).json_body['endpoint'] second_endpoint = self.post( '/endpoints', body={ 'endpoint': unit.new_endpoint_ref( second_service['id'], interface='public', region_id=second_region['id'], ) }, ).json_body['endpoint'] # Assert the endpoints and services from each region are in the # catalog. found_first_endpoint = False found_second_endpoint = False catalog = self.get('/auth/catalog/').json_body['catalog'] for service in catalog: if service['id'] == first_service['id']: endpoint = service['endpoints'][0] self.assertEqual(endpoint['id'], first_endpoint['id']) self.assertEqual(endpoint['region_id'], first_region['id']) found_first_endpoint = True elif service['id'] == second_service['id']: endpoint = service['endpoints'][0] self.assertEqual(endpoint['id'], second_endpoint['id']) self.assertEqual(endpoint['region_id'], second_region['id']) found_second_endpoint = True self.assertTrue(found_first_endpoint) self.assertTrue(found_second_endpoint) class TestCatalogAPISQL(unit.TestCase): """Test for the catalog Manager against the SQL backend.""" def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() service = unit.new_service_ref() self.service_id = service['id'] PROVIDERS.catalog_api.create_service(self.service_id, service) self.create_endpoint(service_id=self.service_id) PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def create_endpoint(self, service_id, 
**kwargs): endpoint = unit.new_endpoint_ref( service_id=service_id, region_id=None, **kwargs ) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) return endpoint def config_overrides(self): super().config_overrides() self.config_fixture.config(group='catalog', driver='sql') def test_get_catalog_ignores_endpoints_with_invalid_urls(self): user_id = uuid.uuid4().hex # create a project since the project should exist if we want to # filter the catalog by the project or replace the url with a # valid project id. domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) # the only endpoint in the catalog is the one created in setUp catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) self.assertEqual(1, len(catalog[0]['endpoints'])) # it's also the only endpoint in the backend self.assertEqual(1, len(PROVIDERS.catalog_api.list_endpoints())) # create a new, invalid endpoint - malformed type declaration self.create_endpoint( self.service_id, url='http://keystone/%(project_id)' ) # create a new, invalid endpoint - nonexistent key self.create_endpoint( self.service_id, url='http://keystone/%(you_wont_find_me)s' ) # verify that the invalid endpoints don't appear in the catalog catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) self.assertEqual(1, len(catalog[0]['endpoints'])) # all three appear in the backend self.assertEqual(3, len(PROVIDERS.catalog_api.list_endpoints())) # create another valid endpoint - project_id will be replaced self.create_endpoint( self.service_id, url='http://keystone/%(project_id)s' ) # there are two valid endpoints, positive check catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(2)) # If the URL has no 'project_id' to substitute, we will skip the # endpoint which contains 
this kind of URL, negative check. project_id = None catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project_id) self.assertThat(catalog[0]['endpoints'], matchers.HasLength(1)) def test_get_catalog_always_returns_service_name(self): user_id = uuid.uuid4().hex # create a project since the project should exist if we want to # filter the catalog by the project or replace the url with a # valid project id. domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) # create a service, with a name named_svc = unit.new_service_ref() PROVIDERS.catalog_api.create_service(named_svc['id'], named_svc) self.create_endpoint(service_id=named_svc['id']) # create a service, with no name unnamed_svc = unit.new_service_ref(name=None) del unnamed_svc['name'] PROVIDERS.catalog_api.create_service(unnamed_svc['id'], unnamed_svc) self.create_endpoint(service_id=unnamed_svc['id']) catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) named_endpoint = [ ep for ep in catalog if ep['type'] == named_svc['type'] ][0] self.assertEqual(named_svc['name'], named_endpoint['name']) unnamed_endpoint = [ ep for ep in catalog if ep['type'] == unnamed_svc['type'] ][0] self.assertEqual('', unnamed_endpoint['name']) # TODO(dstanek): this needs refactoring with the test above, but we are in a # crunch so that will happen in a future patch. 
class TestCatalogAPISQLRegions(unit.TestCase): """Test for the catalog Manager against the SQL backend.""" def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='catalog', driver='sql') def test_get_catalog_returns_proper_endpoints_with_no_region(self): service = unit.new_service_ref() service_id = service['id'] PROVIDERS.catalog_api.create_service(service_id, service) endpoint = unit.new_endpoint_ref(service_id=service_id, region_id=None) del endpoint['region_id'] PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) # create a project since the project should exist if we want to # filter the catalog by the project or replace the url with a # valid project id. domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) user_id = uuid.uuid4().hex catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) self.assertValidCatalogEndpoint( catalog[0]['endpoints'][0], ref=endpoint ) def test_get_catalog_returns_proper_endpoints_with_region(self): service = unit.new_service_ref() service_id = service['id'] PROVIDERS.catalog_api.create_service(service_id, service) endpoint = unit.new_endpoint_ref(service_id=service_id) region = unit.new_region_ref(id=endpoint['region_id']) PROVIDERS.catalog_api.create_region(region) PROVIDERS.catalog_api.create_endpoint(endpoint['id'], endpoint) endpoint = PROVIDERS.catalog_api.get_endpoint(endpoint['id']) user_id = uuid.uuid4().hex # create a project since the project should exist if we want to # filter the catalog by the project or replace the url with a # valid project id. 
domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) catalog = PROVIDERS.catalog_api.get_v3_catalog(user_id, project['id']) self.assertValidCatalogEndpoint( catalog[0]['endpoints'][0], ref=endpoint ) def assertValidCatalogEndpoint(self, entity, ref=None): keys = ['description', 'id', 'interface', 'name', 'region_id', 'url'] for k in keys: self.assertEqual(ref.get(k), entity[k], k) self.assertEqual(entity['region_id'], entity['region']) class TestCatalogAPITemplatedProject(test_v3.RestfulTestCase): """Templated Catalog doesn't support full API. Eg. No region/endpoint creation. """ def config_overrides(self): super().config_overrides() self.config_fixture.config(group='catalog', driver='templated') def load_fixtures(self, fixtures): self.load_sample_data(create_region_and_endpoints=False) def test_project_delete(self): """Deleting a project should not result in an 500 ISE. Deleting a project will create a notification, which the EndpointFilter functionality will use to clean up any project->endpoint and project->endpoint_group relationships. The templated catalog does not support such relationships, but the act of attempting to delete them should not cause a NotImplemented exception to be exposed to an API caller. Deleting an endpoint has a similar notification and clean up mechanism, but since we do not allow deletion of endpoints with the templated catalog, there is no testing to do for that action. 
""" self.delete(f'/projects/{self.project_id}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_credential.py0000664000175000017500000011737600000000000023617 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import hashlib import http.client import json from unittest import mock import urllib import uuid from keystoneclient.contrib.ec2 import utils as ec2_utils from oslo_db import exception as oslo_db_exception from testtools import matchers from keystone.api import ec2tokens from keystone.common import provider_api from keystone.common import utils from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone import oauth1 from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs CRED_TYPE_EC2 = ec2tokens.CRED_TYPE_EC2 class CredentialBaseTestCase(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) def _create_dict_blob_credential(self): blob, credential = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) # Store the blob as a dict *not* JSON ref bug #1259584 # This means we can test the dict->json workaround, added 
# as part of the bugfix for backwards compatibility works. credential['blob'] = blob credential_id = credential['id'] # Create direct via the DB API to avoid validation failure PROVIDERS.credential_api.create_credential(credential_id, credential) return json.dumps(blob), credential_id def _test_get_token(self, access, secret): """Test signature validation with the access/secret provided.""" signer = ec2_utils.Ec2Signer(secret) params = { 'SignatureMethod': 'HmacSHA256', 'SignatureVersion': '2', 'AWSAccessKeyId': access, } request = { 'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params, } signature = signer.generate(request) # Now make a request to validate the signed dummy request via the # ec2tokens API. This proves the v3 ec2 credentials actually work. sig_ref = { 'access': access, 'signature': signature, 'host': 'foo', 'verb': 'GET', 'path': '/bar', 'params': params, } r = self.post( '/ec2tokens', body={'ec2Credentials': sig_ref}, expected_status=http.client.OK, ) self.assertValidTokenResponse(r) return r.result['token'] class CredentialTestCase(CredentialBaseTestCase): """Test credential CRUD.""" def setUp(self): super().setUp() self.credential = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) PROVIDERS.credential_api.create_credential( self.credential['id'], self.credential ) def test_credential_api_delete_credentials_for_project(self): PROVIDERS.credential_api.delete_credentials_for_project( self.project_id ) # Test that the credential that we created in .setUp no longer exists # once we delete all credentials for self.project_id self.assertRaises( exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, credential_id=self.credential['id'], ) def test_credential_api_delete_credentials_for_user(self): PROVIDERS.credential_api.delete_credentials_for_user(self.user_id) # Test that the credential that we created in .setUp no longer exists # once we delete all credentials for self.user_id self.assertRaises( 
exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, credential_id=self.credential['id'], ) def test_list_credentials(self): """Call ``GET /credentials``.""" r = self.get('/credentials') self.assertValidCredentialListResponse(r, ref=self.credential) def test_list_credentials_filtered_by_user_id(self): """Call ``GET /credentials?user_id={user_id}``.""" credential = unit.new_credential_ref(user_id=uuid.uuid4().hex) PROVIDERS.credential_api.create_credential( credential['id'], credential ) r = self.get('/credentials?user_id=%s' % self.user['id']) self.assertValidCredentialListResponse(r, ref=self.credential) for cred in r.result['credentials']: self.assertEqual(self.user['id'], cred['user_id']) def test_list_credentials_filtered_by_type(self): """Call ``GET /credentials?type={type}``.""" PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) token = self.get_system_scoped_token() # The type ec2 was chosen, instead of a random string, # because the type must be in the list of supported types ec2_credential = unit.new_credential_ref( user_id=uuid.uuid4().hex, project_id=self.project_id, type=CRED_TYPE_EC2, ) ec2_resp = PROVIDERS.credential_api.create_credential( ec2_credential['id'], ec2_credential ) # The type cert was chosen for the same reason as ec2 r = self.get('/credentials?type=cert', token=token) # Testing the filter for two different types self.assertValidCredentialListResponse(r, ref=self.credential) for cred in r.result['credentials']: self.assertEqual('cert', cred['type']) r_ec2 = self.get('/credentials?type=ec2', token=token) self.assertThat(r_ec2.result['credentials'], matchers.HasLength(1)) cred_ec2 = r_ec2.result['credentials'][0] self.assertValidCredentialListResponse(r_ec2, ref=ec2_resp) self.assertEqual(CRED_TYPE_EC2, cred_ec2['type']) self.assertEqual(ec2_credential['id'], cred_ec2['id']) def test_list_credentials_filtered_by_type_and_user_id(self): """Call ``GET 
/credentials?user_id={user_id}&type={type}``.""" user1_id = uuid.uuid4().hex user2_id = uuid.uuid4().hex PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) token = self.get_system_scoped_token() # Creating credentials for two different users credential_user1_ec2 = unit.new_credential_ref( user_id=user1_id, type=CRED_TYPE_EC2 ) credential_user1_cert = unit.new_credential_ref(user_id=user1_id) credential_user2_cert = unit.new_credential_ref(user_id=user2_id) PROVIDERS.credential_api.create_credential( credential_user1_ec2['id'], credential_user1_ec2 ) PROVIDERS.credential_api.create_credential( credential_user1_cert['id'], credential_user1_cert ) PROVIDERS.credential_api.create_credential( credential_user2_cert['id'], credential_user2_cert ) r = self.get( '/credentials?user_id=%s&type=ec2' % user1_id, token=token ) self.assertValidCredentialListResponse(r, ref=credential_user1_ec2) self.assertThat(r.result['credentials'], matchers.HasLength(1)) cred = r.result['credentials'][0] self.assertEqual(CRED_TYPE_EC2, cred['type']) self.assertEqual(user1_id, cred['user_id']) def test_create_credential(self): """Call ``POST /credentials``.""" ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) def test_get_credential(self): """Call ``GET /credentials/{credential_id}``.""" r = self.get( '/credentials/%(credential_id)s' % {'credential_id': self.credential['id']} ) self.assertValidCredentialResponse(r, self.credential) def test_update_credential(self): """Call ``PATCH /credentials/{credential_id}``.""" ref = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) del ref['id'] r = self.patch( '/credentials/%(credential_id)s' % {'credential_id': self.credential['id']}, body={'credential': ref}, ) self.assertValidCredentialResponse(r, ref) def test_update_credential_to_ec2_type(self): """Call ``PATCH 
/credentials/{credential_id}``.""" # Create a credential without providing a project_id ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Updating the credential to ec2 requires a project_id update_ref = {'type': 'ec2', 'project_id': self.project_id} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, ) def test_update_credential_to_ec2_missing_project_id(self): """Call ``PATCH /credentials/{credential_id}``.""" # Create a credential without providing a project_id ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Updating such credential to ec2 type without providing a project_id # will fail update_ref = {'type': 'ec2'} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_update_credential_to_ec2_with_previously_set_project_id(self): """Call ``PATCH /credentials/{credential_id}``.""" # Create a credential providing a project_id ref = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Since the created credential above already has a project_id, the # update request will not fail update_ref = {'type': 'ec2'} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, ) def test_update_credential_non_owner(self): """Call ``PATCH /credentials/{credential_id}``.""" alt_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) alt_user_id = 
alt_user['id'] alt_project = unit.new_project_ref(domain_id=self.domain_id) alt_project_id = alt_project['id'] PROVIDERS.resource_api.create_project(alt_project['id'], alt_project) alt_role = unit.new_role_ref(name='reader') alt_role_id = alt_role['id'] PROVIDERS.role_api.create_role(alt_role_id, alt_role) PROVIDERS.assignment_api.add_role_to_user_and_project( alt_user_id, alt_project_id, alt_role_id ) auth = self.build_authentication_request( user_id=alt_user_id, password=alt_user['password'], project_id=alt_project_id, ) ref = unit.new_credential_ref( user_id=alt_user_id, project_id=alt_project_id ) r = self.post('/credentials', auth=auth, body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Cannot change the credential to be owned by another user update_ref = {'user_id': self.user_id, 'project_id': self.project_id} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, expected_status=403, auth=auth, body={'credential': update_ref}, ) def test_update_ec2_credential_change_trust_id(self): """Call ``PATCH /credentials/{credential_id}``.""" blob, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) blob['trust_id'] = uuid.uuid4().hex ref['blob'] = json.dumps(blob) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Try changing to a different trust blob['trust_id'] = uuid.uuid4().hex update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) # Try removing the trust del blob['trust_id'] update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) def 
test_update_ec2_credential_change_app_cred_id(self): """Call ``PATCH /credentials/{credential_id}``.""" blob, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) blob['app_cred_id'] = uuid.uuid4().hex ref['blob'] = json.dumps(blob) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Try changing to a different app cred blob['app_cred_id'] = uuid.uuid4().hex update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) # Try removing the app cred del blob['app_cred_id'] update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_update_ec2_credential_change_access_token_id(self): """Call ``PATCH /credentials/{credential_id}``.""" blob, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) blob['access_token_id'] = uuid.uuid4().hex ref['blob'] = json.dumps(blob) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Try changing to a different access token blob['access_token_id'] = uuid.uuid4().hex update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) # Try removing the access token del blob['access_token_id'] update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_update_ec2_credential_change_access_id(self): """Call ``PATCH 
/credentials/{credential_id}``.""" blob, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) blob['access_id'] = uuid.uuid4().hex ref['blob'] = json.dumps(blob) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) credential_id = r.result.get('credential')['id'] # Try changing to a different access_id blob['access_id'] = uuid.uuid4().hex update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) # Try removing the access_id del blob['access_id'] update_ref = {'blob': json.dumps(blob)} self.patch( '/credentials/%(credential_id)s' % {'credential_id': credential_id}, body={'credential': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_delete_credential(self): """Call ``DELETE /credentials/{credential_id}``.""" self.delete( '/credentials/%(credential_id)s' % {'credential_id': self.credential['id']} ) def test_delete_credential_retries_on_deadlock(self): patcher = mock.patch( 'sqlalchemy.orm.query.Query.delete', autospec=True ) class FakeDeadlock: def __init__(self, mock_patcher): self.deadlock_count = 2 self.mock_patcher = mock_patcher self.patched = True def __call__(self, *args, **kwargs): if self.deadlock_count > 1: self.deadlock_count -= 1 else: self.mock_patcher.stop() self.patched = False raise oslo_db_exception.DBDeadlock sql_delete_mock = patcher.start() side_effect = FakeDeadlock(patcher) sql_delete_mock.side_effect = side_effect try: PROVIDERS.credential_api.delete_credentials_for_user( user_id=self.user['id'] ) finally: if side_effect.patched: patcher.stop() # initial attempt + 1 retry self.assertEqual(sql_delete_mock.call_count, 2) def test_create_ec2_credential(self): """Call ``POST /credentials`` for creating ec2 credential.""" blob, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) r = 
self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) # Create second ec2 credential with the same access key id and check # for conflict. self.post( '/credentials', body={'credential': ref}, expected_status=http.client.CONFLICT, ) def test_get_ec2_dict_blob(self): """Ensure non-JSON blob data is correctly converted.""" expected_blob, credential_id = self._create_dict_blob_credential() r = self.get(f'/credentials/{credential_id}') # use json.loads to transform the blobs back into Python dictionaries # to avoid problems with the keys being in different orders. self.assertEqual( json.loads(expected_blob), json.loads(r.result['credential']['blob']), ) def test_list_ec2_dict_blob(self): """Ensure non-JSON blob data is correctly converted.""" expected_blob, credential_id = self._create_dict_blob_credential() list_r = self.get('/credentials') list_creds = list_r.result['credentials'] list_ids = [r['id'] for r in list_creds] self.assertIn(credential_id, list_ids) # use json.loads to transform the blobs back into Python dictionaries # to avoid problems with the keys being in different orders. for r in list_creds: if r['id'] == credential_id: self.assertEqual( json.loads(expected_blob), json.loads(r['blob']) ) def test_create_non_ec2_credential(self): """Test creating non-ec2 credential. Call ``POST /credentials``. 
""" blob, ref = unit.new_cert_credential(user_id=self.user['id']) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential id is not same as hash of access key id for # non-ec2 credentials access = blob['access'].encode('utf-8') self.assertNotEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) def test_create_ec2_credential_with_missing_project_id(self): """Test Creating ec2 credential with missing project_id. Call ``POST /credentials``. """ _, ref = unit.new_ec2_credential( user_id=self.user['id'], project_id=None ) # Assert bad request status when missing project_id self.post( '/credentials', body={'credential': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_ec2_credential_with_invalid_blob(self): """Test creating ec2 credential with invalid blob. Call ``POST /credentials``. """ ref = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id, blob='{"abc":"def"d}', type=CRED_TYPE_EC2, ) # Assert bad request status when request contains invalid blob response = self.post( '/credentials', body={'credential': ref}, expected_status=http.client.BAD_REQUEST, ) self.assertValidErrorResponse(response) def test_create_credential_with_admin_token(self): # Make sure we can create credential with the static admin token ref = unit.new_credential_ref(user_id=self.user['id']) r = self.post( '/credentials', body={'credential': ref}, token=self.get_admin_token(), ) self.assertValidCredentialResponse(r, ref) class TestCredentialTrustScoped(CredentialBaseTestCase): """Test credential with trust scoped token.""" def setUp(self): super().setUp() self.trustee_user = unit.new_user_ref(domain_id=self.domain_id) password = self.trustee_user['password'] self.trustee_user = PROVIDERS.identity_api.create_user( self.trustee_user ) self.trustee_user['password'] = password self.trustee_user_id = self.trustee_user['id'] self.useFixture( ksfixtures.KeyRepository( 
self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='trust') def test_trust_scoped_ec2_credential(self): """Test creating trust scoped ec2 credential. Call ``POST /credentials``. """ # Create the trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) del ref['id'] r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) # Get a trust scoped token auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) r = self.v3_create_token(auth_data) self.assertValidProjectScopedTokenResponse(r, self.user) trust_id = r.result['token']['OS-TRUST:trust']['id'] token_id = r.headers.get('X-Subject-Token') # Create the credential with the trust scoped token blob, ref = unit.new_ec2_credential( user_id=self.user_id, project_id=self.project_id ) r = self.post('/credentials', body={'credential': ref}, token=token_id) # We expect the response blob to contain the trust_id ret_ref = ref.copy() ret_blob = blob.copy() ret_blob['trust_id'] = trust_id ret_ref['blob'] = json.dumps(ret_blob) self.assertValidCredentialResponse(r, ref=ret_ref) # Assert credential id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) # Create a role assignment to ensure that it is ignored and only the # trust-delegated roles are used role = unit.new_role_ref(name='reader') role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, role_id ) ret_blob = json.loads(r.result['credential']['blob']) ec2token = 
self._test_get_token( access=ret_blob['access'], secret=ret_blob['secret'] ) ec2_roles = [role['id'] for role in ec2token['roles']] self.assertIn(self.role_id, ec2_roles) self.assertNotIn(role_id, ec2_roles) # Create second ec2 credential with the same access key id and check # for conflict. self.post( '/credentials', body={'credential': ref}, token=token_id, expected_status=http.client.CONFLICT, ) class TestCredentialAppCreds(CredentialBaseTestCase): """Test credential with application credential token.""" def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) def test_app_cred_ec2_credential(self): """Test creating ec2 credential from an application credential. Call ``POST /credentials``. """ # Create the app cred ref = unit.new_application_credential_ref(roles=[{'id': self.role_id}]) del ref['id'] r = self.post( '/users/%s/application_credentials' % self.user_id, body={'application_credential': ref}, ) app_cred = r.result['application_credential'] # Get an application credential token auth_data = self.build_authentication_request( app_cred_id=app_cred['id'], secret=app_cred['secret'] ) r = self.v3_create_token(auth_data) token_id = r.headers.get('X-Subject-Token') # Create the credential with the app cred token blob, ref = unit.new_ec2_credential( user_id=self.user_id, project_id=self.project_id ) r = self.post('/credentials', body={'credential': ref}, token=token_id) # We expect the response blob to contain the app_cred_id ret_ref = ref.copy() ret_blob = blob.copy() ret_blob['app_cred_id'] = app_cred['id'] ret_ref['blob'] = json.dumps(ret_blob) self.assertValidCredentialResponse(r, ref=ret_ref) # Assert credential id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) # Create a role assignment to ensure that it is ignored and only the # 
roles in the app cred are used role = unit.new_role_ref(name='reader') role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, role_id ) ret_blob = json.loads(r.result['credential']['blob']) ec2token = self._test_get_token( access=ret_blob['access'], secret=ret_blob['secret'] ) ec2_roles = [role['id'] for role in ec2token['roles']] self.assertIn(self.role_id, ec2_roles) self.assertNotIn(role_id, ec2_roles) # Create second ec2 credential with the same access key id and check # for conflict. self.post( '/credentials', body={'credential': ref}, token=token_id, expected_status=http.client.CONFLICT, ) class TestCredentialAccessToken(CredentialBaseTestCase): """Test credential with access token.""" def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) self.base_url = 'http://localhost/v3' def _urllib_parse_qs_text_keys(self, content): results = urllib.parse.parse_qs(content) return {key.decode('utf-8'): value for key, value in results.items()} def _create_single_consumer(self): endpoint = '/OS-OAUTH1/consumers' ref = {'description': uuid.uuid4().hex} resp = self.post(endpoint, body={'consumer': ref}) return resp.result['consumer'] def _create_request_token(self, consumer, project_id, base_url=None): endpoint = '/OS-OAUTH1/request_token' client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], signature_method=oauth1.SIG_HMAC, callback_uri="oob", ) headers = {'requested_project_id': project_id} if not base_url: base_url = self.base_url url, headers, body = client.sign( base_url + endpoint, http_method='POST', headers=headers ) return endpoint, headers def _create_access_token(self, consumer, token, base_url=None): endpoint = '/OS-OAUTH1/access_token' client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, 
resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC, verifier=token.verifier, ) if not base_url: base_url = self.base_url url, headers, body = client.sign( base_url + endpoint, http_method='POST' ) headers.update({'Content-Type': 'application/json'}) return endpoint, headers def _get_oauth_token(self, consumer, token): client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC, ) endpoint = '/auth/tokens' url, headers, body = client.sign( self.base_url + endpoint, http_method='POST' ) headers.update({'Content-Type': 'application/json'}) ref = {'auth': {'identity': {'oauth1': {}, 'methods': ['oauth1']}}} return endpoint, headers, ref def _authorize_request_token(self, request_id): if isinstance(request_id, bytes): request_id = request_id.decode() return '/OS-OAUTH1/authorize/%s' % (request_id) def _get_access_token(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = self._urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] request_token = oauth1.Token(request_key, request_secret) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) verifier = resp.result['token']['oauth_verifier'] request_token.set_verifier(verifier) url, headers = self._create_access_token(consumer, request_token) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = self._urllib_parse_qs_text_keys(content.result) 
access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] access_token = oauth1.Token(access_key, access_secret) url, headers, body = self._get_oauth_token(consumer, access_token) content = self.post(url, headers=headers, body=body) return access_key, content.headers['X-Subject-Token'] def test_access_token_ec2_credential(self): """Test creating ec2 credential from an oauth access token. Call ``POST /credentials``. """ access_key, token_id = self._get_access_token() # Create the credential with the access token blob, ref = unit.new_ec2_credential( user_id=self.user_id, project_id=self.project_id ) r = self.post('/credentials', body={'credential': ref}, token=token_id) # We expect the response blob to contain the access_token_id ret_ref = ref.copy() ret_blob = blob.copy() ret_blob['access_token_id'] = access_key.decode('utf-8') ret_ref['blob'] = json.dumps(ret_blob) self.assertValidCredentialResponse(r, ref=ret_ref) # Assert credential id is same as hash of access key id for # ec2 credentials access = blob['access'].encode('utf-8') self.assertEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) # Create a role assignment to ensure that it is ignored and only the # roles in the access token are used role = unit.new_role_ref(name='reader') role_id = role['id'] PROVIDERS.role_api.create_role(role_id, role) PROVIDERS.assignment_api.add_role_to_user_and_project( self.user_id, self.project_id, role_id ) ret_blob = json.loads(r.result['credential']['blob']) ec2token = self._test_get_token( access=ret_blob['access'], secret=ret_blob['secret'] ) ec2_roles = [role['id'] for role in ec2token['roles']] self.assertIn(self.role_id, ec2_roles) self.assertNotIn(role_id, ec2_roles) class TestCredentialEc2(CredentialBaseTestCase): """Test v3 credential compatibility with ec2tokens.""" def test_ec2_credential_signature_validate(self): """Test signature validation with a v3 ec2 credential.""" blob, ref = 
unit.new_ec2_credential( user_id=self.user['id'], project_id=self.project_id ) r = self.post('/credentials', body={'credential': ref}) self.assertValidCredentialResponse(r, ref) # Assert credential id is same as hash of access key id access = blob['access'].encode('utf-8') self.assertEqual( hashlib.sha256(access).hexdigest(), r.result['credential']['id'] ) cred_blob = json.loads(r.result['credential']['blob']) self.assertEqual(blob, cred_blob) self._test_get_token( access=cred_blob['access'], secret=cred_blob['secret'] ) def test_ec2_credential_signature_validate_legacy(self): """Test signature validation with a legacy v3 ec2 credential.""" cred_json, _ = self._create_dict_blob_credential() cred_blob = json.loads(cred_json) self._test_get_token( access=cred_blob['access'], secret=cred_blob['secret'] ) def _get_ec2_cred_uri(self): return '/users/%s/credentials/OS-EC2' % self.user_id def _get_ec2_cred(self): uri = self._get_ec2_cred_uri() r = self.post(uri, body={'tenant_id': self.project_id}) return r.result['credential'] def test_ec2_create_credential(self): """Test ec2 credential creation.""" ec2_cred = self._get_ec2_cred() self.assertEqual(self.user_id, ec2_cred['user_id']) self.assertEqual(self.project_id, ec2_cred['tenant_id']) self.assertIsNone(ec2_cred['trust_id']) self._test_get_token( access=ec2_cred['access'], secret=ec2_cred['secret'] ) uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) self.assertThat(ec2_cred['links']['self'], matchers.EndsWith(uri)) def test_ec2_get_credential(self): ec2_cred = self._get_ec2_cred() uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) r = self.get(uri) self.assertDictEqual(ec2_cred, r.result['credential']) self.assertThat(ec2_cred['links']['self'], matchers.EndsWith(uri)) def test_ec2_cannot_get_non_ec2_credential(self): access_key = uuid.uuid4().hex cred_id = utils.hash_access_key(access_key) non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id ) 
non_ec2_cred['id'] = cred_id PROVIDERS.credential_api.create_credential(cred_id, non_ec2_cred) uri = '/'.join([self._get_ec2_cred_uri(), access_key]) # if access_key is not found, ec2 controller raises Unauthorized # exception self.get(uri, expected_status=http.client.UNAUTHORIZED) def test_ec2_list_credentials(self): """Test ec2 credential listing.""" self._get_ec2_cred() uri = self._get_ec2_cred_uri() r = self.get(uri) cred_list = r.result['credentials'] self.assertEqual(1, len(cred_list)) self.assertThat(r.result['links']['self'], matchers.EndsWith(uri)) # non-EC2 credentials won't be fetched non_ec2_cred = unit.new_credential_ref( user_id=self.user_id, project_id=self.project_id ) non_ec2_cred['type'] = uuid.uuid4().hex PROVIDERS.credential_api.create_credential( non_ec2_cred['id'], non_ec2_cred ) r = self.get(uri) cred_list_2 = r.result['credentials'] # still one element because non-EC2 credentials are not returned. self.assertEqual(1, len(cred_list_2)) self.assertEqual(cred_list[0], cred_list_2[0]) def test_ec2_delete_credential(self): """Test ec2 credential deletion.""" ec2_cred = self._get_ec2_cred() uri = '/'.join([self._get_ec2_cred_uri(), ec2_cred['access']]) cred_from_credential_api = ( PROVIDERS.credential_api.list_credentials_for_user( self.user_id, type=CRED_TYPE_EC2 ) ) self.assertEqual(1, len(cred_from_credential_api)) self.delete(uri) self.assertRaises( exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, cred_from_credential_api[0]['id'], ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_domain_config.py0000664000175000017500000013352300000000000024271 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import http.client import uuid from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class DomainConfigTestCase(test_v3.RestfulTestCase): """Test domain config support.""" def setUp(self): super().setUp() self.domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domain['id'], self.domain) self.config = { 'ldap': { 'url': uuid.uuid4().hex, 'user_tree_dn': uuid.uuid4().hex, }, 'identity': {'driver': uuid.uuid4().hex}, } def test_create_config(self): """Call ``PUT /domains/{domain_id}/config``.""" url = '/domains/{domain_id}/config'.format(domain_id=self.domain['id']) r = self.put( url, body={'config': self.config}, expected_status=http.client.CREATED, ) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertEqual(self.config, r.result['config']) self.assertEqual(self.config, res) def test_create_config_invalid_domain(self): """Call ``PUT /domains/{domain_id}/config``. While creating Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" invalid_domain_id = uuid.uuid4().hex url = f'/domains/{invalid_domain_id}/config' self.put( url, body={'config': self.config}, expected_status=exception.DomainNotFound.code, ) def test_create_config_twice(self): """Check multiple creates don't throw error.""" self.put( '/domains/{domain_id}/config'.format(domain_id=self.domain['id']), body={'config': self.config}, expected_status=http.client.CREATED, ) self.put( '/domains/{domain_id}/config'.format(domain_id=self.domain['id']), body={'config': self.config}, expected_status=http.client.OK, ) def test_delete_config(self): """Call ``DELETE /domains{domain_id}/config``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) self.delete( '/domains/{domain_id}/config'.format(domain_id=self.domain['id']) ) self.get( '/domains/{domain_id}/config'.format(domain_id=self.domain['id']), expected_status=exception.DomainConfigNotFound.code, ) def test_delete_config_invalid_domain(self): """Call ``DELETE /domains{domain_id}/config``. While deleting Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_domain_id = uuid.uuid4().hex self.delete( f'/domains/{invalid_domain_id}/config', expected_status=exception.DomainNotFound.code, ) def test_delete_config_by_group(self): """Call ``DELETE /domains{domain_id}/config/{group}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) self.delete( '/domains/%(domain_id)s/config/ldap' % {'domain_id': self.domain['id']} ) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) self.assertNotIn('ldap', res) def test_delete_config_by_group_invalid_domain(self): """Call ``DELETE /domains{domain_id}/config/{group}``. 
While deleting Identity API-based domain config by group with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. """ PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_domain_id = uuid.uuid4().hex self.delete( '/domains/%(domain_id)s/config/ldap' % {'domain_id': invalid_domain_id}, expected_status=exception.DomainNotFound.code, ) def test_get_head_config(self): """Call ``GET & HEAD for /domains{domain_id}/config``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/{domain_id}/config'.format(domain_id=self.domain['id']) r = self.get(url) self.assertEqual(self.config, r.result['config']) self.head(url, expected_status=http.client.OK) def test_get_head_config_by_group(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/{domain_id}/config/ldap'.format( domain_id=self.domain['id'] ) r = self.get(url) self.assertEqual({'ldap': self.config['ldap']}, r.result['config']) self.head(url, expected_status=http.client.OK) def test_get_head_config_by_group_invalid_domain(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}``. While retrieving Identity API-based domain config by group with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. 
""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_domain_id = uuid.uuid4().hex url = '/domains/{domain_id}/config/ldap'.format( domain_id=invalid_domain_id ) self.get(url, expected_status=exception.DomainNotFound.code) self.head(url, expected_status=exception.DomainNotFound.code) def test_get_head_config_by_option(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/{domain_id}/config/ldap/url'.format( domain_id=self.domain['id'] ) r = self.get(url) self.assertEqual( {'url': self.config['ldap']['url']}, r.result['config'] ) self.head(url, expected_status=http.client.OK) def test_get_head_config_by_option_invalid_domain(self): """Call ``GET & HEAD /domains{domain_id}/config/{group}/{option}``. While retrieving Identity API-based domain config by option with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. """ PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_domain_id = uuid.uuid4().hex url = '/domains/{domain_id}/config/ldap/url'.format( domain_id=invalid_domain_id ) self.get(url, expected_status=exception.DomainNotFound.code) self.head(url, expected_status=exception.DomainNotFound.code) def test_get_head_non_existant_config(self): """Call ``GET /domains{domain_id}/config when no config defined``.""" url = '/domains/{domain_id}/config'.format(domain_id=self.domain['id']) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_get_head_non_existant_config_invalid_domain(self): """Call ``GET & HEAD /domains/{domain_id}/config with invalid domain``. While retrieving non-existent Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response 404 domain not found. 
""" invalid_domain_id = uuid.uuid4().hex url = f'/domains/{invalid_domain_id}/config' self.get(url, expected_status=exception.DomainNotFound.code) self.head(url, expected_status=exception.DomainNotFound.code) def test_get_head_non_existant_config_group(self): """Call ``GET /domains/{domain_id}/config/{group_not_exist}``.""" config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) url = '/domains/{domain_id}/config/identity'.format( domain_id=self.domain['id'] ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_get_head_non_existant_config_group_invalid_domain(self): """Call ``GET & HEAD /domains/{domain_id}/config/{group}``. While retrieving non-existent Identity API-based domain config group with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) invalid_domain_id = uuid.uuid4().hex url = '/domains/{domain_id}/config/identity'.format( domain_id=invalid_domain_id ) self.get(url, expected_status=exception.DomainNotFound.code) self.head(url, expected_status=exception.DomainNotFound.code) def test_get_head_non_existant_config_option(self): """Test that Not Found is returned when option doesn't exist. Call ``GET & HEAD /domains/{domain_id}/config/{group}/{opt_not_exist}`` and ensure a Not Found is returned because the option isn't defined within the group. 
""" config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) url = '/domains/{domain_id}/config/ldap/user_tree_dn'.format( domain_id=self.domain['id'] ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_get_head_non_existant_config_option_with_invalid_domain(self): """Test that Domain Not Found is returned with invalid domain. Call ``GET & HEAD /domains/{domain_id}/config/{group}/{opt_not_exist}`` While retrieving non-existent Identity API-based domain config option with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ config = {'ldap': {'url': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) invalid_domain_id = uuid.uuid4().hex url = '/domains/{domain_id}/config/ldap/user_tree_dn'.format( domain_id=invalid_domain_id ) self.get(url, expected_status=exception.DomainNotFound.code) self.head(url, expected_status=exception.DomainNotFound.code) def test_update_config(self): """Call ``PATCH /domains/{domain_id}/config``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}, } r = self.patch( '/domains/{domain_id}/config'.format(domain_id=self.domain['id']), body={'config': new_config}, ) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['identity']['driver'] = new_config['identity'][ 'driver' ] self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config``. 
While updating Identity API-based domain config with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. """ PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = { 'ldap': {'url': uuid.uuid4().hex}, 'identity': {'driver': uuid.uuid4().hex}, } invalid_domain_id = uuid.uuid4().hex self.patch( f'/domains/{invalid_domain_id}/config', body={'config': new_config}, expected_status=exception.DomainNotFound.code, ) def test_update_config_group(self): """Call ``PATCH /domains/{domain_id}/config/{group}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = { 'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex} } r = self.patch( '/domains/%(domain_id)s/config/ldap' % {'domain_id': self.domain['id']}, body={'config': new_config}, ) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['ldap']['url'] expected_config['ldap']['user_filter'] = new_config['ldap'][ 'user_filter' ] self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_group_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}``. While updating Identity API-based domain config group with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = { 'ldap': {'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex} } invalid_domain_id = uuid.uuid4().hex self.patch( '/domains/%(domain_id)s/config/ldap' % {'domain_id': invalid_domain_id}, body={'config': new_config}, expected_status=exception.DomainNotFound.code, ) def test_update_config_invalid_group(self): """Call ``PATCH /domains/{domain_id}/config/{invalid_group}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) # Trying to update a group that is neither whitelisted or sensitive # should result in Forbidden. invalid_group = uuid.uuid4().hex new_config = { invalid_group: { 'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex, } } self.patch( '/domains/%(domain_id)s/config/%(invalid_group)s' % {'domain_id': self.domain['id'], 'invalid_group': invalid_group}, body={'config': new_config}, expected_status=http.client.FORBIDDEN, ) # Trying to update a valid group, but one that is not in the current # config should result in NotFound config = {'ldap': {'suffix': uuid.uuid4().hex}} PROVIDERS.domain_config_api.create_config(self.domain['id'], config) new_config = {'identity': {'driver': uuid.uuid4().hex}} self.patch( '/domains/%(domain_id)s/config/identity' % {'domain_id': self.domain['id']}, body={'config': new_config}, expected_status=http.client.NOT_FOUND, ) def test_update_config_invalid_group_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{invalid_group}``. While updating Identity API-based domain config with an invalid group and an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_group = uuid.uuid4().hex new_config = { invalid_group: { 'url': uuid.uuid4().hex, 'user_filter': uuid.uuid4().hex, } } invalid_domain_id = uuid.uuid4().hex self.patch( '/domains/%(domain_id)s/config/%(invalid_group)s' % {'domain_id': invalid_domain_id, 'invalid_group': invalid_group}, body={'config': new_config}, expected_status=exception.DomainNotFound.code, ) def test_update_config_option(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{option}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = {'url': uuid.uuid4().hex} r = self.patch( '/domains/%(domain_id)s/config/ldap/url' % {'domain_id': self.domain['id']}, body={'config': new_config}, ) res = PROVIDERS.domain_config_api.get_config(self.domain['id']) expected_config = copy.deepcopy(self.config) expected_config['ldap']['url'] = new_config['url'] self.assertEqual(expected_config, r.result['config']) self.assertEqual(expected_config, res) def test_update_config_option_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{option}``. While updating Identity API-based domain config option with an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) new_config = {'url': uuid.uuid4().hex} invalid_domain_id = uuid.uuid4().hex self.patch( '/domains/%(domain_id)s/config/ldap/url' % {'domain_id': invalid_domain_id}, body={'config': new_config}, expected_status=exception.DomainNotFound.code, ) def test_update_config_invalid_option(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``.""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_option = uuid.uuid4().hex new_config = {'ldap': {invalid_option: uuid.uuid4().hex}} # Trying to update an option that is neither whitelisted or sensitive # should result in Forbidden. self.patch( '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % { 'domain_id': self.domain['id'], 'invalid_option': invalid_option, }, body={'config': new_config}, expected_status=http.client.FORBIDDEN, ) # Trying to update a valid option, but one that is not in the current # config should result in NotFound new_config = {'suffix': uuid.uuid4().hex} self.patch( '/domains/%(domain_id)s/config/ldap/suffix' % {'domain_id': self.domain['id']}, body={'config': new_config}, expected_status=http.client.NOT_FOUND, ) def test_update_config_invalid_option_invalid_domain(self): """Call ``PATCH /domains/{domain_id}/config/{group}/{invalid}``. While updating Identity API-based domain config with an invalid option and an invalid domain id provided, the request shall be rejected with a response, 404 domain not found. 
""" PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) invalid_option = uuid.uuid4().hex new_config = {'ldap': {invalid_option: uuid.uuid4().hex}} invalid_domain_id = uuid.uuid4().hex self.patch( '/domains/%(domain_id)s/config/ldap/%(invalid_option)s' % { 'domain_id': invalid_domain_id, 'invalid_option': invalid_option, }, body={'config': new_config}, expected_status=exception.DomainNotFound.code, ) def test_get_head_config_default(self): """Call ``GET & HEAD /domains/config/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/config/default' r = self.get(url) default_config = r.result['config'] for group in default_config: for option in default_config[group]: self.assertEqual( getattr(getattr(CONF, group), option), default_config[group][option], ) self.head(url, expected_status=http.client.OK) def test_get_head_config_default_by_group(self): """Call ``GET & HEAD /domains/config/{group}/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/config/ldap/default' r = self.get(url) default_config = r.result['config'] for option in default_config['ldap']: self.assertEqual( getattr(CONF.ldap, option), default_config['ldap'][option] ) self.head(url, expected_status=http.client.OK) def test_get_head_config_default_by_option(self): """Call ``GET & HEAD /domains/config/{group}/{option}/default``.""" # Create a config that overrides a few of the options so that we can # check that only the defaults are returned. 
PROVIDERS.domain_config_api.create_config( self.domain['id'], self.config ) url = '/domains/config/ldap/url/default' r = self.get(url) default_config = r.result['config'] self.assertEqual(CONF.ldap.url, default_config['url']) self.head(url, expected_status=http.client.OK) def test_get_head_config_default_by_invalid_group(self): """Call ``GET & HEAD for /domains/config/{bad-group}/default``.""" # First try a valid group, but one we don't support for domain config self.get( '/domains/config/resource/default', expected_status=http.client.FORBIDDEN, ) self.head( '/domains/config/resource/default', expected_status=http.client.FORBIDDEN, ) # Now try a totally invalid group url = '/domains/config/%s/default' % uuid.uuid4().hex self.get(url, expected_status=http.client.FORBIDDEN) self.head(url, expected_status=http.client.FORBIDDEN) def test_get_head_config_default_for_unsupported_group(self): # It should not be possible to expose configuration information for # groups that the domain configuration API backlists explicitly. Doing # so would be a security vulnerability because it would leak sensitive # information over the API. 
self.get( '/domains/config/ldap/password/default', expected_status=http.client.FORBIDDEN, ) self.head( '/domains/config/ldap/password/default', expected_status=http.client.FORBIDDEN, ) def test_get_head_config_default_for_invalid_option(self): """Returning invalid configuration options is invalid.""" url = '/domains/config/ldap/%s/default' % uuid.uuid4().hex self.get(url, expected_status=http.client.FORBIDDEN) self.head(url, expected_status=http.client.FORBIDDEN) class SecurityRequirementsTestCase(test_v3.RestfulTestCase): def setUp(self): super().setUp() # Create a user in the default domain self.non_admin_user = unit.create_user( PROVIDERS.identity_api, CONF.identity.default_domain_id ) # Create an admin in the default domain self.admin_user = unit.create_user( PROVIDERS.identity_api, CONF.identity.default_domain_id ) # Create a project in the default domain and a non-admin role self.project = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(self.project['id'], self.project) self.non_admin_role = unit.new_role_ref(name='not_admin') PROVIDERS.role_api.create_role( self.non_admin_role['id'], self.non_admin_role ) # Give the non-admin user a role on the project PROVIDERS.assignment_api.add_role_to_user_and_project( self.non_admin_user['id'], self.project['id'], self.role['id'] ) # Give the user the admin role on the project, which is technically # `self.role` because RestfulTestCase sets that up for us. 
PROVIDERS.assignment_api.add_role_to_user_and_project( self.admin_user['id'], self.project['id'], self.role_id ) def _get_non_admin_token(self): non_admin_auth_data = self.build_authentication_request( user_id=self.non_admin_user['id'], password=self.non_admin_user['password'], project_id=self.project['id'], ) return self.get_requested_token(non_admin_auth_data) def _get_admin_token(self): non_admin_auth_data = self.build_authentication_request( user_id=self.admin_user['id'], password=self.admin_user['password'], project_id=self.project['id'], ) return self.get_requested_token(non_admin_auth_data) def test_get_head_security_compliance_config_for_default_domain(self): """Ask for all security compliance configuration options. Support for enforcing security compliance per domain currently doesn't exist. Make sure when we ask for security compliance information, it's only for the default domain and that it only returns whitelisted options. """ password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=password_regex ) self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) expected_response = { 'security_compliance': { 'password_regex': password_regex, 'password_regex_description': password_regex_description, } } url = '/domains/{domain_id}/config/{group}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', ) # Make sure regular users and administrators can get security # requirement information. 
regular_response = self.get(url, token=self._get_non_admin_token()) self.assertEqual(regular_response.result['config'], expected_response) admin_response = self.get(url, token=self._get_admin_token()) self.assertEqual(admin_response.result['config'], expected_response) # Ensure HEAD requests behave the same way self.head( url, token=self._get_non_admin_token(), expected_status=http.client.OK, ) self.head( url, token=self._get_admin_token(), expected_status=http.client.OK ) def test_get_security_compliance_config_for_non_default_domain_fails(self): """Getting security compliance opts for other domains should fail. Support for enforcing security compliance rules per domain currently does not exist, so exposing security compliance information for any domain other than the default domain should not be allowed. """ # Create a new domain that is not the default domain domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Set the security compliance configuration options password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=password_regex ) self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) url = '/domains/{domain_id}/config/{group}'.format( domain_id=domain['id'], group='security_compliance', ) # Make sure regular users and administrators are forbidden from doing # this. 
self.get( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.get( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) # Ensure HEAD requests behave the same way self.head( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.head( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_get_non_whitelisted_security_compliance_opt_fails(self): """We only support exposing a subset of security compliance options. Given that security compliance information is sensitive in nature, we should make sure that only the options we want to expose are readable via the API. """ # Set a security compliance configuration that isn't whitelisted self.config_fixture.config( group='security_compliance', lockout_failure_attempts=1 ) url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', option='lockout_failure_attempts', ) # Make sure regular users and administrators are unable to ask for # sensitive information. 
self.get( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.get( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) # Ensure HEAD requests behave the same way self.head( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.head( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_get_security_compliance_password_regex(self): """Ask for the security compliance password regular expression.""" password_regex = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex=password_regex ) group = 'security_compliance' option = 'password_regex' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) # Make sure regular users and administrators can ask for the # password regular expression. regular_response = self.get(url, token=self._get_non_admin_token()) self.assertEqual( regular_response.result['config'][option], password_regex ) admin_response = self.get(url, token=self._get_admin_token()) self.assertEqual( admin_response.result['config'][option], password_regex ) # Ensure HEAD requests behave the same way self.head( url, token=self._get_non_admin_token(), expected_status=http.client.OK, ) self.head( url, token=self._get_admin_token(), expected_status=http.client.OK ) def test_get_security_compliance_password_regex_description(self): """Ask for the security compliance password regex description.""" password_regex_description = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', password_regex_description=password_regex_description, ) group = 'security_compliance' option = 'password_regex_description' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) # Make sure regular users and administrators can ask for the # password regular 
expression. regular_response = self.get(url, token=self._get_non_admin_token()) self.assertEqual( regular_response.result['config'][option], password_regex_description, ) admin_response = self.get(url, token=self._get_admin_token()) self.assertEqual( admin_response.result['config'][option], password_regex_description ) # Ensure HEAD requests behave the same way self.head( url, token=self._get_non_admin_token(), expected_status=http.client.OK, ) self.head( url, token=self._get_admin_token(), expected_status=http.client.OK ) def test_get_security_compliance_password_regex_returns_none(self): """When an option isn't set, we should explicitly return None.""" group = 'security_compliance' option = 'password_regex' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) # Make sure regular users and administrators can ask for the password # regular expression, but since it isn't set the returned value should # be None. regular_response = self.get(url, token=self._get_non_admin_token()) self.assertIsNone(regular_response.result['config'][option]) admin_response = self.get(url, token=self._get_admin_token()) self.assertIsNone(admin_response.result['config'][option]) # Ensure HEAD requests behave the same way self.head( url, token=self._get_non_admin_token(), expected_status=http.client.OK, ) self.head( url, token=self._get_admin_token(), expected_status=http.client.OK ) def test_get_security_compliance_password_regex_desc_returns_none(self): """When an option isn't set, we should explicitly return None.""" group = 'security_compliance' option = 'password_regex_description' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) # Make sure regular users and administrators can ask for the password # regular expression description, but since it isn't set the returned # value should be None. 
regular_response = self.get(url, token=self._get_non_admin_token()) self.assertIsNone(regular_response.result['config'][option]) admin_response = self.get(url, token=self._get_admin_token()) self.assertIsNone(admin_response.result['config'][option]) # Ensure HEAD requests behave the same way self.head( url, token=self._get_non_admin_token(), expected_status=http.client.OK, ) self.head( url, token=self._get_admin_token(), expected_status=http.client.OK ) def test_get_security_compliance_config_with_user_from_other_domain(self): """Make sure users from other domains can access password requirements. Even though a user is in a separate domain, they should be able to see the security requirements for the deployment. This is because security compliance is not yet implemented on a per domain basis. Once that happens, then this should no longer be possible since a user should only care about the security compliance requirements for the domain that they are in. """ # Make a new domain domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create a user in the new domain user = unit.create_user(PROVIDERS.identity_api, domain['id']) # Create a project in the new domain project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) # Give the new user a non-admin role on the project PROVIDERS.assignment_api.add_role_to_user_and_project( user['id'], project['id'], self.non_admin_role['id'] ) # Set our security compliance config values, we do this after we've # created our test user otherwise password validation will fail with a # uuid type regex. 
password_regex = uuid.uuid4().hex password_regex_description = uuid.uuid4().hex group = 'security_compliance' self.config_fixture.config(group=group, password_regex=password_regex) self.config_fixture.config( group=group, password_regex_description=password_regex_description ) # Get a token for the newly created user scoped to the project in the # non-default domain and use it to get the password security # requirements. user_token = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) user_token = self.get_requested_token(user_token) url = '/domains/{domain_id}/config/{group}'.format( domain_id=CONF.identity.default_domain_id, group=group, ) response = self.get(url, token=user_token) self.assertEqual( response.result['config'][group]['password_regex'], password_regex ) self.assertEqual( response.result['config'][group]['password_regex_description'], password_regex_description, ) # Ensure HEAD requests behave the same way self.head(url, token=user_token, expected_status=http.client.OK) def test_update_security_compliance_config_group_fails(self): """Make sure that updates to the entire security group section fail. We should only allow the ability to modify a deployments security compliance rules through configuration. Especially since it's only enforced on the default domain. """ new_config = { 'security_compliance': { 'password_regex': uuid.uuid4().hex, 'password_regex_description': uuid.uuid4().hex, } } url = '/domains/{domain_id}/config/{group}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', ) # Make sure regular users and administrators aren't allowed to modify # security compliance configuration through the API. 
self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_update_security_compliance_password_regex_fails(self): """Make sure any updates to security compliance options fail.""" group = 'security_compliance' option = 'password_regex' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) new_config = {group: {option: uuid.uuid4().hex}} # Make sure regular users and administrators aren't allowed to modify # security compliance configuration through the API. self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_update_security_compliance_password_regex_description_fails(self): """Make sure any updates to security compliance options fail.""" group = 'security_compliance' option = 'password_regex_description' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) new_config = {group: {option: uuid.uuid4().hex}} # Make sure regular users and administrators aren't allowed to modify # security compliance configuration through the API. self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_update_non_whitelisted_security_compliance_option_fails(self): """Updating security compliance options through the API is not allowed. Requests to update anything in the security compliance group through the API should be Forbidden. 
This ensures that we are covering cases where the option being updated isn't in the white list. """ group = 'security_compliance' option = 'lockout_failure_attempts' url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group=group, option=option, ) new_config = {group: {option: 1}} # Make sure this behavior is not possible for regular users or # administrators. self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.patch( url, body={'config': new_config}, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_delete_security_compliance_group_fails(self): """The security compliance group shouldn't be deleteable.""" url = '/domains/{domain_id}/config/{group}/'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', ) # Make sure regular users and administrators can't delete the security # compliance configuration group. self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_delete_security_compliance_password_regex_fails(self): """The security compliance options shouldn't be deleteable.""" url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', option='password_regex', ) # Make sure regular users and administrators can't delete the security # compliance configuration group. 
self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_delete_security_compliance_password_regex_description_fails(self): """The security compliance options shouldn't be deleteable.""" url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', option='password_regex_description', ) # Make sure regular users and administrators can't delete the security # compliance configuration group. self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) def test_delete_non_whitelisted_security_compliance_options_fails(self): """The security compliance options shouldn't be deleteable.""" url = '/domains/{domain_id}/config/{group}/{option}'.format( domain_id=CONF.identity.default_domain_id, group='security_compliance', option='lockout_failure_attempts', ) # Make sure regular users and administrators can't delete the security # compliance configuration group. self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_non_admin_token(), ) self.delete( url, expected_status=http.client.FORBIDDEN, token=self._get_admin_token(), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_endpoint_policy.py0000664000175000017500000002336400000000000024675 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client from testtools import matchers from keystone.common import provider_api from keystone.tests import unit from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class EndpointPolicyTestCase(test_v3.RestfulTestCase): """Test endpoint policy CRUD. In general, the controller layer of the endpoint policy extension is really just marshalling the data around the underlying manager calls. Given that the manager layer is tested in depth by the backend tests, the tests we execute here concentrate on ensuring we are correctly passing and presenting the data. """ def setUp(self): super().setUp() self.policy = unit.new_policy_ref() PROVIDERS.policy_api.create_policy(self.policy['id'], self.policy) self.service = unit.new_service_ref() PROVIDERS.catalog_api.create_service(self.service['id'], self.service) self.endpoint = unit.new_endpoint_ref( self.service['id'], enabled=True, interface='public', region_id=self.region_id, ) PROVIDERS.catalog_api.create_endpoint( self.endpoint['id'], self.endpoint ) self.region = unit.new_region_ref() PROVIDERS.catalog_api.create_region(self.region) def assert_head_and_get_return_same_response(self, url, expected_status): self.get(url, expected_status=expected_status) self.head(url, expected_status=expected_status) # endpoint policy crud tests def _crud_test(self, url): # Test when the resource does not exist also ensures # that there is not a false negative after creation. 
self.assert_head_and_get_return_same_response( url, expected_status=http.client.NOT_FOUND ) self.put(url) # test that the new resource is accessible. self.assert_head_and_get_return_same_response( url, expected_status=http.client.NO_CONTENT ) self.delete(url) # test that the deleted resource is no longer accessible self.assert_head_and_get_return_same_response( url, expected_status=http.client.NOT_FOUND ) def test_crud_for_policy_for_explicit_endpoint(self): """PUT, HEAD and DELETE for explicit endpoint policy.""" url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s' ) % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id'], } self._crud_test(url) def test_crud_for_policy_for_service(self): """PUT, HEAD and DELETE for service endpoint policy.""" url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s' ) % {'policy_id': self.policy['id'], 'service_id': self.service['id']} self._crud_test(url) def test_crud_for_policy_for_region_and_service(self): """PUT, HEAD and DELETE for region and service endpoint policy.""" url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s' ) % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id'], } self._crud_test(url) def test_get_policy_for_endpoint(self): """GET /endpoints/{endpoint_id}/policy.""" self.put( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s' % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id'], } ) self.head( '/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY' '/policy' % {'endpoint_id': self.endpoint['id']}, expected_status=http.client.OK, ) r = self.get( '/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY' '/policy' % {'endpoint_id': self.endpoint['id']} ) self.assertValidPolicyResponse(r, ref=self.policy) def test_list_endpoints_for_policy(self): """GET & HEAD /policies/%(policy_id}/endpoints.""" url = 
'/policies/{policy_id}/OS-ENDPOINT-POLICY/endpoints'.format( policy_id=self.policy['id'] ) self.put(url + '/' + self.endpoint['id']) r = self.get(url) self.assertValidEndpointListResponse(r, ref=self.endpoint) self.assertThat(r.result.get('endpoints'), matchers.HasLength(1)) self.head(url, expected_status=http.client.OK) def test_endpoint_association_cleanup_when_endpoint_deleted(self): url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/endpoints/%(endpoint_id)s' ) % { 'policy_id': self.policy['id'], 'endpoint_id': self.endpoint['id'], } self.put(url) self.head(url) self.delete( '/endpoints/{endpoint_id}'.format(endpoint_id=self.endpoint['id']) ) self.head(url, expected_status=http.client.NOT_FOUND) def test_region_service_association_cleanup_when_region_deleted(self): url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s' ) % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id'], } self.put(url) self.head(url) self.delete('/regions/{region_id}'.format(region_id=self.region['id'])) self.head(url, expected_status=http.client.NOT_FOUND) def test_region_service_association_cleanup_when_service_deleted(self): url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s/regions/%(region_id)s' ) % { 'policy_id': self.policy['id'], 'service_id': self.service['id'], 'region_id': self.region['id'], } self.put(url) self.head(url) self.delete( '/services/{service_id}'.format(service_id=self.service['id']) ) self.head(url, expected_status=http.client.NOT_FOUND) def test_service_association_cleanup_when_service_deleted(self): url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s' ) % {'policy_id': self.policy['id'], 'service_id': self.service['id']} self.put(url) self.get(url, expected_status=http.client.NO_CONTENT) self.delete( '/policies/{policy_id}'.format(policy_id=self.policy['id']) ) self.head(url, 
expected_status=http.client.NOT_FOUND) def test_service_association_cleanup_when_policy_deleted(self): url = ( '/policies/%(policy_id)s/OS-ENDPOINT-POLICY' '/services/%(service_id)s' ) % {'policy_id': self.policy['id'], 'service_id': self.service['id']} self.put(url) self.get(url, expected_status=http.client.NO_CONTENT) self.delete( '/services/{service_id}'.format(service_id=self.service['id']) ) self.head(url, expected_status=http.client.NOT_FOUND) class JsonHomeTests(test_v3.JsonHomeTestMixin): EXTENSION_LOCATION = ( 'https://docs.openstack.org/api/openstack-identity/3' '/ext/OS-ENDPOINT-POLICY/1.0/rel' ) PARAM_LOCATION = ( 'https://docs.openstack.org/api/openstack-identity/3/param' ) JSON_HOME_DATA = { EXTENSION_LOCATION + '/endpoint_policy': { 'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/' 'policy', 'href-vars': { 'endpoint_id': PARAM_LOCATION + '/endpoint_id', }, }, EXTENSION_LOCATION + '/policy_endpoints': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', }, }, EXTENSION_LOCATION + '/endpoint_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'endpoints/{endpoint_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'endpoint_id': PARAM_LOCATION + '/endpoint_id', }, }, EXTENSION_LOCATION + '/service_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'service_id': PARAM_LOCATION + '/service_id', }, }, EXTENSION_LOCATION + '/region_and_service_policy_association': { 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/' 'services/{service_id}/regions/{region_id}', 'href-vars': { 'policy_id': PARAM_LOCATION + '/policy_id', 'service_id': PARAM_LOCATION + '/service_id', 'region_id': PARAM_LOCATION + '/region_id', }, }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 
xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_federation.py0000664000175000017500000060613300000000000023617 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import http.client import os import random import re import subprocess from unittest import mock import urllib import uuid import fixtures import flask from lxml import etree from oslo_serialization import jsonutils import saml2 from saml2 import saml from saml2 import sigver from saml2 import xmldsig from testtools import matchers from keystone.api._shared import authentication from keystone.api import auth as auth_api from keystone.common import driver_hints from keystone.common import provider_api from keystone.common import render_token import keystone.conf from keystone import exception from keystone.federation import idp as keystone_idp from keystone.models import token_model from keystone import notifications from keystone.tests import unit from keystone.tests.unit import core from keystone.tests.unit import federation_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit import mapping_fixtures from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs ROOTDIR = os.path.dirname(os.path.abspath(__file__)) XMLDIR = os.path.join(ROOTDIR, 'saml2/') def dummy_validator(*args, **kwargs): pass class FederatedSetupMixin: ACTION = 'authenticate' IDP = 'ORG_IDP' PROTOCOL = 'saml2' 
AUTH_METHOD = 'saml2' USER = 'user@ORGANIZATION' ASSERTION_PREFIX = 'PREFIX_' IDP_WITH_REMOTE = 'ORG_IDP_REMOTE' REMOTE_IDS = ['entityID_IDP1', 'entityID_IDP2'] REMOTE_ID_ATTR = uuid.uuid4().hex UNSCOPED_V3_SAML2_REQ = { "identity": { "methods": [AUTH_METHOD], AUTH_METHOD: {"identity_provider": IDP, "protocol": PROTOCOL}, } } def _check_domains_are_valid(self, token): domain = PROVIDERS.resource_api.get_domain(self.idp['domain_id']) self.assertEqual(domain['id'], token['user']['domain']['id']) self.assertEqual(domain['name'], token['user']['domain']['name']) def _project(self, project): return (project['id'], project['name']) def _roles(self, roles): return {(r['id'], r['name']) for r in roles} def _check_projects_and_roles(self, token, roles, projects): """Check whether the projects and the roles match.""" token_roles = token.get('roles') if token_roles is None: raise AssertionError('Roles not found in the token') token_roles = self._roles(token_roles) roles_ref = self._roles(roles) self.assertEqual(token_roles, roles_ref) token_projects = token.get('project') if token_projects is None: raise AssertionError('Projects not found in the token') token_projects = self._project(token_projects) projects_ref = self._project(projects) self.assertEqual(token_projects, projects_ref) def _check_scoped_token_attributes(self, token): for obj in ( 'user', 'catalog', 'expires_at', 'issued_at', 'methods', 'roles', ): self.assertIn(obj, token) os_federation = token['user']['OS-FEDERATION'] self.assertIn('groups', os_federation) self.assertIn('identity_provider', os_federation) self.assertIn('protocol', os_federation) self.assertThat(os_federation, matchers.HasLength(3)) self.assertEqual(self.IDP, os_federation['identity_provider']['id']) self.assertEqual(self.PROTOCOL, os_federation['protocol']['id']) def _check_project_scoped_token_attributes(self, token, project_id): self.assertEqual(project_id, token['project']['id']) self._check_scoped_token_attributes(token) def 
_check_domain_scoped_token_attributes(self, token, domain_id): self.assertEqual(domain_id, token['domain']['id']) self._check_scoped_token_attributes(token) def assertValidMappedUser(self, token): """Check if user object meets all the criteria.""" user = token['user'] self.assertIn('id', user) self.assertIn('name', user) self.assertIn('domain', user) self.assertIn('groups', user['OS-FEDERATION']) self.assertIn('identity_provider', user['OS-FEDERATION']) self.assertIn('protocol', user['OS-FEDERATION']) # Make sure user_name is url safe self.assertEqual(urllib.parse.quote(user['name']), user['name']) def _issue_unscoped_token( self, idp=None, assertion='EMPLOYEE_ASSERTION', environment=None ): environment = environment or {} environment.update(getattr(mapping_fixtures, assertion)) with self.make_request(environ=environment): if idp is None: idp = self.IDP r = authentication.federated_authenticate_for_token( protocol_id=self.PROTOCOL, identity_provider=idp ) return r def idp_ref(self, id=None): idp = { 'id': id or uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex, } return idp def proto_ref(self, mapping_id=None): proto = { 'id': uuid.uuid4().hex, 'mapping_id': mapping_id or uuid.uuid4().hex, } return proto def mapping_ref(self, rules=None): return { 'id': uuid.uuid4().hex, 'rules': rules or self.rules['rules'], 'schema_version': "1.0", } def _scope_request(self, unscoped_token_id, scope, scope_id): return { 'auth': { 'identity': { 'methods': ['token'], 'token': {'id': unscoped_token_id}, }, 'scope': {scope: {'id': scope_id}}, } } def _inject_assertion(self, variant): assertion = getattr(mapping_fixtures, variant) flask.request.environ.update(assertion) def load_federation_sample_data(self): """Inject additional data.""" # Create and add domains self.domainA = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainA['id'], self.domainA) self.domainB = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainB['id'], 
self.domainB) self.domainC = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainC['id'], self.domainC) self.domainD = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domainD['id'], self.domainD) # Create and add projects self.proj_employees = unit.new_project_ref( domain_id=self.domainA['id'] ) PROVIDERS.resource_api.create_project( self.proj_employees['id'], self.proj_employees ) self.proj_customers = unit.new_project_ref( domain_id=self.domainA['id'] ) PROVIDERS.resource_api.create_project( self.proj_customers['id'], self.proj_customers ) self.project_all = unit.new_project_ref(domain_id=self.domainA['id']) PROVIDERS.resource_api.create_project( self.project_all['id'], self.project_all ) self.project_inherited = unit.new_project_ref( domain_id=self.domainD['id'] ) PROVIDERS.resource_api.create_project( self.project_inherited['id'], self.project_inherited ) # Create and add groups self.group_employees = unit.new_group_ref(domain_id=self.domainA['id']) self.group_employees = PROVIDERS.identity_api.create_group( self.group_employees ) self.group_customers = unit.new_group_ref(domain_id=self.domainA['id']) self.group_customers = PROVIDERS.identity_api.create_group( self.group_customers ) self.group_admins = unit.new_group_ref(domain_id=self.domainA['id']) self.group_admins = PROVIDERS.identity_api.create_group( self.group_admins ) # Create and add roles self.role_employee = unit.new_role_ref() PROVIDERS.role_api.create_role( self.role_employee['id'], self.role_employee ) self.role_customer = unit.new_role_ref() PROVIDERS.role_api.create_role( self.role_customer['id'], self.role_customer ) self.role_admin = unit.new_role_ref() PROVIDERS.role_api.create_role(self.role_admin['id'], self.role_admin) # Employees can access # * proj_employees # * project_all PROVIDERS.assignment_api.create_grant( self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.proj_employees['id'], ) PROVIDERS.assignment_api.create_grant( 
self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.project_all['id'], ) # Customers can access # * proj_customers PROVIDERS.assignment_api.create_grant( self.role_customer['id'], group_id=self.group_customers['id'], project_id=self.proj_customers['id'], ) # Admins can access: # * proj_customers # * proj_employees # * project_all PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.proj_customers['id'], ) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.proj_employees['id'], ) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], project_id=self.project_all['id'], ) # Customers can access: # * domain A PROVIDERS.assignment_api.create_grant( self.role_customer['id'], group_id=self.group_customers['id'], domain_id=self.domainA['id'], ) # Customers can access projects via inheritance: # * domain D PROVIDERS.assignment_api.create_grant( self.role_customer['id'], group_id=self.group_customers['id'], domain_id=self.domainD['id'], inherited_to_projects=True, ) # Employees can access: # * domain A # * domain B PROVIDERS.assignment_api.create_grant( self.role_employee['id'], group_id=self.group_employees['id'], domain_id=self.domainA['id'], ) PROVIDERS.assignment_api.create_grant( self.role_employee['id'], group_id=self.group_employees['id'], domain_id=self.domainB['id'], ) # Admins can access: # * domain A # * domain B # * domain C PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], domain_id=self.domainA['id'], ) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], domain_id=self.domainB['id'], ) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], group_id=self.group_admins['id'], domain_id=self.domainC['id'], ) self.rules = { 'rules': [ { 'local': [ {'group': {'id': 
self.group_employees['id']}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ {'type': 'UserName'}, { 'type': 'Email', }, {'type': 'orgPersonType', 'any_one_of': ['Employee']}, ], }, { 'local': [ {'group': {'id': self.group_employees['id']}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ {'type': self.ASSERTION_PREFIX + 'UserName'}, { 'type': self.ASSERTION_PREFIX + 'Email', }, { 'type': self.ASSERTION_PREFIX + 'orgPersonType', 'any_one_of': ['SuperEmployee'], }, ], }, { 'local': [ {'group': {'id': self.group_customers['id']}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ {'type': 'UserName'}, {'type': 'Email'}, {'type': 'orgPersonType', 'any_one_of': ['Customer']}, ], }, { 'local': [ {'group': {'id': self.group_admins['id']}}, {'group': {'id': self.group_employees['id']}}, {'group': {'id': self.group_customers['id']}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ {'type': 'UserName'}, {'type': 'Email'}, { 'type': 'orgPersonType', 'any_one_of': ['Admin', 'Chief'], }, ], }, { 'local': [ {'group': {'id': uuid.uuid4().hex}}, {'group': {'id': self.group_customers['id']}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ { 'type': 'UserName', }, { 'type': 'Email', }, {'type': 'FirstName', 'any_one_of': ['Jill']}, {'type': 'LastName', 'any_one_of': ['Smith']}, ], }, { 'local': [ {'group': {'id': 'this_group_no_longer_exists'}}, {'user': {'name': '{0}', 'id': '{1}'}}, ], 'remote': [ { 'type': 'UserName', }, { 'type': 'Email', }, { 'type': 'Email', 'any_one_of': ['testacct@example.com'], }, {'type': 'orgPersonType', 'any_one_of': ['Tester']}, ], }, # rules with local group names { "local": [ {'user': {'name': '{0}', 'id': '{1}'}}, { "group": { "name": self.group_customers['name'], "domain": {"name": self.domainA['name']}, } }, ], "remote": [ { 'type': 'UserName', }, { 'type': 'Email', }, { "type": "orgPersonType", "any_one_of": ["CEO", "CTO"], }, ], }, { "local": [ {'user': {'name': '{0}', 'id': '{1}'}}, { "group": { "name": 
self.group_admins['name'], "domain": {"id": self.domainA['id']}, } }, ], "remote": [ { "type": "UserName", }, { "type": "Email", }, {"type": "orgPersonType", "any_one_of": ["Managers"]}, ], }, { "local": [ {"user": {"name": "{0}", "id": "{1}"}}, { "group": { "name": "NON_EXISTING", "domain": {"id": self.domainA['id']}, } }, ], "remote": [ { "type": "UserName", }, { "type": "Email", }, {"type": "UserName", "any_one_of": ["IamTester"]}, ], }, { "local": [ { "user": { "type": "local", "name": self.user['name'], "domain": {"id": self.user['domain_id']}, } }, {"group": {"id": self.group_customers['id']}}, ], "remote": [{"type": "UserType", "any_one_of": ["random"]}], }, { "local": [ { "user": { "type": "local", "name": self.user['name'], "domain": {"id": uuid.uuid4().hex}, } } ], "remote": [ {"type": "Position", "any_one_of": ["DirectorGeneral"]} ], }, # rules for users with no groups { "local": [{'user': {'name': '{0}', 'id': '{1}'}}], "remote": [ { 'type': 'UserName', }, { 'type': 'Email', }, { 'type': 'orgPersonType', 'any_one_of': ['NoGroupsOrg'], }, ], }, ] } # Add unused IdP first so it is indexed first (#1838592) self.dummy_idp = self.idp_ref() PROVIDERS.federation_api.create_idp( self.dummy_idp['id'], self.dummy_idp ) # Add IDP self.idp = self.idp_ref(id=self.IDP) PROVIDERS.federation_api.create_idp(self.idp['id'], self.idp) # Add IDP with remote self.idp_with_remote = self.idp_ref(id=self.IDP_WITH_REMOTE) self.idp_with_remote['remote_ids'] = self.REMOTE_IDS PROVIDERS.federation_api.create_idp( self.idp_with_remote['id'], self.idp_with_remote ) # Add a mapping self.mapping = self.mapping_ref() PROVIDERS.federation_api.create_mapping( self.mapping['id'], self.mapping ) # Add protocols self.proto_saml = self.proto_ref(mapping_id=self.mapping['id']) self.proto_saml['id'] = self.PROTOCOL PROVIDERS.federation_api.create_protocol( self.idp['id'], self.proto_saml['id'], self.proto_saml ) # Add protocols IDP with remote PROVIDERS.federation_api.create_protocol( 
self.idp_with_remote['id'], self.proto_saml['id'], self.proto_saml ) # Add unused protocol to go with unused IdP (#1838592) self.proto_dummy = self.proto_ref(mapping_id=self.mapping['id']) PROVIDERS.federation_api.create_protocol( self.dummy_idp['id'], self.proto_dummy['id'], self.proto_dummy ) with self.make_request(): self.tokens = {} VARIANTS = ( 'EMPLOYEE_ASSERTION', 'CUSTOMER_ASSERTION', 'ADMIN_ASSERTION', ) for variant in VARIANTS: self._inject_assertion(variant) r = authentication.authenticate_for_token( self.UNSCOPED_V3_SAML2_REQ ) self.tokens[variant] = r.id self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN = ( self._scope_request( uuid.uuid4().hex, 'project', self.proj_customers['id'] ) ) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE = ( self._scope_request( self.tokens['EMPLOYEE_ASSERTION'], 'project', self.proj_employees['id'], ) ) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'project', self.proj_employees['id'], ) self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'project', self.proj_customers['id'], ) self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER = ( self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'project', self.proj_employees['id'], ) ) self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER = ( self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'project', self.project_inherited['id'], ) ) self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainA['id'] ) self.TOKEN_SCOPE_DOMAIN_B_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainB['id'] ) self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER = self._scope_request( self.tokens['CUSTOMER_ASSERTION'], 'domain', self.domainD['id'] ) self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainA['id'] ) self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN = 
self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainB['id'] ) self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN = self._scope_request( self.tokens['ADMIN_ASSERTION'], 'domain', self.domainC['id'] ) class FederatedIdentityProviderTests(test_v3.RestfulTestCase): """A test class for Identity Providers.""" idp_keys = ['description', 'enabled'] default_body = {'description': None, 'enabled': True} def base_url(self, suffix=None): if suffix is not None: return '/OS-FEDERATION/identity_providers/' + str(suffix) return '/OS-FEDERATION/identity_providers' def _fetch_attribute_from_response( self, resp, parameter, assert_is_not_none=True ): """Fetch single attribute from TestResponse object.""" result = resp.result.get(parameter) if assert_is_not_none: self.assertIsNotNone(result) return result def _create_and_decapsulate_response(self, body=None): """Create IdP and fetch it's random id along with entity.""" default_resp = self._create_default_idp(body=body) idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) self.assertIsNotNone(idp) idp_id = idp.get('id') return (idp_id, idp) def _get_idp(self, idp_id): """Fetch IdP entity based on its id.""" url = self.base_url(suffix=idp_id) resp = self.get(url) return resp def _create_default_idp( self, body=None, expected_status=http.client.CREATED ): """Create default IdP.""" url = self.base_url(suffix=uuid.uuid4().hex) if body is None: body = self._http_idp_input() resp = self.put( url, body={'identity_provider': body}, expected_status=expected_status, ) return resp def _http_idp_input(self): """Create default input dictionary for IdP data.""" body = self.default_body.copy() body['description'] = uuid.uuid4().hex return body def _assign_protocol_to_idp( self, idp_id=None, proto=None, url=None, mapping_id=None, validate=True, **kwargs, ): if url is None: url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') if idp_id is None: idp_id, _ = self._create_and_decapsulate_response() if proto 
is None: proto = uuid.uuid4().hex if mapping_id is None: mapping_id = uuid.uuid4().hex self._create_mapping(mapping_id) body = {'mapping_id': mapping_id} url = url % {'idp_id': idp_id, 'protocol_id': proto} resp = self.put(url, body={'protocol': body}, **kwargs) if validate: self.assertValidResponse( resp, 'protocol', dummy_validator, keys_to_check=['id', 'mapping_id'], ref={'id': proto, 'mapping_id': mapping_id}, ) return (resp, idp_id, proto) def _get_protocol(self, idp_id, protocol_id): url = f'{idp_id}/protocols/{protocol_id}' url = self.base_url(suffix=url) r = self.get(url) return r def _create_mapping(self, mapping_id): mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER mapping['id'] = mapping_id url = '/OS-FEDERATION/mappings/%s' % mapping_id self.put( url, body={'mapping': mapping}, expected_status=http.client.CREATED ) def assertIdpDomainCreated(self, idp_id, domain_id): domain = PROVIDERS.resource_api.get_domain(domain_id) self.assertEqual(domain_id, domain['name']) self.assertIn(idp_id, domain['description']) def test_create_idp_without_domain_id(self): """Create the IdentityProvider entity associated to remote_ids.""" keys_to_check = list(self.idp_keys) body = self.default_body.copy() body['description'] = uuid.uuid4().hex resp = self._create_default_idp(body=body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) attr = self._fetch_attribute_from_response(resp, 'identity_provider') self.assertIdpDomainCreated(attr['id'], attr['domain_id']) def test_create_idp_with_domain_id(self): keys_to_check = list(self.idp_keys) keys_to_check.append('domain_id') body = self.default_body.copy() body['description'] = uuid.uuid4().hex domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) body['domain_id'] = domain['id'] resp = self._create_default_idp(body=body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) 
def test_create_idp_domain_id_none(self): keys_to_check = list(self.idp_keys) body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['domain_id'] = None resp = self._create_default_idp(body=body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) attr = self._fetch_attribute_from_response(resp, 'identity_provider') self.assertIdpDomainCreated(attr['id'], attr['domain_id']) def test_conflicting_idp_cleans_up_auto_generated_domain(self): # NOTE(lbragstad): Create an identity provider, save its ID, and count # the number of domains. resp = self._create_default_idp() idp_id = resp.json_body['identity_provider']['id'] domains = PROVIDERS.resource_api.list_domains() number_of_domains = len(domains) # Create an identity provider with the same ID to intentionally cause a # conflict, this is going to result in a domain getting created for the # new identity provider. The domain for the new identity provider is # going to be created before the conflict is raised from the database # layer. This makes sure the domain is cleaned up after a Conflict is # detected. 
resp = self.put( self.base_url(suffix=idp_id), body={'identity_provider': self.default_body.copy()}, expected_status=http.client.CONFLICT, ) domains = PROVIDERS.resource_api.list_domains() self.assertEqual(number_of_domains, len(domains)) def test_conflicting_idp_does_not_delete_existing_domain(self): # Create a new domain domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) # Create an identity provider and specify the domain body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['domain_id'] = domain['id'] resp = self._create_default_idp(body=body) idp = resp.json_body['identity_provider'] idp_id = idp['id'] self.assertEqual(idp['domain_id'], domain['id']) # Create an identity provider with the same domain and ID to ensure a # Conflict is raised and then to verify the existing domain not deleted # below body = self.default_body.copy() body['domain_id'] = domain['id'] resp = self.put( self.base_url(suffix=idp_id), body={'identity_provider': body}, expected_status=http.client.CONFLICT, ) # Make sure the domain specified in the second request was not deleted, # since it wasn't auto-generated self.assertIsNotNone(PROVIDERS.resource_api.get_domain(domain['id'])) def test_create_multi_idp_to_one_domain(self): # create domain and add domain_id to keys to check domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) keys_to_check = list(self.idp_keys) keys_to_check.append('domain_id') # create idp with the domain_id body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['domain_id'] = domain['id'] idp1 = self._create_default_idp(body=body) self.assertValidResponse( idp1, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) # create a 2nd idp with the same domain_id url = self.base_url(suffix=uuid.uuid4().hex) body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['domain_id'] = domain['id'] idp2 = self.put( url, 
body={'identity_provider': body}, expected_status=http.client.CREATED, ) self.assertValidResponse( idp2, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) self.assertEqual( idp1.result['identity_provider']['domain_id'], idp2.result['identity_provider']['domain_id'], ) def test_cannot_update_idp_domain(self): # create new idp body = self.default_body.copy() default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') self.assertIsNotNone(idp_id) # create domain and try to update the idp's domain domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) body['domain_id'] = domain['id'] body = {'identity_provider': body} url = self.base_url(suffix=idp_id) self.patch(url, body=body, expected_status=http.client.BAD_REQUEST) def test_create_idp_with_nonexistent_domain_id_fails(self): body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['domain_id'] = uuid.uuid4().hex self._create_default_idp( body=body, expected_status=http.client.NOT_FOUND ) def test_create_idp_remote(self): """Create the IdentityProvider entity associated to remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = [ uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, ] resp = self._create_default_idp(body=body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) attr = self._fetch_attribute_from_response(resp, 'identity_provider') self.assertIdpDomainCreated(attr['id'], attr['domain_id']) def test_create_idp_remote_repeated(self): """Create two IdentityProvider entities with some remote_ids. 
A remote_id is the same for both so the second IdP is not created because of the uniqueness of the remote_ids Expect HTTP 409 Conflict code for the latter call. """ body = self.default_body.copy() repeated_remote_id = uuid.uuid4().hex body['remote_ids'] = [ uuid.uuid4().hex, uuid.uuid4().hex, uuid.uuid4().hex, repeated_remote_id, ] self._create_default_idp(body=body) url = self.base_url(suffix=uuid.uuid4().hex) body['remote_ids'] = [uuid.uuid4().hex, repeated_remote_id] resp = self.put( url, body={'identity_provider': body}, expected_status=http.client.CONFLICT, ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Duplicate remote ID', resp_data.get('error', {}).get('message') ) def test_create_idp_remote_empty(self): """Create an IdP with empty remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = [] resp = self._create_default_idp(body=body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=body, ) def test_create_idp_remote_none(self): """Create an IdP with a None remote_ids.""" keys_to_check = list(self.idp_keys) keys_to_check.append('remote_ids') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['remote_ids'] = None resp = self._create_default_idp(body=body) expected = body.copy() expected['remote_ids'] = [] self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=expected, ) def test_create_idp_authorization_ttl(self): keys_to_check = list(self.idp_keys) keys_to_check.append('authorization_ttl') body = self.default_body.copy() body['description'] = uuid.uuid4().hex body['authorization_ttl'] = 10080 resp = self._create_default_idp(body) expected = body.copy() self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=keys_to_check, ref=expected, ) def test_update_idp_remote_ids(self): 
"""Update IdP's remote_ids parameter.""" body = self.default_body.copy() body['remote_ids'] = [uuid.uuid4().hex] default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) body['remote_ids'] = [uuid.uuid4().hex, uuid.uuid4().hex] body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) body = body['identity_provider'] self.assertEqual( sorted(body['remote_ids']), sorted(updated_idp.get('remote_ids')) ) resp = self.get(url) returned_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) self.assertEqual( sorted(body['remote_ids']), sorted(returned_idp.get('remote_ids')) ) def test_update_idp_clean_remote_ids(self): """Update IdP's remote_ids parameter with an empty list.""" body = self.default_body.copy() body['remote_ids'] = [uuid.uuid4().hex] default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) body['remote_ids'] = [] body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) body = body['identity_provider'] self.assertEqual( sorted(body['remote_ids']), sorted(updated_idp.get('remote_ids')) ) resp = self.get(url) returned_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) self.assertEqual( sorted(body['remote_ids']), sorted(returned_idp.get('remote_ids')) ) def test_update_idp_remote_repeated(self): """Update an IdentityProvider entity reusing a remote_id. A remote_id is the same for both so the second IdP is not updated because of the uniqueness of the remote_ids. 
Expect HTTP 409 Conflict code for the latter call. """ # Create first identity provider body = self.default_body.copy() repeated_remote_id = uuid.uuid4().hex body['remote_ids'] = [uuid.uuid4().hex, repeated_remote_id] self._create_default_idp(body=body) # Create second identity provider (without remote_ids) body = self.default_body.copy() default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) body['remote_ids'] = [repeated_remote_id] resp = self.patch( url, body={'identity_provider': body}, expected_status=http.client.CONFLICT, ) resp_data = jsonutils.loads(resp.body) self.assertIn('Duplicate remote ID', resp_data['error']['message']) def test_update_idp_authorization_ttl(self): body = self.default_body.copy() body['authorization_ttl'] = 10080 default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) body['authorization_ttl'] = None body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) body = body['identity_provider'] self.assertEqual( body['authorization_ttl'], updated_idp.get('authorization_ttl') ) resp = self.get(url) returned_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) self.assertEqual( body['authorization_ttl'], returned_idp.get('authorization_ttl') ) def test_list_head_idps(self, iterations=5): """List all available IdentityProviders. This test collects ids of created IdPs and intersects it with the list of all available IdPs. List of all IdPs can be a superset of IdPs created in this test, because other tests also create IdPs. 
""" def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') ids = [] for _ in range(iterations): id = get_id(self._create_default_idp()) ids.append(id) ids = set(ids) keys_to_check = self.idp_keys keys_to_check.append('domain_id') url = self.base_url() resp = self.get(url) self.assertValidListResponse( resp, 'identity_providers', dummy_validator, keys_to_check=keys_to_check, ) entities = self._fetch_attribute_from_response( resp, 'identity_providers' ) entities_ids = {e['id'] for e in entities} ids_intersection = entities_ids.intersection(ids) self.assertEqual(ids_intersection, ids) self.head(url, expected_status=http.client.OK) def test_filter_list_head_idp_by_id(self): def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') idp1_id = get_id(self._create_default_idp()) idp2_id = get_id(self._create_default_idp()) # list the IdP, should get two IdP. url = self.base_url() resp = self.get(url) entities = self._fetch_attribute_from_response( resp, 'identity_providers' ) entities_ids = [e['id'] for e in entities] self.assertCountEqual(entities_ids, [idp1_id, idp2_id]) # filter the IdP by ID. url = self.base_url() + '?id=' + idp1_id resp = self.get(url) filtered_service_list = resp.json['identity_providers'] self.assertThat(filtered_service_list, matchers.HasLength(1)) self.assertEqual(idp1_id, filtered_service_list[0].get('id')) self.head(url, expected_status=http.client.OK) def test_filter_list_head_idp_by_enabled(self): def get_id(resp): r = self._fetch_attribute_from_response(resp, 'identity_provider') return r.get('id') idp1_id = get_id(self._create_default_idp()) body = self.default_body.copy() body['enabled'] = False idp2_id = get_id(self._create_default_idp(body=body)) # list the IdP, should get two IdP. 
url = self.base_url() resp = self.get(url) entities = self._fetch_attribute_from_response( resp, 'identity_providers' ) entities_ids = [e['id'] for e in entities] self.assertCountEqual(entities_ids, [idp1_id, idp2_id]) # filter the IdP by 'enabled'. url = self.base_url() + '?enabled=True' resp = self.get(url) filtered_service_list = resp.json['identity_providers'] self.assertThat(filtered_service_list, matchers.HasLength(1)) self.assertEqual(idp1_id, filtered_service_list[0].get('id')) self.head(url, expected_status=http.client.OK) def test_check_idp_uniqueness(self): """Add same IdP twice. Expect HTTP 409 Conflict code for the latter call. """ url = self.base_url(suffix=uuid.uuid4().hex) body = self._http_idp_input() domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) body['domain_id'] = domain['id'] self.put( url, body={'identity_provider': body}, expected_status=http.client.CREATED, ) resp = self.put( url, body={'identity_provider': body}, expected_status=http.client.CONFLICT, ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Duplicate entry', resp_data.get('error', {}).get('message') ) def test_get_head_idp(self): """Create and later fetch IdP.""" body = self._http_idp_input() domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) body['domain_id'] = domain['id'] default_resp = self._create_default_idp(body=body) default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) resp = self.get(url) # Strip keys out of `body` dictionary. This is done # to be python 3 compatible body_keys = list(body) self.assertValidResponse( resp, 'identity_provider', dummy_validator, keys_to_check=body_keys, ref=body, ) self.head(url, expected_status=http.client.OK) def test_get_nonexisting_idp(self): """Fetch nonexisting IdP entity. Expected HTTP 404 Not Found status code. 
""" idp_id = uuid.uuid4().hex self.assertIsNotNone(idp_id) url = self.base_url(suffix=idp_id) self.get(url, expected_status=http.client.NOT_FOUND) def test_delete_existing_idp(self): """Create and later delete IdP. Expect HTTP 404 Not Found for the GET IdP call. """ default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') self.assertIsNotNone(idp_id) url = self.base_url(suffix=idp_id) self.delete(url) self.get(url, expected_status=http.client.NOT_FOUND) def test_delete_idp_also_deletes_assigned_protocols(self): """Deleting an IdP will delete its assigned protocol.""" # create default IdP default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp['id'] protocol_id = uuid.uuid4().hex url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') idp_url = self.base_url(suffix=idp_id) # assign protocol to IdP kwargs = {'expected_status': http.client.CREATED} resp, idp_id, proto = self._assign_protocol_to_idp( url=url, idp_id=idp_id, proto=protocol_id, **kwargs ) # removing IdP will remove the assigned protocol as well self.assertEqual( 1, len(PROVIDERS.federation_api.list_protocols(idp_id)) ) self.delete(idp_url) self.get(idp_url, expected_status=http.client.NOT_FOUND) self.assertEqual( 0, len(PROVIDERS.federation_api.list_protocols(idp_id)) ) def test_delete_nonexisting_idp(self): """Delete nonexisting IdP. Expect HTTP 404 Not Found for the GET IdP call. 
""" idp_id = uuid.uuid4().hex url = self.base_url(suffix=idp_id) self.delete(url, expected_status=http.client.NOT_FOUND) def test_update_idp_mutable_attributes(self): """Update IdP's mutable parameters.""" default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') url = self.base_url(suffix=idp_id) self.assertIsNotNone(idp_id) _enabled = not default_idp.get('enabled') body = { 'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex], 'description': uuid.uuid4().hex, 'enabled': _enabled, } body = {'identity_provider': body} resp = self.patch(url, body=body) updated_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) body = body['identity_provider'] for key in body.keys(): if isinstance(body[key], list): self.assertEqual( sorted(body[key]), sorted(updated_idp.get(key)) ) else: self.assertEqual(body[key], updated_idp.get(key)) resp = self.get(url) updated_idp = self._fetch_attribute_from_response( resp, 'identity_provider' ) for key in body.keys(): if isinstance(body[key], list): self.assertEqual( sorted(body[key]), sorted(updated_idp.get(key)) ) else: self.assertEqual(body[key], updated_idp.get(key)) def test_update_idp_immutable_attributes(self): """Update IdP's immutable parameters. Expect HTTP BAD REQUEST. """ default_resp = self._create_default_idp() default_idp = self._fetch_attribute_from_response( default_resp, 'identity_provider' ) idp_id = default_idp.get('id') self.assertIsNotNone(idp_id) body = self._http_idp_input() body['id'] = uuid.uuid4().hex body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex] url = self.base_url(suffix=idp_id) self.patch( url, body={'identity_provider': body}, expected_status=http.client.BAD_REQUEST, ) def test_update_nonexistent_idp(self): """Update nonexistent IdP. Expect HTTP 404 Not Found code. 
""" idp_id = uuid.uuid4().hex url = self.base_url(suffix=idp_id) body = self._http_idp_input() body['enabled'] = False body = {'identity_provider': body} self.patch(url, body=body, expected_status=http.client.NOT_FOUND) def test_assign_protocol_to_idp(self): """Assign a protocol to existing IdP.""" self._assign_protocol_to_idp(expected_status=http.client.CREATED) def test_protocol_composite_pk(self): """Test that Keystone can add two entities. The entities have identical names, however, attached to different IdPs. 1. Add IdP and assign it protocol with predefined name 2. Add another IdP and assign it a protocol with same name. Expect HTTP 201 code """ url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') kwargs = {'expected_status': http.client.CREATED} self._assign_protocol_to_idp(proto='saml2', url=url, **kwargs) self._assign_protocol_to_idp(proto='saml2', url=url, **kwargs) def test_protocol_idp_pk_uniqueness(self): """Test whether Keystone checks for unique idp/protocol values. Add same protocol twice, expect Keystone to reject a latter call and return HTTP 409 Conflict code. """ url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') kwargs = {'expected_status': http.client.CREATED} resp, idp_id, proto = self._assign_protocol_to_idp( proto='saml2', url=url, **kwargs ) kwargs = {'expected_status': http.client.CONFLICT} self._assign_protocol_to_idp( idp_id=idp_id, proto='saml2', validate=False, url=url, **kwargs ) def test_assign_protocol_to_nonexistent_idp(self): """Assign protocol to IdP that doesn't exist. Expect HTTP 404 Not Found code. """ idp_id = uuid.uuid4().hex kwargs = {'expected_status': http.client.NOT_FOUND} self._assign_protocol_to_idp( proto='saml2', idp_id=idp_id, validate=False, **kwargs ) def test_crud_protocol_without_protocol_id_in_url(self): # NOTE(morgan): This test is redundant but is added to ensure # the url routing error in bug 1817313 is explicitly covered. 
# create a protocol, but do not put the ID in the URL idp_id, _ = self._create_and_decapsulate_response() mapping_id = uuid.uuid4().hex self._create_mapping(mapping_id=mapping_id) protocol = {'id': uuid.uuid4().hex, 'mapping_id': mapping_id} with self.test_client() as c: token = self.get_scoped_token() # DELETE/PATCH/PUT on non-trailing `/` results in # METHOD_NOT_ALLOWED c.delete( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols' % {'idp_id': idp_id}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) c.patch( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols/' % {'idp_id': idp_id}, json={'protocol': protocol}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) c.put( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols' % {'idp_id': idp_id}, json={'protocol': protocol}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) # DELETE/PATCH/PUT should raise 405 with trailing '/', it is # remapped to without the trailing '/' by the normalization # middleware. 
c.delete( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols/' % {'idp_id': idp_id}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) c.patch( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols/' % {'idp_id': idp_id}, json={'protocol': protocol}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) c.put( '/v3/OS-FEDERATION/identity_providers/%(idp_id)s' '/protocols/' % {'idp_id': idp_id}, json={'protocol': protocol}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) def test_get_head_protocol(self): """Create and later fetch protocol tied to IdP.""" resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http.client.CREATED ) proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id'] url = f"{idp_id}/protocols/{proto_id}" url = self.base_url(suffix=url) resp = self.get(url) reference = {'id': proto_id} # Strip keys out of `body` dictionary. This is done # to be python 3 compatible reference_keys = list(reference) self.assertValidResponse( resp, 'protocol', dummy_validator, keys_to_check=reference_keys, ref=reference, ) self.head(url, expected_status=http.client.OK) def test_list_head_protocols(self): """Create set of protocols and later list them. Compare input and output id sets. 
""" resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http.client.CREATED ) iterations = random.randint(0, 16) protocol_ids = [] for _ in range(iterations): resp, _, proto = self._assign_protocol_to_idp( idp_id=idp_id, expected_status=http.client.CREATED ) proto_id = self._fetch_attribute_from_response(resp, 'protocol') proto_id = proto_id['id'] protocol_ids.append(proto_id) url = "%s/protocols" % idp_id url = self.base_url(suffix=url) resp = self.get(url) self.assertValidListResponse( resp, 'protocols', dummy_validator, keys_to_check=['id'] ) entities = self._fetch_attribute_from_response(resp, 'protocols') entities = {entity['id'] for entity in entities} protocols_intersection = entities.intersection(protocol_ids) self.assertEqual(protocols_intersection, set(protocol_ids)) self.head(url, expected_status=http.client.OK) def test_update_protocols_attribute(self): """Update protocol's attribute.""" resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http.client.CREATED ) new_mapping_id = uuid.uuid4().hex self._create_mapping(mapping_id=new_mapping_id) url = f"{idp_id}/protocols/{proto}" url = self.base_url(suffix=url) body = {'mapping_id': new_mapping_id} resp = self.patch(url, body={'protocol': body}) self.assertValidResponse( resp, 'protocol', dummy_validator, keys_to_check=['id', 'mapping_id'], ref={'id': proto, 'mapping_id': new_mapping_id}, ) def test_delete_protocol(self): """Delete protocol. Expect HTTP 404 Not Found code for the GET call after the protocol is deleted. 
""" url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s') resp, idp_id, proto = self._assign_protocol_to_idp( expected_status=http.client.CREATED ) url = url % {'idp_id': idp_id, 'protocol_id': proto} self.delete(url) self.get(url, expected_status=http.client.NOT_FOUND) class MappingCRUDTests(test_v3.RestfulTestCase): """A class for testing CRUD operations for Mappings.""" MAPPING_URL = '/OS-FEDERATION/mappings/' def assertValidMappingListResponse(self, resp, *args, **kwargs): return self.assertValidListResponse( resp, 'mappings', self.assertValidMapping, keys_to_check=[], *args, **kwargs, ) def assertValidMappingResponse(self, resp, *args, **kwargs): return self.assertValidResponse( resp, 'mapping', self.assertValidMapping, keys_to_check=[], *args, **kwargs, ) def assertValidMapping(self, entity, ref=None): self.assertIsNotNone(entity.get('id')) self.assertIsNotNone(entity.get('rules')) if ref: self.assertEqual(entity['rules'], ref['rules']) return entity def _create_default_mapping_entry(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_LARGE}, expected_status=http.client.CREATED, ) return resp def _get_id_from_response(self, resp): r = resp.result.get('mapping') return r.get('id') def test_mapping_create(self): resp = self._create_default_mapping_entry() self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE) def test_mapping_list_head(self): url = self.MAPPING_URL self._create_default_mapping_entry() resp = self.get(url) entities = resp.result.get('mappings') self.assertIsNotNone(entities) self.assertResponseStatus(resp, http.client.OK) self.assertValidListLinks(resp.result.get('links')) self.assertEqual(1, len(entities)) self.head(url, expected_status=http.client.OK) def test_mapping_delete(self): url = self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': str(mapping_id)} 
resp = self.delete(url) self.assertResponseStatus(resp, http.client.NO_CONTENT) self.get(url, expected_status=http.client.NOT_FOUND) def test_mapping_get_head(self): url = self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': mapping_id} resp = self.get(url) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE) self.head(url, expected_status=http.client.OK) def test_mapping_update(self): url = self.MAPPING_URL + '%(mapping_id)s' resp = self._create_default_mapping_entry() mapping_id = self._get_id_from_response(resp) url = url % {'mapping_id': mapping_id} resp = self.patch( url, body={'mapping': mapping_fixtures.MAPPING_SMALL} ) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL) resp = self.get(url) self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL) def test_delete_mapping_dne(self): url = self.MAPPING_URL + uuid.uuid4().hex self.delete(url, expected_status=http.client.NOT_FOUND) def test_get_mapping_dne(self): url = self.MAPPING_URL + uuid.uuid4().hex self.get(url, expected_status=http.client.NOT_FOUND) def test_create_mapping_bad_requirements(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_BAD_REQ}, ) def test_create_mapping_no_rules(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_NO_RULES}, ) def test_create_mapping_no_remote_objects(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE}, ) def test_create_mapping_bad_value(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE}, ) def 
test_create_mapping_missing_local(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL}, ) def test_create_mapping_missing_type(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE}, ) def test_create_mapping_wrong_type(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE}, ) def test_create_mapping_extra_remote_properties_not_any_of(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_NOT_ANY_OF self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping}, ) def test_create_mapping_extra_remote_properties_any_one_of(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_ANY_ONE_OF self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping}, ) def test_create_mapping_extra_remote_properties_just_type(self): url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_EXTRA_REMOTE_PROPS_JUST_TYPE self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping}, ) def test_create_mapping_empty_map(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': {}} ) def test_create_mapping_extra_rules_properties(self): url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping_fixtures.MAPPING_EXTRA_RULES_PROPS}, ) def test_create_mapping_with_blacklist_and_whitelist(self): """Test for adding whitelist and blacklist in the rule. Server should respond with HTTP 400 Bad Request error upon discovering both ``whitelist`` and ``blacklist`` keywords in the same rule. 
""" url = self.MAPPING_URL + uuid.uuid4().hex mapping = mapping_fixtures.MAPPING_GROUPS_WHITELIST_AND_BLACKLIST self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': mapping}, ) def test_create_mapping_with_local_user_and_local_domain(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN}, expected_status=http.client.CREATED, ) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_LOCAL_USER_LOCAL_DOMAIN ) def test_create_mapping_with_ephemeral(self): url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_EPHEMERAL_USER}, expected_status=http.client.CREATED, ) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_EPHEMERAL_USER ) def test_create_mapping_with_bad_user_type(self): url = self.MAPPING_URL + uuid.uuid4().hex # get a copy of a known good map bad_mapping = copy.deepcopy(mapping_fixtures.MAPPING_EPHEMERAL_USER) # now sabotage the user type bad_mapping['rules'][0]['local'][0]['user']['type'] = uuid.uuid4().hex self.put( url, expected_status=http.client.BAD_REQUEST, body={'mapping': bad_mapping}, ) def test_create_shadow_mapping_without_roles_fails(self): """Validate that mappings with projects contain roles when created.""" url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_ROLES}, expected_status=http.client.BAD_REQUEST, ) def test_update_shadow_mapping_without_roles_fails(self): """Validate that mappings with projects contain roles when updated.""" url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS}, expected_status=http.client.CREATED, ) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_PROJECTS ) self.patch( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_ROLES}, expected_status=http.client.BAD_REQUEST, ) def 
test_create_shadow_mapping_without_name_fails(self): """Validate project mappings contain the project name when created.""" url = self.MAPPING_URL + uuid.uuid4().hex self.put( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_NAME}, expected_status=http.client.BAD_REQUEST, ) def test_update_shadow_mapping_without_name_fails(self): """Validate project mappings contain the project name when updated.""" url = self.MAPPING_URL + uuid.uuid4().hex resp = self.put( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS}, expected_status=http.client.CREATED, ) self.assertValidMappingResponse( resp, mapping_fixtures.MAPPING_PROJECTS ) self.patch( url, body={'mapping': mapping_fixtures.MAPPING_PROJECTS_WITHOUT_NAME}, expected_status=http.client.BAD_REQUEST, ) class FederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin): def auth_plugin_config_override(self): methods = ['saml2', 'token'] super().auth_plugin_config_override(methods) def setUp(self): super().setUp() self._notifications = [] def fake_saml_notify( action, user_id, group_ids, identity_provider, protocol, token_id, outcome, ): note = { 'action': action, 'user_id': user_id, 'identity_provider': identity_provider, 'protocol': protocol, 'send_notification_called': True, } self._notifications.append(note) self.useFixture( fixtures.MockPatchObject( notifications, 'send_saml_audit_notification', fake_saml_notify ) ) def _assert_last_notify( self, action, identity_provider, protocol, user_id=None ): self.assertTrue(self._notifications) note = self._notifications[-1] if user_id: self.assertEqual(note['user_id'], user_id) self.assertEqual(note['action'], action) self.assertEqual(note['identity_provider'], identity_provider) self.assertEqual(note['protocol'], protocol) self.assertTrue(note['send_notification_called']) def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_federation_sample_data() def test_issue_unscoped_token_notify(self): self._issue_unscoped_token() 
self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL) def test_issue_unscoped_token(self): r = self._issue_unscoped_token() token_resp = render_token.render_token_response_from_model(r)['token'] self.assertValidMappedUser(token_resp) def test_default_domain_scoped_token(self): # Make sure federated users can get tokens scoped to the default # domain, which has a non-uuid ID by default (e.g., `default`). We want # to make sure the token provider handles string types properly if the # ID isn't compressed into byte format during validation. Turn off # cache on issue so that we validate the token online right after we # get it to make sure the token provider is called. self.config_fixture.config(group='token', cache_on_issue=False) # Grab an unscoped token to get a domain-scoped token with. token = self._issue_unscoped_token() # Give the user a direct role assignment on the default domain, so they # can get a federated domain-scoped token. PROVIDERS.assignment_api.create_grant( self.role_admin['id'], user_id=token.user_id, domain_id=CONF.identity.default_domain_id, ) # Get a token scoped to the default domain with an ID of `default`, # which isn't a uuid type, but we should be able to handle it # accordingly in the token formatters/providers. auth_request = { 'auth': { 'identity': {'methods': ['token'], 'token': {'id': token.id}}, 'scope': {'domain': {'id': CONF.identity.default_domain_id}}, } } r = self.v3_create_token(auth_request) domain_scoped_token_id = r.headers.get('X-Subject-Token') # Validate the token to make sure the token providers handle non-uuid # domain IDs properly. 
headers = {'X-Subject-Token': domain_scoped_token_id} self.get('/auth/tokens', token=domain_scoped_token_id, headers=headers) def test_issue_the_same_unscoped_token_with_user_deleted(self): r = self._issue_unscoped_token() token = render_token.render_token_response_from_model(r)['token'] user1 = token['user'] user_id1 = user1.pop('id') # delete the referenced user, and authenticate again. Keystone should # create another new shadow user. PROVIDERS.identity_api.delete_user(user_id1) r = self._issue_unscoped_token() token = render_token.render_token_response_from_model(r)['token'] user2 = token['user'] user_id2 = user2.pop('id') # Only the user_id is different. Other properties include # identity_provider, protocol, groups and domain are the same. self.assertIsNot(user_id2, user_id1) self.assertEqual(user1, user2) def test_issue_unscoped_token_disabled_idp(self): """Check if authentication works with disabled identity providers. Test plan: 1) Disable default IdP 2) Try issuing unscoped token for that IdP 3) Expect server to forbid authentication """ enabled_false = {'enabled': False} PROVIDERS.federation_api.update_idp(self.IDP, enabled_false) self.assertRaises(exception.Forbidden, self._issue_unscoped_token) def test_issue_unscoped_token_group_names_in_mapping(self): r = self._issue_unscoped_token(assertion='ANOTHER_CUSTOMER_ASSERTION') ref_groups = {self.group_customers['id'], self.group_admins['id']} token_groups = r.federated_groups token_groups = {group['id'] for group in token_groups} self.assertEqual(ref_groups, token_groups) def test_issue_unscoped_tokens_nonexisting_group(self): self._issue_unscoped_token(assertion='ANOTHER_TESTER_ASSERTION') def test_issue_unscoped_token_with_remote_no_attribute(self): self._issue_unscoped_token( idp=self.IDP_WITH_REMOTE, environment={self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}, ) def test_issue_unscoped_token_with_remote(self): self.config_fixture.config( group='federation', remote_id_attribute=self.REMOTE_ID_ATTR ) 
self._issue_unscoped_token( idp=self.IDP_WITH_REMOTE, environment={self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}, ) def test_issue_unscoped_token_with_saml2_remote(self): self.config_fixture.config( group='saml2', remote_id_attribute=self.REMOTE_ID_ATTR ) self._issue_unscoped_token( idp=self.IDP_WITH_REMOTE, environment={self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}, ) def test_issue_unscoped_token_with_remote_different(self): self.config_fixture.config( group='federation', remote_id_attribute=self.REMOTE_ID_ATTR ) self.assertRaises( exception.Forbidden, self._issue_unscoped_token, idp=self.IDP_WITH_REMOTE, environment={self.REMOTE_ID_ATTR: uuid.uuid4().hex}, ) def test_issue_unscoped_token_with_remote_default_overwritten(self): """Test that protocol remote_id_attribute has higher priority. Make sure the parameter stored under ``protocol`` section has higher priority over parameter from default ``federation`` configuration section. """ self.config_fixture.config( group='saml2', remote_id_attribute=self.REMOTE_ID_ATTR ) self.config_fixture.config( group='federation', remote_id_attribute=uuid.uuid4().hex ) self._issue_unscoped_token( idp=self.IDP_WITH_REMOTE, environment={self.REMOTE_ID_ATTR: self.REMOTE_IDS[0]}, ) def test_issue_unscoped_token_with_remote_unavailable(self): self.config_fixture.config( group='federation', remote_id_attribute=self.REMOTE_ID_ATTR ) self.assertRaises( exception.Unauthorized, self._issue_unscoped_token, idp=self.IDP_WITH_REMOTE, environment={uuid.uuid4().hex: uuid.uuid4().hex}, ) def test_issue_unscoped_token_with_remote_user_as_empty_string(self): # make sure that REMOTE_USER set as the empty string won't interfere self._issue_unscoped_token(environment={'REMOTE_USER': ''}) def test_issue_unscoped_token_no_groups(self): r = self._issue_unscoped_token(assertion='USER_NO_GROUPS_ASSERTION') token_groups = r.federated_groups self.assertEqual(0, len(token_groups)) def test_issue_scoped_token_no_groups(self): """Verify that token without groups cannot 
get scoped to project. This test is required because of bug 1677723. """ # issue unscoped token with no groups r = self._issue_unscoped_token(assertion='USER_NO_GROUPS_ASSERTION') token_groups = r.federated_groups self.assertEqual(0, len(token_groups)) unscoped_token = r.id # let admin get roles in a project self.proj_employees admin = unit.new_user_ref(CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(admin) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], user_id=admin['id'], project_id=self.proj_employees['id'], ) # try to scope the token. It should fail scope = self._scope_request( unscoped_token, 'project', self.proj_employees['id'] ) self.v3_create_token(scope, expected_status=http.client.UNAUTHORIZED) def test_issue_unscoped_token_malformed_environment(self): """Test whether non string objects are filtered out. Put non string objects into the environment, inject correct assertion and try to get an unscoped token. Expect server not to fail on using split() method on non string objects and return token id in the HTTP header. 
""" environ = { 'malformed_object': object(), 'another_bad_idea': tuple(range(10)), 'yet_another_bad_param': dict(zip(uuid.uuid4().hex, range(32))), } environ.update(mapping_fixtures.EMPLOYEE_ASSERTION) with self.make_request(environ=environ): authentication.authenticate_for_token(self.UNSCOPED_V3_SAML2_REQ) def test_scope_to_project_once_notify(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE ) user_id = r.json['token']['user']['id'] self._assert_last_notify(self.ACTION, self.IDP, self.PROTOCOL, user_id) def test_scope_to_project_once(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE ) token_resp = r.result['token'] project_id = token_resp['project']['id'] self._check_project_scoped_token_attributes(token_resp, project_id) roles_ref = [self.role_employee] projects_ref = self.proj_employees self._check_projects_and_roles(token_resp, roles_ref, projects_ref) self.assertValidMappedUser(token_resp) def test_scope_token_with_idp_disabled(self): """Scope token issued by disabled IdP. Try scoping the token issued by an IdP which is disabled now. Expect server to refuse scoping operation. This test confirms correct behaviour when IdP was enabled and unscoped token was issued, but disabled before user tries to scope the token. Here we assume the unscoped token was already issued and start from the moment where IdP is being disabled and unscoped token is being used. 
Test plan: 1) Disable IdP 2) Try scoping unscoped token """ enabled_false = {'enabled': False} PROVIDERS.federation_api.update_idp(self.IDP, enabled_false) self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER, expected_status=http.client.FORBIDDEN, ) def test_validate_token_after_deleting_idp_raises_not_found(self): token = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN ) token_id = token.headers.get('X-Subject-Token') federated_info = token.json_body['token']['user']['OS-FEDERATION'] idp_id = federated_info['identity_provider']['id'] PROVIDERS.federation_api.delete_idp(idp_id) headers = {'X-Subject-Token': token_id} # NOTE(lbragstad): This raises a 404 NOT FOUND because the identity # provider is no longer present. We raise 404 NOT FOUND when we # validate a token and a project or domain no longer exists. self.get( '/auth/tokens/', token=token_id, headers=headers, expected_status=http.client.NOT_FOUND, ) def test_deleting_idp_cascade_deleting_fed_user(self): token = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN ) federated_info = token.json_body['token']['user']['OS-FEDERATION'] idp_id = federated_info['identity_provider']['id'] # There are three fed users (from 'EMPLOYEE_ASSERTION', # 'CUSTOMER_ASSERTION', 'ADMIN_ASSERTION') with the specified idp. hints = driver_hints.Hints() hints.add_filter('idp_id', idp_id) fed_users = PROVIDERS.shadow_users_api.get_federated_users(hints) self.assertEqual(3, len(fed_users)) idp_domain_id = PROVIDERS.federation_api.get_idp(idp_id)['domain_id'] for fed_user in fed_users: self.assertEqual(idp_domain_id, fed_user['domain_id']) # Delete the idp PROVIDERS.federation_api.delete_idp(idp_id) # The related federated user should be deleted as well. 
hints = driver_hints.Hints() hints.add_filter('idp_id', idp_id) fed_users = PROVIDERS.shadow_users_api.get_federated_users(hints) self.assertEqual([], fed_users) def test_scope_to_bad_project(self): """Scope unscoped token with a project we don't have access to.""" self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_CUSTOMER, expected_status=http.client.UNAUTHORIZED, ) def test_scope_to_project_multiple_times(self): """Try to scope the unscoped token multiple times. The new tokens should be scoped to: * Customers' project * Employees' project """ bodies = ( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN, self.TOKEN_SCOPE_PROJECT_CUSTOMER_FROM_ADMIN, ) project_ids = (self.proj_employees['id'], self.proj_customers['id']) for body, project_id_ref in zip(bodies, project_ids): r = self.v3_create_token(body) token_resp = r.result['token'] self._check_project_scoped_token_attributes( token_resp, project_id_ref ) def test_scope_to_project_with_duplicate_roles_returns_single_role(self): r = self.v3_create_token(self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN) # Even though the process of obtaining a token shows that there is a # role assignment on a project, we should attempt to create a duplicate # assignment somewhere. Do this by creating a direct role assignment # with each role against the project the token was scoped to. user_id = r.json_body['token']['user']['id'] project_id = r.json_body['token']['project']['id'] for role in r.json_body['token']['roles']: PROVIDERS.assignment_api.create_grant( role_id=role['id'], user_id=user_id, project_id=project_id ) # Ensure all roles in the token are unique even though we know there # should be duplicate role assignment from the assertions and the # direct role assignments we just created. 
r = self.v3_create_token(self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_ADMIN) known_role_ids = [] for role in r.json_body['token']['roles']: self.assertNotIn(role['id'], known_role_ids) known_role_ids.append(role['id']) def test_scope_to_project_with_only_inherited_roles(self): """Try to scope token whose only roles are inherited.""" r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_INHERITED_FROM_CUSTOMER ) token_resp = r.result['token'] self._check_project_scoped_token_attributes( token_resp, self.project_inherited['id'] ) roles_ref = [self.role_customer] projects_ref = self.project_inherited self._check_projects_and_roles(token_resp, roles_ref, projects_ref) self.assertValidMappedUser(token_resp) def test_scope_token_from_nonexistent_unscoped_token(self): """Try to scope token from non-existent unscoped token.""" self.v3_create_token( self.TOKEN_SCOPE_PROJECT_FROM_NONEXISTENT_TOKEN, expected_status=http.client.NOT_FOUND, ) def test_issue_token_from_rules_without_user(self): environ = copy.deepcopy(mapping_fixtures.BAD_TESTER_ASSERTION) with self.make_request(environ=environ): self.assertRaises( exception.Unauthorized, authentication.authenticate_for_token, self.UNSCOPED_V3_SAML2_REQ, ) def test_issue_token_with_nonexistent_group(self): """Inject assertion that matches rule issuing bad group id. Expect server to find out that some groups are missing in the backend and raise exception.MappedGroupNotFound exception. """ self.assertRaises( exception.MappedGroupNotFound, self._issue_unscoped_token, assertion='CONTRACTOR_ASSERTION', ) def test_scope_to_domain_once(self): r = self.v3_create_token(self.TOKEN_SCOPE_DOMAIN_A_FROM_CUSTOMER) token_resp = r.result['token'] self._check_domain_scoped_token_attributes( token_resp, self.domainA['id'] ) def test_scope_to_domain_multiple_tokens(self): """Issue multiple tokens scoping to different domains. 
The new tokens should be scoped to: * domainA * domainB * domainC """ bodies = ( self.TOKEN_SCOPE_DOMAIN_A_FROM_ADMIN, self.TOKEN_SCOPE_DOMAIN_B_FROM_ADMIN, self.TOKEN_SCOPE_DOMAIN_C_FROM_ADMIN, ) domain_ids = ( self.domainA['id'], self.domainB['id'], self.domainC['id'], ) for body, domain_id_ref in zip(bodies, domain_ids): r = self.v3_create_token(body) token_resp = r.result['token'] self._check_domain_scoped_token_attributes( token_resp, domain_id_ref ) def test_scope_to_domain_with_only_inherited_roles_fails(self): """Try to scope to a domain that has no direct roles.""" self.v3_create_token( self.TOKEN_SCOPE_DOMAIN_D_FROM_CUSTOMER, expected_status=http.client.UNAUTHORIZED, ) def test_list_projects(self): urls = ('/OS-FEDERATION/projects', '/auth/projects') token = ( self.tokens['CUSTOMER_ASSERTION'], self.tokens['EMPLOYEE_ASSERTION'], self.tokens['ADMIN_ASSERTION'], ) projects_refs = ( {self.proj_customers['id'], self.project_inherited['id']}, {self.proj_employees['id'], self.project_all['id']}, { self.proj_employees['id'], self.project_all['id'], self.proj_customers['id'], self.project_inherited['id'], }, ) for token, projects_ref in zip(token, projects_refs): for url in urls: r = self.get(url, token=token) projects_resp = r.result['projects'] projects = {p['id'] for p in projects_resp} self.assertEqual( projects_ref, projects, 'match failed for url %s' % url ) # TODO(samueldmq): Create another test class for role inheritance tests. # The advantage would be to reduce the complexity of this test class and # have tests specific to this functionality grouped, easing readability and # maintenability. 
def test_list_projects_for_inherited_project_assignment(self): # Create a subproject subproject_inherited = unit.new_project_ref( domain_id=self.domainD['id'], parent_id=self.project_inherited['id'], ) PROVIDERS.resource_api.create_project( subproject_inherited['id'], subproject_inherited ) # Create an inherited role assignment PROVIDERS.assignment_api.create_grant( role_id=self.role_employee['id'], group_id=self.group_employees['id'], project_id=self.project_inherited['id'], inherited_to_projects=True, ) # Define expected projects from employee assertion, which contain # the created subproject expected_project_ids = [ self.project_all['id'], self.proj_employees['id'], subproject_inherited['id'], ] # Assert expected projects for both available URLs for url in ('/OS-FEDERATION/projects', '/auth/projects'): r = self.get(url, token=self.tokens['EMPLOYEE_ASSERTION']) project_ids = [project['id'] for project in r.result['projects']] self.assertEqual(len(expected_project_ids), len(project_ids)) for expected_project_id in expected_project_ids: self.assertIn( expected_project_id, project_ids, 'Projects match failed for url %s' % url, ) def test_list_domains(self): urls = ('/OS-FEDERATION/domains', '/auth/domains') tokens = ( self.tokens['CUSTOMER_ASSERTION'], self.tokens['EMPLOYEE_ASSERTION'], self.tokens['ADMIN_ASSERTION'], ) # NOTE(henry-nash): domain D does not appear in the expected results # since it only had inherited roles (which only apply to projects # within the domain) domain_refs = ( {self.domainA['id']}, {self.domainA['id'], self.domainB['id']}, {self.domainA['id'], self.domainB['id'], self.domainC['id']}, ) for token, domains_ref in zip(tokens, domain_refs): for url in urls: r = self.get(url, token=token) domains_resp = r.result['domains'] domains = {p['id'] for p in domains_resp} self.assertEqual( domains_ref, domains, 'match failed for url %s' % url ) def test_full_workflow(self): """Test 'standard' workflow for granting access tokens. 
* Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ r = self._issue_unscoped_token() token_resp = render_token.render_token_response_from_model(r)['token'] # NOTE(lbragstad): Ensure only 'saml2' is in the method list. self.assertListEqual(['saml2'], r.methods) self.assertValidMappedUser(token_resp) employee_unscoped_token_id = r.id r = self.get('/auth/projects', token=employee_unscoped_token_id) projects = r.result['projects'] random_project = random.randint(0, len(projects) - 1) project = projects[random_project] v3_scope_request = self._scope_request( employee_unscoped_token_id, 'project', project['id'] ) r = self.v3_create_token(v3_scope_request) token_resp = r.result['token'] self.assertIn('token', token_resp['methods']) self.assertIn('saml2', token_resp['methods']) self._check_project_scoped_token_attributes(token_resp, project['id']) def test_workflow_with_groups_deletion(self): """Test full workflow with groups deletion before token scoping. 
The test scenario is as follows: - Create group ``group`` - Create and assign roles to ``group`` and ``project_all`` - Patch mapping rules for existing IdP so it issues group id - Issue unscoped token with ``group``'s id - Delete group ``group`` - Scope token to ``project_all`` - Expect HTTP 500 response """ # create group and role group = unit.new_group_ref(domain_id=self.domainA['id']) group = PROVIDERS.identity_api.create_group(group) role = unit.new_role_ref() PROVIDERS.role_api.create_role(role['id'], role) # assign role to group and project_admins PROVIDERS.assignment_api.create_grant( role['id'], group_id=group['id'], project_id=self.project_all['id'] ) rules = { 'rules': [ { 'local': [ {'group': {'id': group['id']}}, {'user': {'name': '{0}'}}, ], 'remote': [ {'type': 'UserName'}, {'type': 'LastName', 'any_one_of': ['Account']}, ], } ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='TESTER_ASSERTION') # delete group PROVIDERS.identity_api.delete_group(group['id']) # scope token to project_all, expect HTTP 500 scoped_token = self._scope_request( r.id, 'project', self.project_all['id'] ) self.v3_create_token( scoped_token, expected_status=http.client.INTERNAL_SERVER_ERROR ) def test_lists_with_missing_group_in_backend(self): """Test a mapping that points to a group that does not exist. For explicit mappings, we expect the group to exist in the backend, but for lists, specifically blacklists, a missing group is expected as many groups will be specified by the IdP that are not Keystone groups. 
The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with on group ``EXISTS`` id in it """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] group = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group = PROVIDERS.identity_api.create_group(group) rules = { 'rules': [ { "local": [{"user": {"name": "{0}", "id": "{0}"}}], "remote": [{"type": "REMOTE_USER"}], }, { "local": [ {"groups": "{0}", "domain": {"name": domain_name}} ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ], }, ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.federated_groups self.assertEqual(1, len(assigned_group_ids)) self.assertEqual(group['id'], assigned_group_ids[0]['id']) def test_empty_blacklist_passess_all_values(self): """Test a mapping with empty blacklist specified. Not adding a ``blacklist`` keyword to the mapping rules has the same effect as adding an empty ``blacklist``. In both cases, the mapping engine will not discard any groups that are associated with apache environment variables. This test checks scenario where an empty blacklist was specified. Expected result is to allow any value. 
The test scenario is as follows: - Create group ``EXISTS`` - Create group ``NO_EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with groups ``EXISTS`` and ``NO_EXISTS`` assigned """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = PROVIDERS.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref( domain_id=domain_id, name='NO_EXISTS' ) group_no_exists = PROVIDERS.identity_api.create_group(group_no_exists) group_ids = {group_exists['id'], group_no_exists['id']} rules = { 'rules': [ { "local": [{"user": {"name": "{0}", "id": "{0}"}}], "remote": [{"type": "REMOTE_USER"}], }, { "local": [ {"groups": "{0}", "domain": {"name": domain_name}} ], "remote": [ {"type": "REMOTE_USER_GROUPS", "blacklist": []} ], }, ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.federated_groups self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_not_adding_blacklist_passess_all_values(self): """Test a mapping without blacklist specified. Not adding a ``blacklist`` keyword to the mapping rules has the same effect as adding an empty ``blacklist``. In both cases all values will be accepted and passed. This test checks scenario where an blacklist was not specified. Expected result is to allow any value. 
The test scenario is as follows: - Create group ``EXISTS`` - Create group ``NO_EXISTS`` - Set mapping rules for existing IdP with a blacklist that passes through as REMOTE_USER_GROUPS - Issue unscoped token with on groups ``EXISTS`` and ``NO_EXISTS`` assigned """ domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = PROVIDERS.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref( domain_id=domain_id, name='NO_EXISTS' ) group_no_exists = PROVIDERS.identity_api.create_group(group_no_exists) group_ids = {group_exists['id'], group_no_exists['id']} rules = { 'rules': [ { "local": [{"user": {"name": "{0}", "id": "{0}"}}], "remote": [{"type": "REMOTE_USER"}], }, { "local": [ {"groups": "{0}", "domain": {"name": domain_name}} ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ], }, ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.federated_groups self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_empty_whitelist_discards_all_values(self): """Test that empty whitelist blocks all the values. Not adding a ``whitelist`` keyword to the mapping value is different than adding empty whitelist. The former case will simply pass all the values, whereas the latter would discard all the values. This test checks scenario where an empty whitelist was specified. The expected result is that no groups are matched. The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with an empty whitelist that whould discard any values from the assertion - Try issuing unscoped token, no groups were matched and that the federated user does not have any group assigned. 
""" domain_id = self.domainA['id'] domain_name = self.domainA['name'] group = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group = PROVIDERS.identity_api.create_group(group) rules = { 'rules': [ { "local": [{"user": {"name": "{0}", "id": "{0}"}}], "remote": [{"type": "REMOTE_USER"}], }, { "local": [ {"groups": "{0}", "domain": {"name": domain_name}} ], "remote": [ {"type": "REMOTE_USER_GROUPS", "whitelist": []} ], }, ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_groups = r.federated_groups self.assertEqual(len(assigned_groups), 0) def test_not_setting_whitelist_accepts_all_values(self): """Test that not setting whitelist passes. Not adding a ``whitelist`` keyword to the mapping value is different than adding empty whitelist. The former case will simply pass all the values, whereas the latter would discard all the values. This test checks a scenario where a ``whitelist`` was not specified. Expected result is that no groups are ignored. The test scenario is as follows: - Create group ``EXISTS`` - Set mapping rules for existing IdP with an empty whitelist that whould discard any values from the assertion - Issue an unscoped token and make sure ephemeral user is a member of two groups. 
""" domain_id = self.domainA['id'] domain_name = self.domainA['name'] # Add a group "EXISTS" group_exists = unit.new_group_ref(domain_id=domain_id, name='EXISTS') group_exists = PROVIDERS.identity_api.create_group(group_exists) # Add a group "NO_EXISTS" group_no_exists = unit.new_group_ref( domain_id=domain_id, name='NO_EXISTS' ) group_no_exists = PROVIDERS.identity_api.create_group(group_no_exists) group_ids = {group_exists['id'], group_no_exists['id']} rules = { 'rules': [ { "local": [{"user": {"name": "{0}", "id": "{0}"}}], "remote": [{"type": "REMOTE_USER"}], }, { "local": [ {"groups": "{0}", "domain": {"name": domain_name}} ], "remote": [ { "type": "REMOTE_USER_GROUPS", } ], }, ] } PROVIDERS.federation_api.update_mapping(self.mapping['id'], rules) r = self._issue_unscoped_token(assertion='UNMATCHED_GROUP_ASSERTION') assigned_group_ids = r.federated_groups self.assertEqual(len(group_ids), len(assigned_group_ids)) for group in assigned_group_ids: self.assertIn(group['id'], group_ids) def test_assertion_prefix_parameter(self): """Test parameters filtering based on the prefix. With ``assertion_prefix`` set to fixed, non default value, issue an unscoped token from assertion EMPLOYEE_ASSERTION_PREFIXED. Expect server to return unscoped token. """ self.config_fixture.config( group='federation', assertion_prefix=self.ASSERTION_PREFIX ) self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION_PREFIXED') def test_assertion_prefix_parameter_expect_fail(self): """Test parameters filtering based on the prefix. With ``assertion_prefix`` default value set to empty string issue an unscoped token from assertion EMPLOYEE_ASSERTION. Next, configure ``assertion_prefix`` to value ``UserName``. Try issuing unscoped token with EMPLOYEE_ASSERTION. Expect server to raise exception.Unathorized exception. 
""" self._issue_unscoped_token() self.config_fixture.config( group='federation', assertion_prefix='UserName' ) self.assertRaises(exception.Unauthorized, self._issue_unscoped_token) def test_unscoped_token_has_user_domain(self): r = self._issue_unscoped_token() self._check_domains_are_valid( render_token.render_token_response_from_model(r)['token'] ) def test_scoped_token_has_user_domain(self): r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE ) self._check_domains_are_valid(r.json_body['token']) def test_issue_unscoped_token_for_local_user(self): r = self._issue_unscoped_token(assertion='LOCAL_USER_ASSERTION') self.assertListEqual(['saml2'], r.methods) self.assertEqual(self.user['id'], r.user_id) self.assertEqual(self.user['name'], r.user['name']) self.assertEqual(self.domain['id'], r.user_domain['id']) # Make sure the token is not scoped self.assertIsNone(r.domain_id) self.assertIsNone(r.project_id) self.assertTrue(r.unscoped) def test_issue_token_for_local_user_user_not_found(self): self.assertRaises( exception.Unauthorized, self._issue_unscoped_token, assertion='ANOTHER_LOCAL_USER_ASSERTION', ) def test_user_name_and_id_in_federation_token(self): r = self._issue_unscoped_token(assertion='EMPLOYEE_ASSERTION') self.assertEqual( mapping_fixtures.EMPLOYEE_ASSERTION['UserName'], r.user['name'] ) self.assertNotEqual(r.user['name'], r.user_id) r = self.v3_create_token( self.TOKEN_SCOPE_PROJECT_EMPLOYEE_FROM_EMPLOYEE ) token = r.json_body['token'] self.assertEqual( mapping_fixtures.EMPLOYEE_ASSERTION['UserName'], token['user']['name'], ) self.assertNotEqual(token['user']['name'], token['user']['id']) def test_issue_unscoped_token_with_remote_different_from_protocol(self): protocol = PROVIDERS.federation_api.get_protocol( self.IDP_WITH_REMOTE, self.PROTOCOL ) protocol['remote_id_attribute'] = uuid.uuid4().hex PROVIDERS.federation_api.update_protocol( self.IDP_WITH_REMOTE, protocol['id'], protocol ) self._issue_unscoped_token( 
idp=self.IDP_WITH_REMOTE, environment={protocol['remote_id_attribute']: self.REMOTE_IDS[0]}, ) self.assertRaises( exception.Unauthorized, self._issue_unscoped_token, idp=self.IDP_WITH_REMOTE, environment={uuid.uuid4().hex: self.REMOTE_IDS[0]}, ) def test_issue_token_for_ephemeral_user_with_remote_domain(self): """Test ephemeral user is created in the domain set by assertion. Shadow user may belong to the domain set by the assertion data. To verify that: - precreate domain used later in the assertion - update mapping to unclude user domain name coming from assertion - auth user - verify user domain is not the IDP domain """ domain_ref = unit.new_domain_ref(name="user_domain") PROVIDERS.resource_api.create_domain(domain_ref["id"], domain_ref) PROVIDERS.federation_api.update_mapping( self.mapping["id"], mapping_fixtures.MAPPING_EPHEMERAL_USER_REMOTE_DOMAIN, ) r = self._issue_unscoped_token(assertion='USER_WITH_DOMAIN_ASSERTION') self.assertEqual(r.user_domain["id"], domain_ref["id"]) self.assertNotEqual(r.user_domain["id"], self.idp["domain_id"]) class FernetFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin): AUTH_METHOD = 'token' def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_federation_sample_data() def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', provider='fernet') self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) def auth_plugin_config_override(self): methods = ['saml2', 'token', 'password'] super().auth_plugin_config_override(methods) def test_federated_unscoped_token(self): resp = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(resp)['token'] ) def test_federated_unscoped_token_with_multiple_groups(self): assertion = 'ANOTHER_CUSTOMER_ASSERTION' resp = self._issue_unscoped_token(assertion=assertion) self.assertValidMappedUser( 
render_token.render_token_response_from_model(resp)['token'] ) def test_validate_federated_unscoped_token(self): resp = self._issue_unscoped_token() unscoped_token = resp.id # assert that the token we received is valid self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token}) def test_fernet_full_workflow(self): """Test 'standard' workflow for granting Fernet access tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ resp = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(resp)['token'] ) unscoped_token = resp.id resp = self.get('/auth/projects', token=unscoped_token) projects = resp.result['projects'] random_project = random.randint(0, len(projects) - 1) project = projects[random_project] v3_scope_request = self._scope_request( unscoped_token, 'project', project['id'] ) resp = self.v3_create_token(v3_scope_request) token_resp = resp.result['token'] self._check_project_scoped_token_attributes(token_resp, project['id']) class JWSFederatedTokenTests(test_v3.RestfulTestCase, FederatedSetupMixin): AUTH_METHOD = 'token' def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_federation_sample_data() def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', provider='jws') self.useFixture(ksfixtures.JWSKeyRepository(self.config_fixture)) def auth_plugin_config_override(self): methods = ['saml2', 'token', 'password'] super().auth_plugin_config_override(methods) def test_federated_unscoped_token(self): token_model = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(token_model)['token'] ) def test_federated_unscoped_token_with_multiple_groups(self): assertion = 'ANOTHER_CUSTOMER_ASSERTION' token_model = self._issue_unscoped_token(assertion=assertion) self.assertValidMappedUser( 
render_token.render_token_response_from_model(token_model)['token'] ) def test_validate_federated_unscoped_token(self): token_model = self._issue_unscoped_token() unscoped_token = token_model.id # assert that the token we received is valid self.get('/auth/tokens/', headers={'X-Subject-Token': unscoped_token}) def test_jws_full_workflow(self): """Test 'standard' workflow for granting JWS tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ token_model = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(token_model)['token'] ) unscoped_token = token_model.id resp = self.get('/auth/projects', token=unscoped_token) projects = resp.result['projects'] random_project = random.randint(0, len(projects) - 1) project = projects[random_project] v3_scope_request = self._scope_request( unscoped_token, 'project', project['id'] ) resp = self.v3_create_token(v3_scope_request) token_resp = resp.result['token'] self._check_project_scoped_token_attributes(token_resp, project['id']) class FederatedTokenTestsMethodToken(FederatedTokenTests): """Test federation operation with unified scoping auth method. Test all the operations with auth method set to ``token`` as a new, unified way for scoping all the tokens. """ AUTH_METHOD = 'token' def auth_plugin_config_override(self): methods = ['saml2', 'token'] super(FederatedTokenTests, self).auth_plugin_config_override(methods) def test_full_workflow(self): """Test 'standard' workflow for granting access tokens. * Issue unscoped token * List available projects based on groups * Scope token to one of available projects """ r = self._issue_unscoped_token() token_resp = render_token.render_token_response_from_model(r)['token'] # NOTE(lbragstad): Ensure only 'saml2' is in the method list. 
self.assertListEqual(['saml2'], r.methods) self.assertValidMappedUser(token_resp) employee_unscoped_token_id = r.id r = self.get('/auth/projects', token=employee_unscoped_token_id) projects = r.result['projects'] random_project = random.randint(0, len(projects) - 1) project = projects[random_project] v3_scope_request = self._scope_request( employee_unscoped_token_id, 'project', project['id'] ) r = self.v3_create_token(v3_scope_request) token_resp = r.result['token'] self.assertIn('token', token_resp['methods']) self.assertIn('saml2', token_resp['methods']) self._check_project_scoped_token_attributes(token_resp, project['id']) class FederatedUserTests(test_v3.RestfulTestCase, FederatedSetupMixin): """Test for federated users. Tests new shadow users functionality """ def auth_plugin_config_override(self): methods = ['saml2', 'token'] super().auth_plugin_config_override(methods) def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_federation_sample_data() def test_user_id_persistense(self): """Ensure user_id is persistend for multiple federated authn calls.""" r = self._issue_unscoped_token() user_id = r.user_id self.assertNotEmpty(PROVIDERS.identity_api.get_user(user_id)) r = self._issue_unscoped_token() user_id2 = r.user_id self.assertNotEmpty(PROVIDERS.identity_api.get_user(user_id2)) self.assertEqual(user_id, user_id2) def test_user_role_assignment(self): # create project and role project_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id user_id, unscoped_token = self._authenticate_via_saml() # exchange an unscoped token for a scoped token; resulting in # unauthorized because the user doesn't have any role assignments v3_scope_request = self._scope_request( unscoped_token, 'project', project_ref['id'] ) r = 
self.v3_create_token( v3_scope_request, expected_status=http.client.UNAUTHORIZED ) # assign project role to federated user PROVIDERS.assignment_api.add_role_to_user_and_project( user_id, project_ref['id'], role_ref['id'] ) # exchange an unscoped token for a scoped token r = self.v3_create_token( v3_scope_request, expected_status=http.client.CREATED ) scoped_token = r.headers['X-Subject-Token'] # ensure user can access resource based on role assignment path = '/projects/{project_id}'.format(project_id=project_ref['id']) r = self.v3_request( path=path, method='GET', expected_status=http.client.OK, token=scoped_token, ) self.assertValidProjectResponse(r, project_ref) # create a 2nd project project_ref2 = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project_ref2['id'], project_ref2) # ensure the user cannot access the 2nd resource (forbidden) path = '/projects/{project_id}'.format(project_id=project_ref2['id']) r = self.v3_request( path=path, method='GET', expected_status=http.client.FORBIDDEN, token=scoped_token, ) def test_domain_scoped_user_role_assignment(self): # create domain and role domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id user_id, unscoped_token = self._authenticate_via_saml() # exchange an unscoped token for a scoped token; resulting in # unauthorized because the user doesn't have any role assignments v3_scope_request = self._scope_request( unscoped_token, 'domain', domain_ref['id'] ) r = self.v3_create_token( v3_scope_request, expected_status=http.client.UNAUTHORIZED ) # assign domain role to user PROVIDERS.assignment_api.create_grant( role_ref['id'], user_id=user_id, domain_id=domain_ref['id'] ) # exchange an unscoped token for domain scoped token and test r = self.v3_create_token( v3_scope_request, 
expected_status=http.client.CREATED ) self.assertIsNotNone(r.headers.get('X-Subject-Token')) token_resp = r.result['token'] self.assertIn('domain', token_resp) def test_auth_projects_matches_federation_projects(self): # create project and role project_ref = unit.new_project_ref( domain_id=CONF.identity.default_domain_id ) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id user_id, unscoped_token = self._authenticate_via_saml() # assign project role to federated user PROVIDERS.assignment_api.add_role_to_user_and_project( user_id, project_ref['id'], role_ref['id'] ) # get auth projects r = self.get('/auth/projects', token=unscoped_token) auth_projects = r.result['projects'] # get federation projects r = self.get('/OS-FEDERATION/projects', token=unscoped_token) fed_projects = r.result['projects'] # compare self.assertCountEqual(auth_projects, fed_projects) def test_auth_projects_matches_federation_projects_with_group_assign(self): # create project, role, group domain_id = CONF.identity.default_domain_id project_ref = unit.new_project_ref(domain_id=domain_id) PROVIDERS.resource_api.create_project(project_ref['id'], project_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) group_ref = unit.new_group_ref(domain_id=domain_id) group_ref = PROVIDERS.identity_api.create_group(group_ref) # authenticate via saml get back a user id user_id, unscoped_token = self._authenticate_via_saml() # assign role to group at project PROVIDERS.assignment_api.create_grant( role_ref['id'], group_id=group_ref['id'], project_id=project_ref['id'], domain_id=domain_id, ) # add user to group PROVIDERS.identity_api.add_user_to_group( user_id=user_id, group_id=group_ref['id'] ) # get auth projects r = self.get('/auth/projects', token=unscoped_token) auth_projects = r.result['projects'] # get federation projects 
r = self.get('/OS-FEDERATION/projects', token=unscoped_token) fed_projects = r.result['projects'] # compare self.assertCountEqual(auth_projects, fed_projects) def test_auth_domains_matches_federation_domains(self): # create domain and role domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id and token user_id, unscoped_token = self._authenticate_via_saml() # assign domain role to user PROVIDERS.assignment_api.create_grant( role_ref['id'], user_id=user_id, domain_id=domain_ref['id'] ) # get auth domains r = self.get('/auth/domains', token=unscoped_token) auth_domains = r.result['domains'] # get federation domains r = self.get('/OS-FEDERATION/domains', token=unscoped_token) fed_domains = r.result['domains'] # compare self.assertCountEqual(auth_domains, fed_domains) def test_auth_domains_matches_federation_domains_with_group_assign(self): # create role, group, and domain domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain_ref['id'], domain_ref) role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) group_ref = unit.new_group_ref(domain_id=domain_ref['id']) group_ref = PROVIDERS.identity_api.create_group(group_ref) # authenticate via saml get back a user id and token user_id, unscoped_token = self._authenticate_via_saml() # assign domain role to group PROVIDERS.assignment_api.create_grant( role_ref['id'], group_id=group_ref['id'], domain_id=domain_ref['id'], ) # add user to group PROVIDERS.identity_api.add_user_to_group( user_id=user_id, group_id=group_ref['id'] ) # get auth domains r = self.get('/auth/domains', token=unscoped_token) auth_domains = r.result['domains'] # get federation domains r = self.get('/OS-FEDERATION/domains', token=unscoped_token) fed_domains = r.result['domains'] # compare self.assertCountEqual(auth_domains, 
fed_domains) def test_list_head_domains_for_user_duplicates(self): # create role role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id and token user_id, unscoped_token = self._authenticate_via_saml() # get federation group domains r = self.get('/OS-FEDERATION/domains', token=unscoped_token) group_domains = r.result['domains'] domain_from_group = group_domains[0] self.head( '/OS-FEDERATION/domains', token=unscoped_token, expected_status=http.client.OK, ) # assign group domain and role to user, this should create a # duplicate domain PROVIDERS.assignment_api.create_grant( role_ref['id'], user_id=user_id, domain_id=domain_from_group['id'] ) # get user domains via /OS-FEDERATION/domains and test for duplicates r = self.get('/OS-FEDERATION/domains', token=unscoped_token) user_domains = r.result['domains'] user_domain_ids = [] for domain in user_domains: self.assertNotIn(domain['id'], user_domain_ids) user_domain_ids.append(domain['id']) # get user domains via /auth/domains and test for duplicates r = self.get('/auth/domains', token=unscoped_token) user_domains = r.result['domains'] user_domain_ids = [] for domain in user_domains: self.assertNotIn(domain['id'], user_domain_ids) user_domain_ids.append(domain['id']) def test_list_head_projects_for_user_duplicates(self): # create role role_ref = unit.new_role_ref() PROVIDERS.role_api.create_role(role_ref['id'], role_ref) # authenticate via saml get back a user id and token user_id, unscoped_token = self._authenticate_via_saml() # get federation group projects r = self.get('/OS-FEDERATION/projects', token=unscoped_token) group_projects = r.result['projects'] project_from_group = group_projects[0] self.head( '/OS-FEDERATION/projects', token=unscoped_token, expected_status=http.client.OK, ) # assign group project and role to user, this should create a # duplicate project PROVIDERS.assignment_api.add_role_to_user_and_project( user_id, 
project_from_group['id'], role_ref['id'] ) # get user projects via /OS-FEDERATION/projects and test for duplicates r = self.get('/OS-FEDERATION/projects', token=unscoped_token) user_projects = r.result['projects'] user_project_ids = [] for project in user_projects: self.assertNotIn(project['id'], user_project_ids) user_project_ids.append(project['id']) # get user projects via /auth/projects and test for duplicates r = self.get('/auth/projects', token=unscoped_token) user_projects = r.result['projects'] user_project_ids = [] for project in user_projects: self.assertNotIn(project['id'], user_project_ids) user_project_ids.append(project['id']) def test_delete_protocol_after_federated_authentication(self): # Create a protocol protocol = self.proto_ref(mapping_id=self.mapping['id']) PROVIDERS.federation_api.create_protocol( self.IDP, protocol['id'], protocol ) # Authenticate to create a new federated_user entry with a foreign # key pointing to the protocol r = self._issue_unscoped_token() user_id = r.user_id self.assertNotEmpty(PROVIDERS.identity_api.get_user(user_id)) # Now we should be able to delete the protocol PROVIDERS.federation_api.delete_protocol(self.IDP, protocol['id']) def _authenticate_via_saml(self): r = self._issue_unscoped_token() unscoped_token = r.id token_resp = render_token.render_token_response_from_model(r)['token'] self.assertValidMappedUser(token_resp) return r.user_id, unscoped_token class ShadowMappingTests(test_v3.RestfulTestCase, FederatedSetupMixin): """Test class dedicated to auto-provisioning resources at login. A shadow mapping is a mapping that contains extra properties about that specific federated user's situation based on attributes from the assertion. For example, a shadow mapping can tell us that a user should have specific role assignments on certain projects within a domain. When a federated user authenticates, the shadow mapping will create these entities before returning the authenticated response to the user. 
This test class is dedicated to testing specific aspects of shadow mapping when performing federated authentication. """ def setUp(self): super().setUp() # update the mapping we have already setup to have specific projects # and roles. PROVIDERS.federation_api.update_mapping( self.mapping['id'], mapping_fixtures.MAPPING_PROJECTS ) # The shadow mapping we're using in these tests contain a role named # `member` and `observer` for the sake of using something other than # `admin`. We'll need to create those before hand, otherwise the # mapping will fail during authentication because the roles defined in # the mapping do not exist yet. The shadow mapping mechanism currently # doesn't support creating roles on-the-fly, but this could change in # the future after we get some feedback from shadow mapping being used # in real deployments. We also want to make sure we are dealing with # global roles and not domain-scoped roles. We have specific tests # below that test that behavior and the setup is done in the test. member_role_ref = unit.new_role_ref(name='member') assert member_role_ref['domain_id'] is None self.member_role = PROVIDERS.role_api.create_role( member_role_ref['id'], member_role_ref ) observer_role_ref = unit.new_role_ref(name='observer') assert observer_role_ref['domain_id'] is None self.observer_role = PROVIDERS.role_api.create_role( observer_role_ref['id'], observer_role_ref ) # This is a mapping of the project name to the role that is supposed to # be assigned to the user on that project from the shadow mapping. 
self.expected_results = { 'Production': 'observer', 'Staging': 'member', 'Project for tbo': 'admin', } def auth_plugin_config_override(self): methods = ['saml2', 'token'] super().auth_plugin_config_override(methods) def load_fixtures(self, fixtures): super().load_fixtures(fixtures) self.load_federation_sample_data() def test_shadow_mapping_creates_projects(self): projects = PROVIDERS.resource_api.list_projects() for project in projects: self.assertNotIn(project['name'], self.expected_results) response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] for project in projects: project = PROVIDERS.resource_api.get_project_by_name( project['name'], self.idp['domain_id'] ) self.assertIn(project['name'], self.expected_results) def test_shadow_mapping_create_projects_role_assignments(self): response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] for project in projects: # Ask for a scope token to each project in the mapping. Each token # should contain a different role so let's check that is right, # too. scope = self._scope_request( unscoped_token, 'project', project['id'] ) response = self.v3_create_token(scope) project_name = response.json_body['token']['project']['name'] roles = response.json_body['token']['roles'] self.assertEqual( self.expected_results[project_name], roles[0]['name'] ) def test_shadow_mapping_does_not_create_roles(self): # If a role required by the mapping does not exist, then we should fail # the mapping since shadow mapping currently does not support creating # mappings on-the-fly. 
PROVIDERS.role_api.delete_role(self.observer_role['id']) self.assertRaises(exception.RoleNotFound, self._issue_unscoped_token) def test_shadow_mapping_creates_project_in_identity_provider_domain(self): response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] for project in projects: self.assertEqual(project['domain_id'], self.idp['domain_id']) def test_shadow_mapping_is_idempotent(self): """Test that projects remain idempotent for every federated auth.""" response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) project_ids = [p['id'] for p in response.json_body['projects']] response = self._issue_unscoped_token() unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] for project in projects: self.assertIn(project['id'], project_ids) def test_roles_outside_idp_domain_fail_mapping(self): # Create a new domain d = unit.new_domain_ref() new_domain = PROVIDERS.resource_api.create_domain(d['id'], d) # Delete the member role and recreate it in a different domain PROVIDERS.role_api.delete_role(self.member_role['id']) member_role_ref = unit.new_role_ref( name='member', domain_id=new_domain['id'] ) PROVIDERS.role_api.create_role(member_role_ref['id'], member_role_ref) self.assertRaises( exception.DomainSpecificRoleNotWithinIdPDomain, self._issue_unscoped_token, ) def test_roles_in_idp_domain_can_be_assigned_from_mapping(self): # Delete the member role and recreate it in the domain of the idp PROVIDERS.role_api.delete_role(self.member_role['id']) member_role_ref = unit.new_role_ref( name='member', domain_id=self.idp['domain_id'] ) 
PROVIDERS.role_api.create_role(member_role_ref['id'], member_role_ref) response = self._issue_unscoped_token() user_id = response.user_id unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] staging_project = PROVIDERS.resource_api.get_project_by_name( 'Staging', self.idp['domain_id'] ) for project in projects: # Even though the mapping successfully assigned the Staging project # a member role for our user, the /auth/projects response doesn't # include projects with only domain-specific role assignments. self.assertNotEqual(project['name'], 'Staging') domain_role_assignments = ( PROVIDERS.assignment_api.list_role_assignments( user_id=user_id, project_id=staging_project['id'], strip_domain_roles=False, ) ) self.assertEqual( staging_project['id'], domain_role_assignments[0]['project_id'] ) self.assertEqual(user_id, domain_role_assignments[0]['user_id']) def test_mapping_with_groups_includes_projects_with_group_assignment(self): # create a group called Observers observer_group = unit.new_group_ref( domain_id=self.idp['domain_id'], name='Observers' ) observer_group = PROVIDERS.identity_api.create_group(observer_group) # make sure the Observers group has a role on the finance project finance_project = unit.new_project_ref( domain_id=self.idp['domain_id'], name='Finance' ) finance_project = PROVIDERS.resource_api.create_project( finance_project['id'], finance_project ) PROVIDERS.assignment_api.create_grant( self.observer_role['id'], group_id=observer_group['id'], project_id=finance_project['id'], ) # update the mapping group_rule = { 'group': { 'name': 'Observers', 'domain': {'id': self.idp['domain_id']}, } } updated_mapping = copy.deepcopy(mapping_fixtures.MAPPING_PROJECTS) updated_mapping['rules'][0]['local'].append(group_rule) PROVIDERS.federation_api.update_mapping( self.mapping['id'], updated_mapping ) response = self._issue_unscoped_token() # user_id = 
response.json_body['token']['user']['id'] unscoped_token = response.id response = self.get('/auth/projects', token=unscoped_token) projects = response.json_body['projects'] self.expected_results = { # These assignments are all a result of a direct mapping from the # shadow user to the newly created project. 'Production': 'observer', 'Staging': 'member', 'Project for tbo': 'admin', # This is a result of the mapping engine maintaining its old # behavior. 'Finance': 'observer', } for project in projects: # Ask for a scope token to each project in the mapping. Each token # should contain a different role so let's check that is right, # too. scope = self._scope_request( unscoped_token, 'project', project['id'] ) response = self.v3_create_token(scope) project_name = response.json_body['token']['project']['name'] roles = response.json_body['token']['roles'] self.assertEqual( self.expected_results[project_name], roles[0]['name'] ) def test_user_gets_only_assigned_roles(self): # in bug 1677723 user could get roles outside of what was assigned # to them. This test verifies that this is no longer true. 
# Authenticate once to create the projects response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) # Assign admin role to newly-created project to another user staging_project = PROVIDERS.resource_api.get_project_by_name( 'Staging', self.idp['domain_id'] ) admin = unit.new_user_ref(CONF.identity.default_domain_id) PROVIDERS.identity_api.create_user(admin) PROVIDERS.assignment_api.create_grant( self.role_admin['id'], user_id=admin['id'], project_id=staging_project['id'], ) # Authenticate again with the federated user and verify roles response = self._issue_unscoped_token() self.assertValidMappedUser( render_token.render_token_response_from_model(response)['token'] ) unscoped_token = response.id scope = self._scope_request( unscoped_token, 'project', staging_project['id'] ) response = self.v3_create_token(scope) roles = response.json_body['token']['roles'] role_ids = [r['id'] for r in roles] self.assertNotIn(self.role_admin['id'], role_ids) class JsonHomeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'https://docs.openstack.org/api/openstack-identity/3/ext/OS-FEDERATION' '/1.0/rel/identity_provider': { 'href-template': '/OS-FEDERATION/identity_providers/{idp_id}', 'href-vars': { 'idp_id': 'https://docs.openstack.org/api/openstack-identity/3' '/ext/OS-FEDERATION/1.0/param/idp_id' }, }, } def _is_xmlsec1_installed(): p = subprocess.Popen( ['which', 'xmlsec1'], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) # invert the return code return not bool(p.wait()) def _load_xml(filename): with open(os.path.join(XMLDIR, filename)) as xml: return xml.read() class SAMLGenerationTests(test_v3.RestfulTestCase): SP_AUTH_URL = ( 'http://beta.com:5000/v3/OS-FEDERATION/identity_providers' '/BETA/protocols/saml2/auth' ) ASSERTION_FILE = 'signed_saml2_assertion.xml' # The values of the following variables match the attributes values found # in ASSERTION_FILE ISSUER = 
'https://acme.com/FIM/sps/openstack/saml20' RECIPIENT = 'http://beta.com/Shibboleth.sso/SAML2/POST' SUBJECT = 'test_user' SUBJECT_DOMAIN = 'user_domain' ROLES = ['admin', 'member'] PROJECT = 'development' PROJECT_DOMAIN = 'project_domain' GROUPS = [ 'JSON:{"name":"group1","domain":{"name":"Default"}}', 'JSON:{"name":"group2","domain":{"name":"Default"}}', ] SAML_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2' ECP_GENERATION_ROUTE = '/auth/OS-FEDERATION/saml2/ecp' ASSERTION_VERSION = "2.0" SERVICE_PROVDIER_ID = 'ACME' def setUp(self): super().setUp() self.signed_assertion = saml2.create_class_from_xml_string( saml.Assertion, _load_xml(self.ASSERTION_FILE) ) self.sp = core.new_service_provider_ref( auth_url=self.SP_AUTH_URL, sp_url=self.RECIPIENT ) url = '/OS-FEDERATION/service_providers/' + self.SERVICE_PROVDIER_ID self.put( url, body={'service_provider': self.sp}, expected_status=http.client.CREATED, ) def test_samlize_token_values(self): """Test the SAML generator produces a SAML object. Test the SAML generator directly by passing known arguments, the result should be a SAML object that consistently includes attributes based on the known arguments that were passed in. 
""" with mock.patch.object( keystone_idp, '_sign_assertion', return_value=self.signed_assertion ): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token( self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) assertion = response.assertion self.assertIsNotNone(assertion) self.assertIsInstance(assertion, saml.Assertion) issuer = response.issuer self.assertEqual(self.RECIPIENT, response.destination) self.assertEqual(self.ISSUER, issuer.text) user_attribute = assertion.attribute_statement[0].attribute[0] self.assertEqual(self.SUBJECT, user_attribute.attribute_value[0].text) user_domain_attribute = assertion.attribute_statement[0].attribute[1] self.assertEqual( self.SUBJECT_DOMAIN, user_domain_attribute.attribute_value[0].text ) role_attribute = assertion.attribute_statement[0].attribute[2] for attribute_value in role_attribute.attribute_value: self.assertIn(attribute_value.text, self.ROLES) project_attribute = assertion.attribute_statement[0].attribute[3] self.assertEqual( self.PROJECT, project_attribute.attribute_value[0].text ) project_domain_attribute = assertion.attribute_statement[0].attribute[ 4 ] self.assertEqual( self.PROJECT_DOMAIN, project_domain_attribute.attribute_value[0].text, ) group_attribute = assertion.attribute_statement[0].attribute[5] for attribute_value in group_attribute.attribute_value: self.assertIn(attribute_value.text, self.GROUPS) def test_comma_in_certfile_path(self): self.config_fixture.config( group='saml', certfile=CONF.saml.certfile + ',' ) generator = keystone_idp.SAMLGenerator() self.assertRaises( exception.UnexpectedError, generator.samlize_token, self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) def test_comma_in_keyfile_path(self): self.config_fixture.config( group='saml', keyfile=CONF.saml.keyfile + ',' ) generator = keystone_idp.SAMLGenerator() self.assertRaises( 
exception.UnexpectedError, generator.samlize_token, self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) def test_verify_assertion_object(self): """Test that the Assertion object is built properly. The Assertion doesn't need to be signed in this test, so _sign_assertion method is patched and doesn't alter the assertion. """ with mock.patch.object( keystone_idp, '_sign_assertion', side_effect=lambda x: x ): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token( self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) assertion = response.assertion self.assertEqual(self.ASSERTION_VERSION, assertion.version) def test_valid_saml_xml(self): """Test the generated SAML object can become valid XML. Test the generator directly by passing known arguments, the result should be a SAML object that consistently includes attributes based on the known arguments that were passed in. 
""" with mock.patch.object( keystone_idp, '_sign_assertion', return_value=self.signed_assertion ): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token( self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) saml_str = response.to_string() response = etree.fromstring(saml_str) issuer = response[0] assertion = response[2] self.assertEqual(self.RECIPIENT, response.get('Destination')) self.assertEqual(self.ISSUER, issuer.text) user_attribute = assertion[4][0] self.assertEqual(self.SUBJECT, user_attribute[0].text) user_domain_attribute = assertion[4][1] self.assertEqual(self.SUBJECT_DOMAIN, user_domain_attribute[0].text) role_attribute = assertion[4][2] for attribute_value in role_attribute: self.assertIn(attribute_value.text, self.ROLES) project_attribute = assertion[4][3] self.assertEqual(self.PROJECT, project_attribute[0].text) project_domain_attribute = assertion[4][4] self.assertEqual(self.PROJECT_DOMAIN, project_domain_attribute[0].text) group_attribute = assertion[4][5] for attribute_value in group_attribute: self.assertIn(attribute_value.text, self.GROUPS) def test_assertion_using_explicit_namespace_prefixes(self): def mocked_subprocess_check_output(*popenargs, **kwargs): # the last option is the assertion file to be signed if popenargs[0] != ['/usr/bin/which', CONF.saml.xmlsec1_binary]: filename = popenargs[0][-1] with open(filename) as f: assertion_content = f.read() # since we are not testing the signature itself, we can return # the assertion as is without signing it return assertion_content with mock.patch.object( subprocess, 'check_output', side_effect=mocked_subprocess_check_output, ): generator = keystone_idp.SAMLGenerator() response = generator.samlize_token( self.ISSUER, self.RECIPIENT, self.SUBJECT, self.SUBJECT_DOMAIN, self.ROLES, self.PROJECT, self.PROJECT_DOMAIN, self.GROUPS, ) assertion_xml = response.assertion.to_string() # The expected values in the 
assertions bellow need to be 'str' in # Python 2 and 'bytes' in Python 3 # make sure we have the proper tag and prefix for the assertion # namespace self.assertIn(b'. Test Plan: - Attempt to get all entities back by passing a two-term attribute - Attempt to piggyback filter to damage DB (e.g. drop table) """ self._set_policy( { "identity:list_users": [], "identity:list_groups": [], "identity:create_group": [], } ) url_by_name = "/users?name=anything' or 'x'='x" r = self.get(url_by_name, auth=self.auth) self.assertEqual(0, len(r.result.get('users'))) # See if we can add a SQL command...use the group table instead of the # user table since 'user' is reserved word for SQLAlchemy. group = unit.new_group_ref(domain_id=self.domainB['id']) group = PROVIDERS.identity_api.create_group(group) url_by_name = "/users?name=x'; drop table group" r = self.get(url_by_name, auth=self.auth) # Check group table is still there... url_by_name = "/groups" r = self.get(url_by_name, auth=self.auth) self.assertGreater(len(r.result.get('groups')), 0) class IdentityPasswordExpiryFilteredTestCase( filtering.FilterTests, test_v3.RestfulTestCase ): """Test password expiring filter on the v3 Identity API.""" def setUp(self): """Setup for Identity Filter Test Cases.""" self.config_fixture = self.useFixture(config_fixture.Config(CONF)) super().setUp() def load_sample_data(self): """Create sample data for password expiry tests. The test environment will consist of a single domain, containing a single project. It will create three users and one group. Each user is going to be given a role assignment on the project and the domain. Two of the three users are going to be placed into the group, which won't have any role assignments to either the project or the domain. 
""" self._populate_default_domain() self.domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(self.domain['id'], self.domain) self.domain_id = self.domain['id'] self.project = unit.new_project_ref(domain_id=self.domain_id) self.project_id = self.project['id'] self.project = PROVIDERS.resource_api.create_project( self.project_id, self.project ) self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = PROVIDERS.identity_api.create_group(self.group) self.group_id = self.group['id'] # Creates three users each with password expiration offset # by one day, starting with the current time frozen. self.starttime = timeutils.utcnow() with freezegun.freeze_time(self.starttime): self.config_fixture.config( group='security_compliance', password_expires_days=1 ) self.user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.config_fixture.config( group='security_compliance', password_expires_days=2 ) self.user2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.config_fixture.config( group='security_compliance', password_expires_days=3 ) self.user3 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.role = unit.new_role_ref(name='admin') PROVIDERS.role_api.create_role(self.role['id'], self.role) self.role_id = self.role['id'] # Grant admin role to the users created. 
PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user['id'], domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user2['id'], domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user3['id'], domain_id=self.domain_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user['id'], project_id=self.project_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user2['id'], project_id=self.project_id ) PROVIDERS.assignment_api.create_grant( self.role_id, user_id=self.user3['id'], project_id=self.project_id ) # Add the last two users to the group. PROVIDERS.identity_api.add_user_to_group( self.user2['id'], self.group_id ) PROVIDERS.identity_api.add_user_to_group( self.user3['id'], self.group_id ) def _list_users_by_password_expires_at(self, time, operator=None): """Call `list_users` with `password_expires_at` filter. GET /users?password_expires_at={operator}:{timestamp} """ url = '/users?password_expires_at=' if operator: url += operator + ':' url += str(time) return url def _list_users_by_multiple_password_expires_at( self, first_time, first_operator, second_time, second_operator ): """Call `list_users` with two `password_expires_at` filters. GET /users?password_expires_at={operator}:{timestamp}& {operator}:{timestamp} """ url = '/users?password_expires_at={}:{}&password_expires_at={}:{}'.format( first_operator, first_time, second_operator, second_time, ) return url def _format_timestamp(self, timestamp): return timestamp.strftime("%Y-%m-%dT%H:%M:%SZ") def test_list_users_by_password_expires_at(self): """Ensure users can be filtered on no operator, eq and neq. 
GET /users?password_expires_at={timestamp} GET /users?password_expires_at=eq:{timestamp} """ expire_at_url = self._list_users_by_password_expires_at( self._format_timestamp(self.starttime + datetime.timedelta(days=2)) ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) # Same call as above, only explicitly stating equals expire_at_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'eq', ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) expire_at_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'neq', ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user['id'], resp_users[0]['id']) self.assertEqual(self.user3['id'], resp_users[1]['id']) def test_list_users_by_password_expires_before(self): """Ensure users can be filtered on lt and lte. GET /users?password_expires_at=lt:{timestamp} GET /users?password_expires_at=lte:{timestamp} """ expire_before_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2, seconds=1) ), 'lt', ) resp_users = self.get(expire_before_url).result.get('users') self.assertEqual(self.user['id'], resp_users[0]['id']) self.assertEqual(self.user2['id'], resp_users[1]['id']) expire_before_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'lte', ) resp_users = self.get(expire_before_url).result.get('users') self.assertEqual(self.user['id'], resp_users[0]['id']) self.assertEqual(self.user2['id'], resp_users[1]['id']) def test_list_users_by_password_expires_after(self): """Ensure users can be filtered on gt and gte. 
GET /users?password_expires_at=gt:{timestamp} GET /users?password_expires_at=gte:{timestamp} """ expire_after_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2, seconds=1) ), 'gt', ) resp_users = self.get(expire_after_url).result.get('users') self.assertEqual(self.user3['id'], resp_users[0]['id']) expire_after_url = self._list_users_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'gte', ) resp_users = self.get(expire_after_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) self.assertEqual(self.user3['id'], resp_users[1]['id']) def test_list_users_by_password_expires_interval(self): """Ensure users can be filtered on time intervals. GET /users?password_expires_at=lt:{timestamp}>:{timestamp} GET /users?password_expires_at=lte:{timestamp}>e:{timestamp} Time intervals are defined by using lt or lte and gt or gte, where the lt/lte time is greater than the gt/gte time. """ expire_interval_url = self._list_users_by_multiple_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=3) ), 'lt', self._format_timestamp( self.starttime + datetime.timedelta(days=1) ), 'gt', ) resp_users = self.get(expire_interval_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) expire_interval_url = self._list_users_by_multiple_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'gte', self._format_timestamp( self.starttime + datetime.timedelta(days=2, seconds=1) ), 'lte', ) resp_users = self.get(expire_interval_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) def test_list_users_by_password_expires_with_bad_operator_fails(self): """Ensure an invalid operator returns a Bad Request. 
GET /users?password_expires_at={invalid_operator}:{timestamp} GET /users?password_expires_at={operator}:{timestamp}& {invalid_operator}:{timestamp} """ bad_op_url = self._list_users_by_password_expires_at( self._format_timestamp(self.starttime), 'x' ) self.get(bad_op_url, expected_status=http.client.BAD_REQUEST) bad_op_url = self._list_users_by_multiple_password_expires_at( self._format_timestamp(self.starttime), 'lt', self._format_timestamp(self.starttime), 'x', ) self.get(bad_op_url, expected_status=http.client.BAD_REQUEST) def test_list_users_by_password_expires_with_bad_timestamp_fails(self): """Ensure an invalid timestamp returns a Bad Request. GET /users?password_expires_at={invalid_timestamp} GET /users?password_expires_at={operator}:{timestamp}& {operator}:{invalid_timestamp} """ bad_ts_url = self._list_users_by_password_expires_at( self.starttime.strftime('%S:%M:%ST%Y-%m-%d') ) self.get(bad_ts_url, expected_status=http.client.BAD_REQUEST) bad_ts_url = self._list_users_by_multiple_password_expires_at( self._format_timestamp(self.starttime), 'lt', self.starttime.strftime('%S:%M:%ST%Y-%m-%d'), 'gt', ) self.get(bad_ts_url, expected_status=http.client.BAD_REQUEST) def _list_users_in_group_by_password_expires_at( self, time, operator=None, expected_status=http.client.OK ): """Call `list_users_in_group` with `password_expires_at` filter. GET /groups/{group_id}/users?password_expires_at= {operator}:{timestamp}&{operator}:{timestamp} """ url = '/groups/' + self.group_id + '/users?password_expires_at=' if operator: url += operator + ':' url += str(time) return url def _list_users_in_group_by_multiple_password_expires_at( self, first_time, first_operator, second_time, second_operator, expected_status=http.client.OK, ): """Call `list_users_in_group` with two `password_expires_at` filters. 
GET /groups/{group_id}/users?password_expires_at= {operator}:{timestamp}&{operator}:{timestamp} """ url = ( '/groups/' + self.group_id + '/users' '?password_expires_at=%s:%s&password_expires_at=%s:%s' % (first_operator, first_time, second_operator, second_time) ) return url def test_list_users_in_group_by_password_expires_at(self): """Ensure users in a group can be filtered on no operator, eq, and neq. GET /groups/{groupid}/users?password_expires_at={timestamp} GET /groups/{groupid}/users?password_expires_at=eq:{timestamp} """ expire_at_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp(self.starttime + datetime.timedelta(days=2)) ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) # Same call as above, only explicitly stating equals expire_at_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'eq', ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) expire_at_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'neq', ) resp_users = self.get(expire_at_url).result.get('users') self.assertEqual(self.user3['id'], resp_users[0]['id']) def test_list_users_in_group_by_password_expires_before(self): """Ensure users in a group can be filtered on with lt and lte. 
GET /groups/{groupid}/users?password_expires_at=lt:{timestamp} GET /groups/{groupid}/users?password_expires_at=lte:{timestamp} """ expire_before_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2, seconds=1) ), 'lt', ) resp_users = self.get(expire_before_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) expire_before_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'lte', ) resp_users = self.get(expire_before_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) def test_list_users_in_group_by_password_expires_after(self): """Ensure users in a group can be filtered on with gt and gte. GET /groups/{groupid}/users?password_expires_at=gt:{timestamp} GET /groups/{groupid}/users?password_expires_at=gte:{timestamp} """ expire_after_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2, seconds=1) ), 'gt', ) resp_users = self.get(expire_after_url).result.get('users') self.assertEqual(self.user3['id'], resp_users[0]['id']) expire_after_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'gte', ) resp_users = self.get(expire_after_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) self.assertEqual(self.user3['id'], resp_users[1]['id']) def test_list_users_in_group_by_password_expires_interval(self): """Ensure users in a group can be filtered on time intervals. GET /groups/{groupid}/users?password_expires_at= lt:{timestamp}>:{timestamp} GET /groups/{groupid}/users?password_expires_at= lte:{timestamp}>e:{timestamp} Time intervals are defined by using lt or lte and gt or gte, where the lt/lte time is greater than the gt/gte time. 
""" expire_interval_url = ( self._list_users_in_group_by_multiple_password_expires_at( self._format_timestamp(self.starttime), 'gt', self._format_timestamp( self.starttime + datetime.timedelta(days=3, seconds=1) ), 'lt', ) ) resp_users = self.get(expire_interval_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) self.assertEqual(self.user3['id'], resp_users[1]['id']) expire_interval_url = ( self._list_users_in_group_by_multiple_password_expires_at( self._format_timestamp( self.starttime + datetime.timedelta(days=2) ), 'gte', self._format_timestamp( self.starttime + datetime.timedelta(days=3) ), 'lte', ) ) resp_users = self.get(expire_interval_url).result.get('users') self.assertEqual(self.user2['id'], resp_users[0]['id']) self.assertEqual(self.user3['id'], resp_users[1]['id']) def test_list_users_in_group_by_password_expires_bad_operator_fails(self): """Ensure an invalid operator returns a Bad Request. GET /groups/{groupid}/users?password_expires_at= {invalid_operator}:{timestamp} GET /groups/{group_id}/users?password_expires_at= {operator}:{timestamp}&{invalid_operator}:{timestamp} """ bad_op_url = self._list_users_in_group_by_password_expires_at( self._format_timestamp(self.starttime), 'bad' ) self.get(bad_op_url, expected_status=http.client.BAD_REQUEST) bad_op_url = self._list_users_in_group_by_multiple_password_expires_at( self._format_timestamp(self.starttime), 'lt', self._format_timestamp(self.starttime), 'x', ) self.get(bad_op_url, expected_status=http.client.BAD_REQUEST) def test_list_users_in_group_by_password_expires_bad_timestamp_fails(self): """Ensure and invalid timestamp returns a Bad Request. 
GET /groups/{groupid}/users?password_expires_at={invalid_timestamp} GET /groups/{groupid}/users?password_expires_at={operator}:{timestamp}& {operator}:{invalid_timestamp} """ bad_ts_url = self._list_users_in_group_by_password_expires_at( self.starttime.strftime('%S:%M:%ST%Y-%m-%d') ) self.get(bad_ts_url, expected_status=http.client.BAD_REQUEST) bad_ts_url = self._list_users_in_group_by_multiple_password_expires_at( self._format_timestamp(self.starttime), 'lt', self.starttime.strftime('%S:%M:%ST%Y-%m-%d'), 'gt', ) self.get(bad_ts_url, expected_status=http.client.BAD_REQUEST) class IdentityTestListLimitCase(IdentityTestFilteredCase): """Test list limiting enforcement on the v3 Identity API.""" content_type = 'json' def setUp(self): """Setup for Identity Limit Test Cases.""" super().setUp() # Create 10 entries for each of the entities we are going to test self.ENTITY_TYPES = ['user', 'group', 'project'] self.entity_lists = {} for entity in self.ENTITY_TYPES: self.entity_lists[entity] = self._create_test_data(entity, 10) # Make sure we clean up when finished self.addCleanup(self.clean_up_entity, entity) self.service_list = [] self.addCleanup(self.clean_up_service) for _ in range(10): new_entity = unit.new_service_ref() service = PROVIDERS.catalog_api.create_service( new_entity['id'], new_entity ) self.service_list.append(service) self.policy_list = [] self.addCleanup(self.clean_up_policy) for _ in range(10): new_entity = unit.new_policy_ref() policy = PROVIDERS.policy_api.create_policy( new_entity['id'], new_entity ) self.policy_list.append(policy) def clean_up_entity(self, entity): """Clean up entity test data from Identity Limit Test Cases.""" self._delete_test_data(entity, self.entity_lists[entity]) def clean_up_service(self): """Clean up service test data from Identity Limit Test Cases.""" for service in self.service_list: PROVIDERS.catalog_api.delete_service(service['id']) def clean_up_policy(self): """Clean up policy test data from Identity Limit Test Cases.""" 
for policy in self.policy_list: PROVIDERS.policy_api.delete_policy(policy['id']) def _test_entity_list_limit(self, entity, driver): """GET / (limited). Test Plan: - For the specified type of entity: - Update policy for no protection on api - Add a bunch of entities - Set the global list limit to 5, and check that getting all - entities only returns 5 - Set the driver list_limit to 4, and check that now only 4 are - returned """ if entity == 'policy': plural = 'policies' else: plural = '%ss' % entity self._set_policy({"identity:list_%s" % plural: []}) self.config_fixture.config(list_limit=5) self.config_fixture.config(group=driver, list_limit=None) r = self.get('/%s' % plural, auth=self.auth) self.assertEqual(5, len(r.result.get(plural))) self.assertIs(r.result.get('truncated'), True) self.config_fixture.config(group=driver, list_limit=4) r = self.get('/%s' % plural, auth=self.auth) self.assertEqual(4, len(r.result.get(plural))) self.assertIs(r.result.get('truncated'), True) def test_users_list_limit(self): self._test_entity_list_limit('user', 'identity') def test_groups_list_limit(self): self._test_entity_list_limit('group', 'identity') def test_projects_list_limit(self): self._test_entity_list_limit('project', 'resource') def test_services_list_limit(self): self._test_entity_list_limit('service', 'catalog') def test_non_driver_list_limit(self): """Check list can be limited without driver level support. Policy limiting is not done at the driver level (since it really isn't worth doing it there). So use this as a test for ensuring the controller level will successfully limit in this case. 
""" self._test_entity_list_limit('policy', 'policy') def test_no_limit(self): """Check truncated attribute not set when list not limited.""" self._set_policy({"identity:list_services": []}) r = self.get('/services', auth=self.auth) self.assertEqual(10, len(r.result.get('services'))) self.assertNotIn('truncated', r.result) def test_at_limit(self): """Check truncated attribute not set when list at max size.""" # Test this by overriding the general limit with a higher # driver-specific limit (allowing all entities to be returned # in the collection), which should result in a non truncated list self._set_policy({"identity:list_services": []}) self.config_fixture.config(list_limit=5) self.config_fixture.config(group='catalog', list_limit=10) r = self.get('/services', auth=self.auth) self.assertEqual(10, len(r.result.get('services'))) self.assertNotIn('truncated', r.result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_identity.py0000664000175000017500000015315500000000000023331 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import http.client from unittest import mock import uuid import fixtures import freezegun from oslo_db import exception as oslo_db_exception from oslo_log import log from oslo_utils import timeutils from testtools import matchers from keystone.common import provider_api from keystone.common import sql import keystone.conf from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone.identity.backends import base as identity_base from keystone.identity.backends import resource_options as options from keystone.identity.backends import sql_model as model from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone.tests.unit import mapping_fixtures from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class IdentityTestCase(test_v3.RestfulTestCase): """Test users and groups.""" def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) self.group = unit.new_group_ref(domain_id=self.domain_id) self.group = PROVIDERS.identity_api.create_group(self.group) self.group_id = self.group['id'] self.credential = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) PROVIDERS.credential_api.create_credential( self.credential['id'], self.credential ) # user crud tests def test_create_user(self): """Call ``POST /users``.""" ref = unit.new_user_ref(domain_id=self.domain_id) r = self.post('/users', body={'user': ref}) return self.assertValidUserResponse(r, ref) def test_create_user_without_domain(self): """Call ``POST /users`` without specifying domain. According to the identity-api specification, if you do not explicitly specific the domain_id in the entity, it should take the domain scope of the token as the domain_id. 
""" # Create a user with a role on the domain so we can get a # domain scoped token domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) PROVIDERS.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], domain_id=domain['id'] ) ref = unit.new_user_ref(domain_id=domain['id']) ref_nd = ref.copy() ref_nd.pop('domain_id') auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) r = self.post('/users', body={'user': ref_nd}, auth=auth) self.assertValidUserResponse(r, ref) # Now try the same thing without a domain token - which should fail ref = unit.new_user_ref(domain_id=domain['id']) ref_nd = ref.copy() ref_nd.pop('domain_id') auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'], ) # TODO(henry-nash): Due to bug #1283539 we currently automatically # use the default domain_id if a domain scoped token is not being # used. For now we just check that a deprecation warning has been # issued. Change the code below to expect a failure once this bug is # fixed. with mock.patch( 'oslo_log.versionutils.report_deprecated_feature' ) as mock_dep: r = self.post('/users', body={'user': ref_nd}, auth=auth) self.assertTrue(mock_dep.called) ref['domain_id'] = CONF.identity.default_domain_id return self.assertValidUserResponse(r, ref) def test_create_user_with_admin_token_and_domain(self): """Call ``POST /users`` with admin token and domain id.""" ref = unit.new_user_ref(domain_id=self.domain_id) self.post( '/users', body={'user': ref}, token=self.get_admin_token(), expected_status=http.client.CREATED, ) def test_user_management_normalized_keys(self): """Illustrate the inconsistent handling of hyphens in keys. 
To quote Morgan in bug 1526244: the reason this is converted from "domain-id" to "domain_id" is because of how we process/normalize data. The way we have to handle specific data types for known columns requires avoiding "-" in the actual python code since "-" is not valid for attributes in python w/o significant use of "getattr" etc. In short, historically we handle some things in conversions. The use of "extras" has long been a poor design choice that leads to odd/strange inconsistent behaviors because of other choices made in handling data from within the body. (In many cases we convert from "-" to "_" throughout openstack) Source: https://bugs.launchpad.net/keystone/+bug/1526244/comments/9 """ # Create two domains to work with. domain1 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain1['id'], domain1) domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) # We can successfully create a normal user without any surprises. user = unit.new_user_ref(domain_id=domain1['id']) r = self.post('/users', body={'user': user}) self.assertValidUserResponse(r, user) user['id'] = r.json['user']['id'] # Query strings are not normalized: so we get all users back (like # self.user), not just the ones in the specified domain. r = self.get('/users?domain-id=%s' % domain1['id']) self.assertValidUserListResponse(r, ref=self.user) self.assertNotEqual(domain1['id'], self.user['domain_id']) # When creating a new user, if we move the 'domain_id' into the # 'domain-id' attribute, the server will normalize the request # attribute, and effectively "move it back" for us. 
user = unit.new_user_ref(domain_id=domain1['id']) user['domain-id'] = user.pop('domain_id') r = self.post('/users', body={'user': user}) self.assertNotIn('domain-id', r.json['user']) self.assertEqual(domain1['id'], r.json['user']['domain_id']) # (move this attribute back so we can use assertValidUserResponse) user['domain_id'] = user.pop('domain-id') self.assertValidUserResponse(r, user) user['id'] = r.json['user']['id'] # If we try updating the user's 'domain_id' by specifying a # 'domain-id', then it'll be stored into extras rather than normalized, # and the user's actual 'domain_id' is not affected. r = self.patch( '/users/%s' % user['id'], body={'user': {'domain-id': domain2['id']}}, ) self.assertEqual(domain2['id'], r.json['user']['domain-id']) self.assertEqual(user['domain_id'], r.json['user']['domain_id']) self.assertNotEqual(domain2['id'], user['domain_id']) self.assertValidUserResponse(r, user) def test_create_user_bad_request(self): """Call ``POST /users``.""" self.post( '/users', body={'user': {}}, expected_status=http.client.BAD_REQUEST, ) def test_create_user_bad_domain_id(self): """Call ``POST /users``.""" # create user with 'DEFaUlT' domain_id instead if 'default' # and verify it fails self.post( '/users', body={'user': {"name": "baddomain", "domain_id": "DEFaUlT"}}, expected_status=http.client.NOT_FOUND, ) def test_list_head_users(self): """Call ``GET & HEAD /users``.""" resource_url = '/users' r = self.get(resource_url) self.assertValidUserListResponse( r, ref=self.user, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_list_users_with_multiple_backends(self): """Call ``GET /users`` when multiple backends is enabled. In this scenario, the controller requires a domain to be specified either as a filter or by using a domain scoped token. 
""" self.config_fixture.config( group='identity', domain_specific_drivers_enabled=True ) # Create a new domain with a new project and user domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) project = unit.new_project_ref(domain_id=domain['id']) PROVIDERS.resource_api.create_project(project['id'], project) user = unit.create_user(PROVIDERS.identity_api, domain_id=domain['id']) # Create both project and domain role grants for the user so we # can get both project and domain scoped tokens PROVIDERS.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], domain_id=domain['id'] ) PROVIDERS.assignment_api.create_grant( role_id=self.role_id, user_id=user['id'], project_id=project['id'] ) dom_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], domain_id=domain['id'], ) project_auth = self.build_authentication_request( user_id=user['id'], password=user['password'], project_id=project['id'], ) # First try using a domain scoped token resource_url = '/users' r = self.get(resource_url, auth=dom_auth) self.assertValidUserListResponse( r, ref=user, resource_url=resource_url ) # Now try using a project scoped token resource_url = '/users' r = self.get(resource_url, auth=project_auth) self.assertValidUserListResponse( r, ref=user, resource_url=resource_url ) # Now try with an explicit filter resource_url = '/users?domain_id={domain_id}'.format( domain_id=domain['id'] ) r = self.get(resource_url) self.assertValidUserListResponse( r, ref=user, resource_url=resource_url ) def test_list_users_no_default_project(self): """Call ``GET /users`` making sure no default_project_id.""" user = unit.new_user_ref(self.domain_id) user = PROVIDERS.identity_api.create_user(user) resource_url = '/users' r = self.get(resource_url) self.assertValidUserListResponse( r, ref=user, resource_url=resource_url ) def test_get_head_user(self): """Call ``GET & HEAD /users/{user_id}``.""" resource_url = 
'/users/{user_id}'.format(user_id=self.user['id']) r = self.get(resource_url) self.assertValidUserResponse(r, self.user) self.head(resource_url, expected_status=http.client.OK) def test_get_user_does_not_include_extra_attributes(self): """Call ``GET /users/{user_id}`` extra attributes are not included.""" user = unit.new_user_ref( domain_id=self.domain_id, project_id=self.project_id ) user = PROVIDERS.identity_api.create_user(user) self.assertNotIn('created_at', user) self.assertNotIn('last_active_at', user) def test_get_user_includes_required_attributes(self): """Call ``GET /users/{user_id}`` required attributes are included.""" user = unit.new_user_ref( domain_id=self.domain_id, project_id=self.project_id ) user = PROVIDERS.identity_api.create_user(user) self.assertIn('id', user) self.assertIn('name', user) self.assertIn('enabled', user) self.assertIn('password_expires_at', user) r = self.get('/users/{user_id}'.format(user_id=user['id'])) self.assertValidUserResponse(r, user) def test_get_user_with_default_project(self): """Call ``GET /users/{user_id}`` making sure of default_project_id.""" user = unit.new_user_ref( domain_id=self.domain_id, project_id=self.project_id ) user = PROVIDERS.identity_api.create_user(user) r = self.get('/users/{user_id}'.format(user_id=user['id'])) self.assertValidUserResponse(r, user) def test_add_user_to_group(self): """Call ``PUT /groups/{group_id}/users/{user_id}``.""" self.put( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) def test_list_head_groups_for_user(self): """Call ``GET & HEAD /users/{user_id}/groups``.""" user1 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) self.put( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': user1['id']} ) # Scenarios below are written to test the default policy configuration # One should be allowed to list 
one's own groups auth = self.build_authentication_request( user_id=user1['id'], password=user1['password'] ) resource_url = '/users/{user_id}/groups'.format(user_id=user1['id']) r = self.get(resource_url, auth=auth) self.assertValidGroupListResponse( r, ref=self.group, resource_url=resource_url ) self.head(resource_url, auth=auth, expected_status=http.client.OK) # Administrator is allowed to list others' groups resource_url = '/users/{user_id}/groups'.format(user_id=user1['id']) r = self.get(resource_url) self.assertValidGroupListResponse( r, ref=self.group, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) # Ordinary users should not be allowed to list other's groups auth = self.build_authentication_request( user_id=user2['id'], password=user2['password'] ) resource_url = '/users/{user_id}/groups'.format(user_id=user1['id']) self.get( resource_url, auth=auth, expected_status=exception.ForbiddenAction.code, ) self.head( resource_url, auth=auth, expected_status=exception.ForbiddenAction.code, ) def test_check_user_in_group(self): """Call ``HEAD /groups/{group_id}/users/{user_id}``.""" self.put( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) self.head( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) def test_list_head_users_in_group(self): """Call ``GET & HEAD /groups/{group_id}/users``.""" self.put( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) resource_url = '/groups/{group_id}/users'.format( group_id=self.group_id ) r = self.get(resource_url) self.assertValidUserListResponse( r, ref=self.user, resource_url=resource_url ) self.assertIn( f'/groups/{self.group_id}/users', r.result['links']['self'], ) self.head(resource_url, expected_status=http.client.OK) def test_remove_user_from_group(self): """Call ``DELETE /groups/{group_id}/users/{user_id}``.""" self.put( 
'/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) self.delete( '/groups/%(group_id)s/users/%(user_id)s' % {'group_id': self.group_id, 'user_id': self.user['id']} ) def test_update_ephemeral_user(self): federated_user_a = model.FederatedUser() federated_user_b = model.FederatedUser() federated_user_a.idp_id = 'a_idp' federated_user_b.idp_id = 'b_idp' federated_user_a.display_name = 'federated_a' federated_user_b.display_name = 'federated_b' federated_users = [federated_user_a, federated_user_b] user_a = model.User() user_a.federated_users = federated_users self.assertEqual(federated_user_a.display_name, user_a.name) self.assertIsNone(user_a.password) user_a.name = 'new_federated_a' self.assertEqual('new_federated_a', user_a.name) self.assertIsNone(user_a.local_user) def test_update_user(self): """Call ``PATCH /users/{user_id}``.""" user = unit.new_user_ref(domain_id=self.domain_id) del user['id'] r = self.patch( '/users/{user_id}'.format(user_id=self.user['id']), body={'user': user}, ) self.assertValidUserResponse(r, user) def test_admin_password_reset(self): # bootstrap a user as admin user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) # auth as user should work before a password change old_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=user_ref['password'] ) r = self.v3_create_token(old_password_auth) old_token = r.headers.get('X-Subject-Token') # auth as user with a token should work before a password change old_token_auth = self.build_authentication_request(token=old_token) self.v3_create_token(old_token_auth) # administrative password reset new_password = uuid.uuid4().hex self.patch( '/users/%s' % user_ref['id'], body={'user': {'password': new_password}}, ) # auth as user with original password should not work after change self.v3_create_token( old_password_auth, expected_status=http.client.UNAUTHORIZED ) # auth as user with an old token 
should not work after change self.v3_create_token( old_token_auth, expected_status=http.client.NOT_FOUND ) # new password should work new_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=new_password ) self.v3_create_token(new_password_auth) def test_admin_password_reset_with_min_password_age_enabled(self): # enable minimum_password_age, this should have no effect on admin # password reset self.config_fixture.config( group='security_compliance', minimum_password_age=1 ) # create user user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) # administrative password reset new_password = uuid.uuid4().hex r = self.patch( '/users/%s' % user_ref['id'], body={'user': {'password': new_password}}, ) self.assertValidUserResponse(r, user_ref) # authenticate with new password new_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=new_password ) self.v3_create_token(new_password_auth) def test_admin_password_reset_with_password_lock(self): # create user user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) lock_pw_opt = options.LOCK_PASSWORD_OPT.option_name update_user_body = {'user': {'options': {lock_pw_opt: True}}} self.patch('/users/%s' % user_ref['id'], body=update_user_body) # administrative password reset new_password = uuid.uuid4().hex r = self.patch( '/users/%s' % user_ref['id'], body={'user': {'password': new_password}}, ) self.assertValidUserResponse(r, user_ref) # authenticate with new password new_password_auth = self.build_authentication_request( user_id=user_ref['id'], password=new_password ) self.v3_create_token(new_password_auth) def test_update_user_domain_id(self): """Call ``PATCH /users/{user_id}`` with domain_id. A user's `domain_id` is immutable. Ensure that any attempts to update the `domain_id` of a user fails. 
""" user = unit.new_user_ref(domain_id=self.domain['id']) user = PROVIDERS.identity_api.create_user(user) user['domain_id'] = CONF.identity.default_domain_id self.patch( '/users/{user_id}'.format(user_id=user['id']), body={'user': user}, expected_status=exception.ValidationError.code, ) def test_delete_user(self): """Call ``DELETE /users/{user_id}``. As well as making sure the delete succeeds, we ensure that any credentials that reference this user are also deleted, while other credentials are unaffected. In addition, no tokens should remain valid for this user. """ # First check the credential for this user is present r = PROVIDERS.credential_api.get_credential(self.credential['id']) self.assertDictEqual(self.credential, r) # Create a second credential with a different user user2 = unit.new_user_ref( domain_id=self.domain['id'], project_id=self.project['id'] ) user2 = PROVIDERS.identity_api.create_user(user2) credential2 = unit.new_credential_ref( user_id=user2['id'], project_id=self.project['id'] ) PROVIDERS.credential_api.create_credential( credential2['id'], credential2 ) # Create a token for this user which we can check later # gets deleted auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], project_id=self.project['id'], ) token = self.get_requested_token(auth_data) # Confirm token is valid for now self.head( '/auth/tokens', headers={'X-Subject-Token': token}, expected_status=http.client.OK, ) # Now delete the user self.delete('/users/{user_id}'.format(user_id=self.user['id'])) # Deleting the user should have deleted any credentials # that reference this project self.assertRaises( exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, self.credential['id'], ) # But the credential for user2 is unaffected r = PROVIDERS.credential_api.get_credential(credential2['id']) self.assertDictEqual(credential2, r) def test_delete_user_retries_on_deadlock(self): patcher = mock.patch( 
'sqlalchemy.orm.query.Query.delete', autospec=True ) class FakeDeadlock: def __init__(self, mock_patcher): self.deadlock_count = 2 self.mock_patcher = mock_patcher self.patched = True def __call__(self, *args, **kwargs): if self.deadlock_count > 1: self.deadlock_count -= 1 else: self.mock_patcher.stop() self.patched = False raise oslo_db_exception.DBDeadlock sql_delete_mock = patcher.start() side_effect = FakeDeadlock(patcher) sql_delete_mock.side_effect = side_effect user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) try: PROVIDERS.identity_api.delete_user(user_id=user_ref['id']) finally: if side_effect.patched: patcher.stop() call_count = sql_delete_mock.call_count # initial attempt + 1 retry delete_user_attempt_count = 2 self.assertEqual(call_count, delete_user_attempt_count) # group crud tests def test_create_group(self): """Call ``POST /groups``.""" # Create a new group to avoid a duplicate check failure ref = unit.new_group_ref(domain_id=self.domain_id) r = self.post('/groups', body={'group': ref}) return self.assertValidGroupResponse(r, ref) def test_create_group_bad_request(self): """Call ``POST /groups``.""" self.post( '/groups', body={'group': {}}, expected_status=http.client.BAD_REQUEST, ) def test_list_head_groups(self): """Call ``GET & HEAD /groups``.""" resource_url = '/groups' r = self.get(resource_url) self.assertValidGroupListResponse( r, ref=self.group, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_get_head_group(self): """Call ``GET & HEAD /groups/{group_id}``.""" resource_url = f'/groups/{self.group_id}' r = self.get(resource_url) self.assertValidGroupResponse(r, self.group) self.head(resource_url, expected_status=http.client.OK) def test_update_group(self): """Call ``PATCH /groups/{group_id}``.""" group = unit.new_group_ref(domain_id=self.domain_id) del group['id'] r = self.patch( f'/groups/{self.group_id}', body={'group': group}, ) self.assertValidGroupResponse(r, 
group) def test_update_group_domain_id(self): """Call ``PATCH /groups/{group_id}`` with domain_id. A group's `domain_id` is immutable. Ensure that any attempts to update the `domain_id` of a group fails. """ self.group['domain_id'] = CONF.identity.default_domain_id self.patch( '/groups/{group_id}'.format(group_id=self.group['id']), body={'group': self.group}, expected_status=exception.ValidationError.code, ) def test_delete_group(self): """Call ``DELETE /groups/{group_id}``.""" self.delete(f'/groups/{self.group_id}') def test_create_user_password_not_logged(self): # When a user is created, the password isn't logged at any level. log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) ref = unit.new_user_ref(domain_id=self.domain_id) self.post('/users', body={'user': ref}) self.assertNotIn(ref['password'], log_fix.output) def test_update_password_not_logged(self): # When admin modifies user password, the password isn't logged at any # level. log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) # bootstrap a user as admin user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) self.assertNotIn(user_ref['password'], log_fix.output) # administrative password reset new_password = uuid.uuid4().hex self.patch( '/users/%s' % user_ref['id'], body={'user': {'password': new_password}}, ) self.assertNotIn(new_password, log_fix.output) def test_setting_default_project_id_to_domain_failed(self): """Call ``POST and PATCH /users`` default_project_id=domain_id. Make sure we validate the default_project_id if it is specified. It cannot be set to a domain_id, even for a project acting as domain right now. That's because we haven't sort out the issuing project-scoped token for project acting as domain bit yet. Once we got that sorted out, we can relax this constraint. 
""" # creating a new user with default_project_id set to a # domain_id should result in HTTP 400 ref = unit.new_user_ref( domain_id=self.domain_id, project_id=self.domain_id ) self.post( '/users', body={'user': ref}, token=CONF.admin_token, expected_status=http.client.BAD_REQUEST, ) # updating user's default_project_id to a domain_id should result # in HTTP 400 user = {'default_project_id': self.domain_id} self.patch( '/users/{user_id}'.format(user_id=self.user['id']), body={'user': user}, token=CONF.admin_token, expected_status=http.client.BAD_REQUEST, ) class ChangePasswordTestCase(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) self.token = self.get_request_token( self.user_ref['password'], http.client.CREATED ) def get_request_token(self, password, expected_status): auth_data = self.build_authentication_request( user_id=self.user_ref['id'], password=password ) r = self.v3_create_token(auth_data, expected_status=expected_status) return r.headers.get('X-Subject-Token') def change_password(self, expected_status, **kwargs): """Return a test response for a change password request.""" return self.post( '/users/%s/password' % self.user_ref['id'], body={'user': kwargs}, token=self.token, expected_status=expected_status, ) class UserSelfServiceChangingPasswordsTestCase(ChangePasswordTestCase): def _create_user_with_expired_password(self): expire_days = CONF.security_compliance.password_expires_days + 1 time = timeutils.utcnow() - datetime.timedelta(expire_days) password = uuid.uuid4().hex user_ref = unit.new_user_ref( domain_id=self.domain_id, password=password ) with freezegun.freeze_time(time): self.user_ref = PROVIDERS.identity_api.create_user(user_ref) return password def test_changing_password(self): # original password works token_id = self.get_request_token( self.user_ref['password'], expected_status=http.client.CREATED ) # original token works old_token_auth = 
self.build_authentication_request(token=token_id) self.v3_create_token(old_token_auth) # change password new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) # old password fails self.get_request_token( self.user_ref['password'], expected_status=http.client.UNAUTHORIZED ) # old token fails self.v3_create_token( old_token_auth, expected_status=http.client.NOT_FOUND ) # new password works self.get_request_token( new_password, expected_status=http.client.CREATED ) def test_changing_password_with_min_password_age(self): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: # enable minimum_password_age and attempt to change password new_password = uuid.uuid4().hex self.config_fixture.config( group='security_compliance', minimum_password_age=1 ) # able to change password after create user self.change_password( password=new_password, original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) # 2nd change password should fail due to minimum password age and # make sure we wait one second to avoid race conditions with Fernet frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) self.token = self.get_request_token( new_password, http.client.CREATED ) self.change_password( password=uuid.uuid4().hex, original_password=new_password, expected_status=http.client.BAD_REQUEST, ) # disable minimum_password_age and attempt to change password self.config_fixture.config( group='security_compliance', minimum_password_age=0 ) self.change_password( password=uuid.uuid4().hex, original_password=new_password, expected_status=http.client.NO_CONTENT, ) def test_changing_password_with_password_lock(self): password = uuid.uuid4().hex ref = unit.new_user_ref(domain_id=self.domain_id, password=password) response = self.post('/users', body={'user': ref}) user_id = response.json_body['user']['id'] time = timeutils.utcnow() with 
freezegun.freeze_time(time) as frozen_datetime: # Lock the user's password lock_pw_opt = options.LOCK_PASSWORD_OPT.option_name user_patch = {'user': {'options': {lock_pw_opt: True}}} self.patch('/users/%s' % user_id, body=user_patch) # Fail, password is locked new_password = uuid.uuid4().hex body = { 'user': { 'original_password': password, 'password': new_password, } } path = '/users/%s/password' % user_id self.post(path, body=body, expected_status=http.client.BAD_REQUEST) # Unlock the password, and change should work user_patch['user']['options'][lock_pw_opt] = False self.patch('/users/%s' % user_id, body=user_patch) path = '/users/%s/password' % user_id self.post(path, body=body, expected_status=http.client.NO_CONTENT) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) auth_data = self.build_authentication_request( user_id=user_id, password=new_password ) self.v3_create_token( auth_data, expected_status=http.client.CREATED ) path = '/users/%s' % user_id user = self.get(path).json_body['user'] self.assertIn(lock_pw_opt, user['options']) self.assertFalse(user['options'][lock_pw_opt]) # Completely unset the option from the user's reference user_patch['user']['options'][lock_pw_opt] = None self.patch('/users/%s' % user_id, body=user_patch) path = '/users/%s' % user_id user = self.get(path).json_body['user'] self.assertNotIn(lock_pw_opt, user['options']) def test_changing_password_with_missing_original_password_fails(self): r = self.change_password( password=uuid.uuid4().hex, expected_status=http.client.BAD_REQUEST ) self.assertThat( r.result['error']['message'], matchers.Contains('original_password'), ) def test_changing_password_with_missing_password_fails(self): r = self.change_password( original_password=self.user_ref['password'], expected_status=http.client.BAD_REQUEST, ) self.assertThat( r.result['error']['message'], matchers.Contains('password') ) def test_changing_password_with_incorrect_password_fails(self): self.change_password( 
password=uuid.uuid4().hex, original_password=uuid.uuid4().hex, expected_status=http.client.UNAUTHORIZED, ) def test_changing_password_with_disabled_user_fails(self): # disable the user account self.user_ref['enabled'] = False self.patch( '/users/%s' % self.user_ref['id'], body={'user': self.user_ref} ) self.change_password( password=uuid.uuid4().hex, original_password=self.user_ref['password'], expected_status=http.client.UNAUTHORIZED, ) def test_changing_password_not_logged(self): # When a user changes their password, the password isn't logged at any # level. log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) # change password new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) self.assertNotIn(self.user_ref['password'], log_fix.output) self.assertNotIn(new_password, log_fix.output) def test_changing_expired_password_succeeds(self): self.config_fixture.config( group='security_compliance', password_expires_days=2 ) password = self._create_user_with_expired_password() new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=password, expected_status=http.client.NO_CONTENT, ) # new password works self.get_request_token( new_password, expected_status=http.client.CREATED ) def test_changing_expired_password_with_disabled_user_fails(self): self.config_fixture.config( group='security_compliance', password_expires_days=2 ) password = self._create_user_with_expired_password() # disable the user account self.user_ref['enabled'] = False self.patch( '/users/%s' % self.user_ref['id'], body={'user': self.user_ref} ) new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=password, expected_status=http.client.UNAUTHORIZED, ) def test_change_password_required_upon_first_use_for_create(self): self.config_fixture.config( group='security_compliance', 
change_password_upon_first_use=True ) # create user self.user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) # attempt to authenticate with create user password self.get_request_token( self.user_ref['password'], expected_status=http.client.UNAUTHORIZED ) # self-service change password new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) # authenticate with the new password self.token = self.get_request_token(new_password, http.client.CREATED) def test_change_password_required_upon_first_use_for_admin_reset(self): self.config_fixture.config( group='security_compliance', change_password_upon_first_use=True ) # admin reset reset_password = uuid.uuid4().hex user_password = {'password': reset_password} PROVIDERS.identity_api.update_user(self.user_ref['id'], user_password) # attempt to authenticate with admin reset password self.get_request_token( reset_password, expected_status=http.client.UNAUTHORIZED ) # self-service change password new_password = uuid.uuid4().hex self.change_password( password=new_password, original_password=reset_password, expected_status=http.client.NO_CONTENT, ) # authenticate with the new password self.token = self.get_request_token(new_password, http.client.CREATED) def test_change_password_required_upon_first_use_ignore_user(self): self.config_fixture.config( group='security_compliance', change_password_upon_first_use=True ) # ignore user and reset password reset_password = uuid.uuid4().hex self.user_ref['password'] = reset_password ignore_opt_name = options.IGNORE_CHANGE_PASSWORD_OPT.option_name self.user_ref['options'][ignore_opt_name] = True PROVIDERS.identity_api.update_user(self.user_ref['id'], self.user_ref) # authenticate with the reset password self.token = self.get_request_token( reset_password, http.client.CREATED ) def test_lockout_exempt(self): self.config_fixture.config( 
group='security_compliance', lockout_failure_attempts=1 ) # create user self.user_ref = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) # update the user, mark her as exempt from lockout ignore_opt_name = options.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name self.user_ref['options'][ignore_opt_name] = True PROVIDERS.identity_api.update_user(self.user_ref['id'], self.user_ref) # fail to auth, this should lockout the user, since we're allowed # one failure, but we're exempt from lockout! bad_password = uuid.uuid4().hex self.token = self.get_request_token( bad_password, http.client.UNAUTHORIZED ) # attempt to authenticate with correct password self.get_request_token( self.user_ref['password'], expected_status=http.client.CREATED ) class PasswordValidationTestCase(ChangePasswordTestCase): def setUp(self): super().setUp() # passwords requires: 1 letter, 1 digit, 7 chars self.config_fixture.config( group='security_compliance', password_regex=(r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$'), ) def test_create_user_with_invalid_password(self): user = unit.new_user_ref(domain_id=self.domain_id) user['password'] = 'simple' self.post( '/users', body={'user': user}, token=self.get_admin_token(), expected_status=http.client.BAD_REQUEST, ) def test_update_user_with_invalid_password(self): user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain['id'] ) user['password'] = 'simple' self.patch( '/users/{user_id}'.format(user_id=user['id']), body={'user': user}, expected_status=http.client.BAD_REQUEST, ) def test_changing_password_with_simple_password_strength(self): # password requires: any non-whitespace character self.config_fixture.config( group='security_compliance', password_regex=r'[\S]+' ) self.change_password( password='simple', original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) def test_changing_password_with_strong_password_strength(self): self.change_password( password='mypassword2', 
original_password=self.user_ref['password'], expected_status=http.client.NO_CONTENT, ) def test_changing_password_with_strong_password_strength_fails(self): # no digit self.change_password( password='mypassword', original_password=self.user_ref['password'], expected_status=http.client.BAD_REQUEST, ) # no letter self.change_password( password='12345678', original_password=self.user_ref['password'], expected_status=http.client.BAD_REQUEST, ) # less than 7 chars self.change_password( password='mypas2', original_password=self.user_ref['password'], expected_status=http.client.BAD_REQUEST, ) class UserFederatedAttributesTests(test_v3.RestfulTestCase): def _create_federated_attributes(self): # Create the idp idp = { 'id': uuid.uuid4().hex, 'enabled': True, 'description': uuid.uuid4().hex, } PROVIDERS.federation_api.create_idp(idp['id'], idp) # Create the mapping mapping = mapping_fixtures.MAPPING_EPHEMERAL_USER mapping['id'] = uuid.uuid4().hex PROVIDERS.federation_api.create_mapping(mapping['id'], mapping) # Create the protocol protocol = {'id': uuid.uuid4().hex, 'mapping_id': mapping['id']} PROVIDERS.federation_api.create_protocol( idp['id'], protocol['id'], protocol ) return idp, protocol def _create_user_with_federated_user(self, user, fed_dict): with sql.session_for_write() as session: federated_ref = model.FederatedUser.from_dict(fed_dict) user_ref = model.User.from_dict(user) user_ref.created_at = timeutils.utcnow() user_ref.federated_users.append(federated_ref) session.add(user_ref) return identity_base.filter_user(user_ref.to_dict()) def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() # Create the federated object idp, protocol = self._create_federated_attributes() self.fed_dict = unit.new_federated_user_ref() self.fed_dict['idp_id'] = idp['id'] self.fed_dict['protocol_id'] = protocol['id'] self.fed_dict['unique_id'] = "jdoe" # Create the domain_id, user, and federated_user relationship self.domain = unit.new_domain_ref() 
PROVIDERS.resource_api.create_domain(self.domain['id'], self.domain) self.fed_user = unit.new_user_ref(domain_id=self.domain['id']) self.fed_user = self._create_user_with_federated_user( self.fed_user, self.fed_dict ) # Create two new fed_users which will have the same idp and protocol # but be completely different from the first fed_user # Create a new idp and protocol for fed_user2 and 3 idp, protocol = self._create_federated_attributes() self.fed_dict2 = unit.new_federated_user_ref() self.fed_dict2['idp_id'] = idp['id'] self.fed_dict2['protocol_id'] = protocol['id'] self.fed_dict2['unique_id'] = "ravelar" self.fed_user2 = unit.new_user_ref(domain_id=self.domain['id']) self.fed_user2 = self._create_user_with_federated_user( self.fed_user2, self.fed_dict2 ) self.fed_dict3 = unit.new_federated_user_ref() self.fed_dict3['idp_id'] = idp['id'] self.fed_dict3['protocol_id'] = protocol['id'] self.fed_dict3['unique_id'] = "jsmith" self.fed_user3 = unit.new_user_ref(domain_id=self.domain['id']) self.fed_user3 = self._create_user_with_federated_user( self.fed_user3, self.fed_dict3 ) def _test_list_users_with_federated_parameter(self, parameter): # construct the resource url based off what's passed in parameter resource_url = '/users?{}={}'.format( parameter[0], self.fed_dict[parameter[0]], ) for attr in parameter[1:]: resource_url += f'&{attr}={self.fed_dict[attr]}' r = self.get(resource_url) # Check that only one out of 3 fed_users is matched by calling the api # and that it is a valid response self.assertEqual(1, len(r.result['users'])) self.assertValidUserListResponse( r, ref=self.fed_user, resource_url=resource_url ) # Since unique_id will always return one user if matching for unique_id # in the query, we rule out unique_id for the next tests if not any('unique_id' in x for x in parameter): # Check that we get two matches here since fed_user2 and fed_user3 # both have the same idp and protocol resource_url = '/users?{}={}'.format( parameter[0], 
self.fed_dict2[parameter[0]], ) for attr in parameter[1:]: resource_url += f'&{attr}={self.fed_dict2[attr]}' r = self.get(resource_url) self.assertEqual(2, len(r.result['users'])) self.assertValidUserListResponse( r, ref=self.fed_user2, resource_url=resource_url ) def test_list_users_with_idp_id(self): attribute = ['idp_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_protocol_id(self): attribute = ['protocol_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_unique_id(self): attribute = ['unique_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_idp_id_and_unique_id(self): attribute = ['idp_id', 'unique_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_idp_id_and_protocol_id(self): attribute = ['idp_id', 'protocol_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_protocol_id_and_unique_id(self): attribute = ['protocol_id', 'unique_id'] self._test_list_users_with_federated_parameter(attribute) def test_list_users_with_all_federated_attributes(self): attribute = ['idp_id', 'protocol_id', 'unique_id'] self._test_list_users_with_federated_parameter(attribute) def test_get_user_includes_required_federated_attributes(self): user = self.identity_api.get_user(self.fed_user['id']) self.assertIn('federated', user) self.assertIn('idp_id', user['federated'][0]) self.assertIn('protocols', user['federated'][0]) self.assertIn('protocol_id', user['federated'][0]['protocols'][0]) self.assertIn('unique_id', user['federated'][0]['protocols'][0]) r = self.get('/users/{user_id}'.format(user_id=user['id'])) self.assertValidUserResponse(r, user) def test_create_user_with_federated_attributes(self): """Call ``POST /users``.""" idp, protocol = self._create_federated_attributes() ref = unit.new_user_ref(domain_id=self.domain_id) ref['federated'] = [ { 'idp_id': idp['id'], 'protocols': [ { 'protocol_id': 
protocol['id'], 'unique_id': uuid.uuid4().hex, } ], } ] r = self.post('/users', body={'user': ref}) user = r.result['user'] self.assertEqual(user['name'], ref['name']) self.assertEqual(user['federated'], ref['federated']) self.assertValidUserResponse(r, ref) def test_create_user_fails_when_given_invalid_idp_and_protocols(self): """Call ``POST /users`` with invalid idp and protocol to fail.""" idp, protocol = self._create_federated_attributes() ref = unit.new_user_ref(domain_id=self.domain_id) ref['federated'] = [ { 'idp_id': 'fakeidp', 'protocols': [ { 'protocol_id': 'fakeprotocol_id', 'unique_id': uuid.uuid4().hex, } ], } ] self.post( '/users', body={'user': ref}, token=self.get_admin_token(), expected_status=http.client.BAD_REQUEST, ) ref['federated'][0]['idp_id'] = idp['id'] self.post( '/users', body={'user': ref}, token=self.get_admin_token(), expected_status=http.client.BAD_REQUEST, ) def test_update_user_with_federated_attributes(self): """Call ``PATCH /users/{user_id}``.""" user = self.fed_user.copy() del user['id'] user['name'] = 'James Doe' idp, protocol = self._create_federated_attributes() user['federated'] = [ { 'idp_id': idp['id'], 'protocols': [ {'protocol_id': protocol['id'], 'unique_id': 'jdoe'} ], } ] r = self.patch( '/users/{user_id}'.format(user_id=self.fed_user['id']), body={'user': user}, ) resp_user = r.result['user'] self.assertEqual(user['name'], resp_user['name']) self.assertEqual(user['federated'], resp_user['federated']) self.assertValidUserResponse(r, user) def test_update_user_fails_when_given_invalid_idp_and_protocols(self): """Call ``PATCH /users/{user_id}``.""" user = self.fed_user.copy() del user['id'] idp, protocol = self._create_federated_attributes() user['federated'] = [ { 'idp_id': 'fakeidp', 'protocols': [ { 'protocol_id': 'fakeprotocol_id', 'unique_id': uuid.uuid4().hex, } ], } ] self.patch( '/users/{user_id}'.format(user_id=self.fed_user['id']), body={'user': user}, expected_status=http.client.BAD_REQUEST, ) 
user['federated'][0]['idp_id'] = idp['id'] self.patch( '/users/{user_id}'.format(user_id=self.fed_user['id']), body={'user': user}, expected_status=http.client.BAD_REQUEST, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_oauth1.py0000664000175000017500000014741300000000000022701 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import http.client import random from unittest import mock import urllib from urllib import parse as urlparse import uuid import freezegun from oslo_serialization import jsonutils from oslo_utils import timeutils from pycadf import cadftaxonomy from keystone.common import provider_api import keystone.conf from keystone import exception from keystone import oauth1 from keystone.oauth1.backends import base from keystone.tests import unit from keystone.tests.unit.common import test_notifications from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import temporaryfile from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs def _urllib_parse_qs_text_keys(content): results = urllib.parse.parse_qs(content) return {key.decode('utf-8'): value for key, value in results.items()} class OAuth1Tests(test_v3.RestfulTestCase): CONSUMER_URL = '/OS-OAUTH1/consumers' def setUp(self): super().setUp() # Now 
that the app has been served, we can query CONF values self.base_url = 'http://localhost/v3' def _create_single_consumer(self): ref = {'description': uuid.uuid4().hex} resp = self.post(self.CONSUMER_URL, body={'consumer': ref}) return resp.result['consumer'] def _create_request_token(self, consumer, project_id, base_url=None): endpoint = '/OS-OAUTH1/request_token' client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], signature_method=oauth1.SIG_HMAC, callback_uri="oob", ) headers = {'requested_project_id': project_id} if not base_url: base_url = self.base_url url, headers, body = client.sign( base_url + endpoint, http_method='POST', headers=headers ) return endpoint, headers def _create_access_token(self, consumer, token, base_url=None): endpoint = '/OS-OAUTH1/access_token' client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC, verifier=token.verifier, ) if not base_url: base_url = self.base_url url, headers, body = client.sign( base_url + endpoint, http_method='POST' ) headers.update({'Content-Type': 'application/json'}) return endpoint, headers def _get_oauth_token(self, consumer, token): client = oauth1.Client( consumer['key'], client_secret=consumer['secret'], resource_owner_key=token.key, resource_owner_secret=token.secret, signature_method=oauth1.SIG_HMAC, ) endpoint = '/auth/tokens' url, headers, body = client.sign( self.base_url + endpoint, http_method='POST' ) headers.update({'Content-Type': 'application/json'}) ref = {'auth': {'identity': {'oauth1': {}, 'methods': ['oauth1']}}} return endpoint, headers, ref def _authorize_request_token(self, request_id): if isinstance(request_id, bytes): request_id = request_id.decode() return '/OS-OAUTH1/authorize/%s' % (request_id) class ConsumerCRUDTests(OAuth1Tests): def _consumer_create( self, description=None, description_flag=True, **kwargs ): if description_flag: ref = 
{'description': description} else: ref = {} if kwargs: ref.update(kwargs) resp = self.post(self.CONSUMER_URL, body={'consumer': ref}) consumer = resp.result['consumer'] consumer_id = consumer['id'] self.assertEqual(description, consumer['description']) self.assertIsNotNone(consumer_id) self.assertIsNotNone(consumer['secret']) return consumer def test_consumer_create(self): description = uuid.uuid4().hex self._consumer_create(description=description) def test_consumer_create_none_desc_1(self): self._consumer_create() def test_consumer_create_none_desc_2(self): self._consumer_create(description_flag=False) def test_consumer_create_normalize_field(self): # If create a consumer with a field with : or - in the name, # the name is normalized by converting those chars to _. field_name = 'some:weird-field' field_value = uuid.uuid4().hex extra_fields = {field_name: field_value} consumer = self._consumer_create(**extra_fields) normalized_field_name = 'some_weird_field' self.assertEqual(field_value, consumer[normalized_field_name]) def test_consumer_delete(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] resp = self.delete(self.CONSUMER_URL + '/%s' % consumer_id) self.assertResponseStatus(resp, http.client.NO_CONTENT) def test_consumer_get_head(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] url = self.CONSUMER_URL + '/%s' % consumer_id resp = self.get(url) self_url = ['http://localhost/v3', self.CONSUMER_URL, '/', consumer_id] self_url = ''.join(self_url) self.assertEqual(self_url, resp.result['consumer']['links']['self']) self.assertEqual(consumer_id, resp.result['consumer']['id']) self.head(url, expected_status=http.client.OK) def test_consumer_list(self): self._consumer_create() resp = self.get(self.CONSUMER_URL) entities = resp.result['consumers'] self.assertIsNotNone(entities) self_url = ['http://localhost/v3', self.CONSUMER_URL] self_url = ''.join(self_url) self.assertEqual(self_url, 
resp.result['links']['self']) self.assertValidListLinks(resp.result['links']) self.head(self.CONSUMER_URL, expected_status=http.client.OK) def test_consumer_update(self): consumer = self._create_single_consumer() original_id = consumer['id'] original_description = consumer['description'] update_description = original_description + '_new' update_ref = {'description': update_description} update_resp = self.patch( self.CONSUMER_URL + '/%s' % original_id, body={'consumer': update_ref}, ) consumer = update_resp.result['consumer'] self.assertEqual(update_description, consumer['description']) self.assertEqual(original_id, consumer['id']) def test_consumer_update_bad_secret(self): consumer = self._create_single_consumer() original_id = consumer['id'] update_ref = copy.deepcopy(consumer) update_ref['description'] = uuid.uuid4().hex update_ref['secret'] = uuid.uuid4().hex self.patch( self.CONSUMER_URL + '/%s' % original_id, body={'consumer': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_consumer_update_bad_id(self): consumer = self._create_single_consumer() original_id = consumer['id'] original_description = consumer['description'] update_description = original_description + "_new" update_ref = copy.deepcopy(consumer) update_ref['description'] = update_description update_ref['id'] = update_description self.patch( self.CONSUMER_URL + '/%s' % original_id, body={'consumer': update_ref}, expected_status=http.client.BAD_REQUEST, ) def test_consumer_update_normalize_field(self): # If update a consumer with a field with : or - in the name, # the name is normalized by converting those chars to _. 
field1_name = 'some:weird-field' field1_orig_value = uuid.uuid4().hex extra_fields = {field1_name: field1_orig_value} consumer = self._consumer_create(**extra_fields) consumer_id = consumer['id'] field1_new_value = uuid.uuid4().hex field2_name = 'weird:some-field' field2_value = uuid.uuid4().hex update_ref = {field1_name: field1_new_value, field2_name: field2_value} update_resp = self.patch( self.CONSUMER_URL + '/%s' % consumer_id, body={'consumer': update_ref}, ) consumer = update_resp.result['consumer'] normalized_field1_name = 'some_weird_field' self.assertEqual(field1_new_value, consumer[normalized_field1_name]) normalized_field2_name = 'weird_some_field' self.assertEqual(field2_value, consumer[normalized_field2_name]) def test_consumer_create_no_description(self): resp = self.post(self.CONSUMER_URL, body={'consumer': {}}) consumer = resp.result['consumer'] consumer_id = consumer['id'] self.assertIsNone(consumer['description']) self.assertIsNotNone(consumer_id) self.assertIsNotNone(consumer['secret']) def test_consumer_get_bad_id(self): url = self.CONSUMER_URL + '/{consumer_id}'.format( consumer_id=uuid.uuid4().hex ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) class OAuthFlowTests(OAuth1Tests): def test_oauth_flow(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token( self.consumer, self.project_id ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = 
self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) self.verifier = resp.result['token']['oauth_verifier'] self.assertTrue(all(i in base.VERIFIER_CHARS for i in self.verifier)) self.assertEqual(8, len(self.verifier)) self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token( self.consumer, self.request_token ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) url, headers, body = self._get_oauth_token( self.consumer, self.access_token ) content = self.post(url, headers=headers, body=body) self.keystone_token_id = content.headers['X-Subject-Token'] self.keystone_token = content.result['token'] self.assertIsNotNone(self.keystone_token_id) # add a new role assignment to ensure it is ignored in the access token new_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} PROVIDERS.role_api.create_role(new_role['id'], new_role) PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=self.user_id, project_id=self.project_id, role_id=new_role['id'], ) content = self.post(url, headers=headers, body=body) token = content.result['token'] token_roles = [r['id'] for r in token['roles']] self.assertIn(self.role_id, token_roles) self.assertNotIn(new_role['id'], token_roles) class AccessTokenCRUDTests(OAuthFlowTests): def test_delete_access_token_dne(self): self.delete( '/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_list_no_access_tokens(self): url = '/users/{user_id}/OS-OAUTH1/access_tokens'.format( user_id=self.user_id ) resp = 
self.get(url) entities = resp.result['access_tokens'] self.assertEqual([], entities) self.assertValidListLinks(resp.result['links']) self.head(url, expected_status=http.client.OK) def test_get_single_access_token(self): self.test_oauth_flow() access_token_key_string = self.access_token.key.decode() url = '/users/{user_id}/OS-OAUTH1/access_tokens/{key}'.format( user_id=self.user_id, key=access_token_key_string, ) resp = self.get(url) entity = resp.result['access_token'] self.assertEqual(access_token_key_string, entity['id']) self.assertEqual(self.consumer['key'], entity['consumer_id']) self.assertEqual('http://localhost/v3' + url, entity['links']['self']) self.head(url, expected_status=http.client.OK) def test_get_access_token_dne(self): url = '/users/{user_id}/OS-OAUTH1/access_tokens/{key}'.format( user_id=self.user_id, key=uuid.uuid4().hex, ) self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_list_all_roles_in_access_token(self): self.test_oauth_flow() url = '/users/{id}/OS-OAUTH1/access_tokens/{key}/roles'.format( id=self.user_id, key=self.access_token.key.decode(), ) resp = self.get(url) entities = resp.result['roles'] self.assertTrue(entities) self.assertValidListLinks(resp.result['links']) self.head(url, expected_status=http.client.OK) def test_get_role_in_access_token(self): self.test_oauth_flow() access_token_key = self.access_token.key.decode() url = ( '/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s' % { 'id': self.user_id, 'key': access_token_key, 'role': self.role_id, } ) resp = self.get(url) entity = resp.result['role'] self.assertEqual(self.role_id, entity['id']) self.head(url, expected_status=http.client.OK) def test_get_role_in_access_token_dne(self): self.test_oauth_flow() access_token_key = self.access_token.key.decode() url = ( '/users/%(id)s/OS-OAUTH1/access_tokens/%(key)s/roles/%(role)s' % { 'id': self.user_id, 'key': access_token_key, 'role': uuid.uuid4().hex, } ) 
self.get(url, expected_status=http.client.NOT_FOUND) self.head(url, expected_status=http.client.NOT_FOUND) def test_list_and_delete_access_tokens(self): self.test_oauth_flow() # List access_tokens should be > 0 url = '/users/{user_id}/OS-OAUTH1/access_tokens'.format( user_id=self.user_id ) resp = self.get(url) self.head(url, expected_status=http.client.OK) entities = resp.result['access_tokens'] self.assertTrue(entities) self.assertValidListLinks(resp.result['links']) access_token_key = self.access_token.key.decode() # Delete access_token resp = self.delete( '/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': access_token_key} ) self.assertResponseStatus(resp, http.client.NO_CONTENT) # List access_token should be 0 resp = self.get(url) self.head(url, expected_status=http.client.OK) entities = resp.result['access_tokens'] self.assertEqual([], entities) self.assertValidListLinks(resp.result['links']) class AuthTokenTests: def test_keystone_token_is_valid(self): self.test_oauth_flow() headers = { 'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id, } r = self.get('/auth/tokens', headers=headers) self.assertValidTokenResponse(r, self.user) # now verify the oauth section oauth_section = r.result['token']['OS-OAUTH1'] self.assertEqual( self.access_token.key.decode(), oauth_section['access_token_id'] ) self.assertEqual(self.consumer['key'], oauth_section['consumer_id']) # verify the roles section roles_list = r.result['token']['roles'] # we can just verify the 0th role since we are only assigning one role self.assertEqual(self.role_id, roles_list[0]['id']) # verify that the token can perform delegated tasks ref = unit.new_user_ref(domain_id=self.domain_id) r = self.admin_request( path='/v3/users', headers=headers, method='POST', body={'user': ref}, ) self.assertValidUserResponse(r, ref) def test_delete_access_token_also_revokes_token(self): self.test_oauth_flow() access_token_key = 
self.access_token.key.decode() # Delete access token resp = self.delete( '/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': access_token_key} ) self.assertResponseStatus(resp, http.client.NO_CONTENT) # Check Keystone Token no longer exists headers = { 'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id, } self.get( '/auth/tokens', headers=headers, expected_status=http.client.NOT_FOUND, ) def test_deleting_consumer_also_deletes_tokens(self): self.test_oauth_flow() # Delete consumer consumer_id = self.consumer['key'] resp = self.delete( '/OS-OAUTH1/consumers/%(consumer_id)s' % {'consumer_id': consumer_id} ) self.assertResponseStatus(resp, http.client.NO_CONTENT) # List access_token should be 0 resp = self.get( '/users/%(user_id)s/OS-OAUTH1/access_tokens' % {'user_id': self.user_id} ) entities = resp.result['access_tokens'] self.assertEqual([], entities) # Check Keystone Token no longer exists headers = { 'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id, } self.head( '/auth/tokens', headers=headers, expected_status=http.client.NOT_FOUND, ) def test_change_user_password_also_deletes_tokens(self): self.test_oauth_flow() # delegated keystone token exists headers = { 'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id, } r = self.get('/auth/tokens', headers=headers) self.assertValidTokenResponse(r, self.user) user = {'password': uuid.uuid4().hex} r = self.patch( '/users/{user_id}'.format(user_id=self.user['id']), body={'user': user}, ) headers = {'X-Subject-Token': self.keystone_token_id} self.get( path='/auth/tokens', token=self.get_admin_token(), headers=headers, expected_status=http.client.NOT_FOUND, ) def test_deleting_project_also_invalidates_tokens(self): self.test_oauth_flow() # delegated keystone token exists headers = { 'X-Subject-Token': self.keystone_token_id, 'X-Auth-Token': self.keystone_token_id, } r = self.get('/auth/tokens', 
headers=headers) self.assertValidTokenResponse(r, self.user) r = self.delete(f'/projects/{self.project_id}') headers = {'X-Subject-Token': self.keystone_token_id} self.get( path='/auth/tokens', token=self.get_admin_token(), headers=headers, expected_status=http.client.NOT_FOUND, ) def test_token_chaining_is_not_allowed(self): self.test_oauth_flow() # attempt to re-authenticate (token chain) with the given token path = '/v3/auth/tokens/' auth_data = self.build_authentication_request( token=self.keystone_token_id ) self.admin_request( path=path, body=auth_data, token=self.keystone_token_id, method='POST', expected_status=http.client.FORBIDDEN, ) def test_delete_keystone_tokens_by_consumer_id(self): self.test_oauth_flow() PROVIDERS.token_provider_api._persistence.get_token( self.keystone_token_id ) PROVIDERS.token_provider_api._persistence.delete_tokens( self.user_id, consumer_id=self.consumer['key'] ) self.assertRaises( exception.TokenNotFound, PROVIDERS.token_provider_api._persistence.get_token, self.keystone_token_id, ) def _create_trust_get_token(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) del ref['id'] r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r) auth_data = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], trust_id=trust['id'], ) return self.get_requested_token(auth_data) def _approve_request_token_url(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token( self.consumer, self.project_id ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = 
_urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) return url def test_oauth_token_cannot_create_new_trust(self): self.test_oauth_flow() ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) del ref['id'] self.post( '/OS-TRUST/trusts', body={'trust': ref}, token=self.keystone_token_id, expected_status=http.client.FORBIDDEN, ) def test_oauth_token_cannot_authorize_request_token(self): self.test_oauth_flow() url = self._approve_request_token_url() body = {'roles': [{'id': self.role_id}]} self.put( url, body=body, token=self.keystone_token_id, expected_status=http.client.FORBIDDEN, ) def test_oauth_token_cannot_list_request_tokens(self): self._set_policy( { "identity:list_access_tokens": [], "identity:create_consumer": [], "identity:authorize_request_token": [], } ) self.test_oauth_flow() url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id self.get( url, token=self.keystone_token_id, expected_status=http.client.FORBIDDEN, ) def _set_policy(self, new_policy): self.tempfile = self.useFixture(temporaryfile.SecureTempFile()) self.tmpfilename = self.tempfile.file_name self.config_fixture.config( group='oslo_policy', policy_file=self.tmpfilename ) with open(self.tmpfilename, "w") as policyfile: policyfile.write(jsonutils.dumps(new_policy)) def test_trust_token_cannot_authorize_request_token(self): trust_token = self._create_trust_get_token() url = self._approve_request_token_url() body = {'roles': [{'id': self.role_id}]} self.put( url, body=body, token=trust_token, expected_status=http.client.FORBIDDEN, ) def test_trust_token_cannot_list_request_tokens(self): self._set_policy( 
{"identity:list_access_tokens": [], "identity:create_trust": []} ) trust_token = self._create_trust_get_token() url = '/users/%s/OS-OAUTH1/access_tokens' % self.user_id self.get(url, token=trust_token, expected_status=http.client.FORBIDDEN) class FernetAuthTokenTests(AuthTokenTests, OAuthFlowTests): def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', provider='fernet') self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) def test_delete_keystone_tokens_by_consumer_id(self): self.skipTest('Fernet tokens are never persisted in the backend.') class MaliciousOAuth1Tests(OAuth1Tests): def _switch_baseurl_scheme(self): """Switch the base url scheme.""" base_url_list = list(urlparse.urlparse(self.base_url)) base_url_list[0] = 'https' if base_url_list[0] == 'http' else 'http' bad_url = urlparse.urlunparse(base_url_list) return bad_url def test_bad_consumer_secret(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer = {'key': consumer_id, 'secret': uuid.uuid4().hex} url, headers = self._create_request_token(consumer, self.project_id) self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) def test_bad_request_url(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} bad_base_url = 'http://localhost/identity_admin/v3' url, headers = self._create_request_token( consumer, self.project_id, base_url=bad_base_url ) self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) def test_bad_request_url_scheme(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} bad_url_scheme = self._switch_baseurl_scheme() url, headers = self._create_request_token( consumer, 
self.project_id, base_url=bad_url_scheme ) self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) def test_bad_request_token_key(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) url = self._authorize_request_token(uuid.uuid4().hex) body = {'roles': [{'id': self.role_id}]} self.put(url, body=body, expected_status=http.client.NOT_FOUND) def test_bad_request_body_when_authorize(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] url = self._authorize_request_token(request_key) bad_body = {'roles': [{'fake_key': 'fake_value'}]} self.put(url, body=bad_body, expected_status=http.client.BAD_REQUEST) def test_bad_consumer_id(self): consumer = self._create_single_consumer() consumer_id = uuid.uuid4().hex consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) self.post(url, headers=headers, expected_status=http.client.NOT_FOUND) def test_bad_requested_project_id(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} project_id = uuid.uuid4().hex url, headers = self._create_request_token(consumer, project_id) self.post(url, headers=headers, 
expected_status=http.client.NOT_FOUND) def test_bad_verifier(self): self.config_fixture.config(debug=True, insecure_debug=True) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] request_token = oauth1.Token(request_key, request_secret) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) verifier = resp.result['token']['oauth_verifier'] self.assertIsNotNone(verifier) request_token.set_verifier(uuid.uuid4().hex) url, headers = self._create_access_token(consumer, request_token) resp = self.post( url, headers=headers, expected_status=http.client.BAD_REQUEST ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Validation failed with errors', resp_data.get('error', {}).get('message'), ) def test_validate_access_token_request_failed(self): self.config_fixture.config(debug=True, insecure_debug=True) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] request_token = oauth1.Token(request_key, request_secret) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} 
resp = self.put(url, body=body, expected_status=http.client.OK) verifier = resp.result['token']['oauth_verifier'] request_token.set_verifier(verifier) # 1. Invalid base url. # Update the base url, so it will fail to validate the signature. base_url = 'http://localhost/identity_admin/v3' url, headers = self._create_access_token( consumer, request_token, base_url=base_url ) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Invalid signature', resp_data.get('error', {}).get('message') ) # 2. Invalid base url scheme. # Update the base url scheme, so it will fail to validate signature. bad_url_scheme = self._switch_baseurl_scheme() url, headers = self._create_access_token( consumer, request_token, base_url=bad_url_scheme ) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Invalid signature', resp_data.get('error', {}).get('message') ) # 3. Invalid signature. # Update the secret, so it will fail to validate the signature. consumer.update({'secret': uuid.uuid4().hex}) url, headers = self._create_access_token(consumer, request_token) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Invalid signature', resp_data.get('error', {}).get('message') ) # 4. Invalid verifier. # Even though the verifier is well formatted, it is not verifier # that is stored in the backend, this is different with the testcase # above `test_bad_verifier` where it test that `verifier` is not # well formatted. 
verifier = ''.join( random.SystemRandom().sample(base.VERIFIER_CHARS, 8) ) request_token.set_verifier(verifier) url, headers = self._create_access_token(consumer, request_token) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Provided verifier', resp_data.get('error', {}).get('message') ) # 5. The provided consumer does not exist. consumer.update({'key': uuid.uuid4().hex}) url, headers = self._create_access_token(consumer, request_token) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Provided consumer does not exist', resp_data.get('error', {}).get('message'), ) # 6. The consumer key provided does not match stored consumer key. consumer2 = self._create_single_consumer() consumer.update({'key': consumer2['id']}) url, headers = self._create_access_token(consumer, request_token) resp = self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Provided consumer key', resp_data.get('error', {}).get('message') ) def test_bad_authorizing_roles_id(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} # This new role is utilzied to ensure the user still has access to # the project but is authorizing an incorrect role_id for the purposes # of oauth1. 
new_role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex} PROVIDERS.role_api.create_role(new_role['id'], new_role) PROVIDERS.assignment_api.add_role_to_user_and_project( user_id=self.user_id, project_id=self.project_id, role_id=new_role['id'], ) url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] PROVIDERS.assignment_api.remove_role_from_user_and_project( self.user_id, self.project_id, new_role['id'] ) url = self._authorize_request_token(request_key) body = {'roles': [{'id': new_role['id']}]} # NOTE(morgan): previous versions of this test erroneously checked for # 404 because an unrouted URI was being hit. It is correct to get a 401 # error back as the role is not in the superset of roles the user # has at the time of the Authorization. self.put(path=url, body=body, expected_status=http.client.UNAUTHORIZED) def test_bad_authorizing_roles_name(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] url = self._authorize_request_token(request_key) body = {'roles': [{'name': 'fake_name'}]} self.put(path=url, body=body, expected_status=http.client.NOT_FOUND) def test_no_authorizing_user_id(self): consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url, headers = self._create_request_token(consumer, self.project_id) content = self.post( url, 
headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] request_token = oauth1.Token(request_key, request_secret) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) verifier = resp.result['token']['oauth_verifier'] request_token.set_verifier(verifier) request_token_created = PROVIDERS.oauth_api.get_request_token( request_key.decode('utf-8') ) request_token_created.update({'authorizing_user_id': ''}) # Update the request token that is created instead of mocking # the whole token object to focus on what's we want to test # here and avoid any other factors that will result in the same # exception. with mock.patch.object( PROVIDERS.oauth_api, 'get_request_token' ) as mock_token: mock_token.return_value = request_token_created url, headers = self._create_access_token(consumer, request_token) self.post( url, headers=headers, expected_status=http.client.UNAUTHORIZED ) def test_validate_requet_token_request_failed(self): self.config_fixture.config(debug=True, insecure_debug=True) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] consumer = {'key': consumer_id, 'secret': consumer_secret} url = '/OS-OAUTH1/request_token' auth_header = ( 'OAuth oauth_version="1.0", oauth_consumer_key=' + consumer_id ) faked_header = { 'Authorization': auth_header, 'requested_project_id': self.project_id, } resp = self.post( url, headers=faked_header, expected_status=http.client.BAD_REQUEST ) resp_data = jsonutils.loads(resp.body) self.assertIn( 'Validation failed with errors', resp_data['error']['message'] ) def test_expired_authorizing_request_token(self): with freezegun.freeze_time(timeutils.utcnow()) as frozen_time: self.config_fixture.config( 
group='oauth1', request_token_duration=1 ) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['key']) url, headers = self._create_request_token( self.consumer, self.project_id ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} frozen_time.tick( delta=datetime.timedelta( seconds=CONF.oauth1.request_token_duration + 1 ) ) self.put(url, body=body, expected_status=http.client.UNAUTHORIZED) def test_expired_creating_keystone_token(self): with freezegun.freeze_time(timeutils.utcnow()) as frozen_time: self.config_fixture.config(group='oauth1', access_token_duration=1) consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['key']) url, headers = self._create_request_token( self.consumer, self.project_id ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) self.verifier = resp.result['token']['oauth_verifier'] 
self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token( self.consumer, self.request_token ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) url, headers, body = self._get_oauth_token( self.consumer, self.access_token ) frozen_time.tick( delta=datetime.timedelta( seconds=CONF.oauth1.access_token_duration + 1 ) ) self.post( url, headers=headers, body=body, expected_status=http.client.UNAUTHORIZED, ) def test_missing_oauth_headers(self): endpoint = '/OS-OAUTH1/request_token' client = oauth1.Client( uuid.uuid4().hex, client_secret=uuid.uuid4().hex, signature_method=oauth1.SIG_HMAC, callback_uri="oob", ) headers = {'requested_project_id': uuid.uuid4().hex} _url, headers, _body = client.sign( self.base_url + endpoint, http_method='POST', headers=headers ) # NOTE(stevemar): To simulate this error, we remove the Authorization # header from the post request. 
del headers['Authorization'] self.post( endpoint, headers=headers, expected_status=http.client.INTERNAL_SERVER_ERROR, ) class OAuthNotificationTests( OAuth1Tests, test_notifications.BaseNotificationTest ): def test_create_consumer(self): consumer_ref = self._create_single_consumer() self._assert_notify_sent( consumer_ref['id'], test_notifications.CREATED_OPERATION, 'OS-OAUTH1:consumer', ) self._assert_last_audit( consumer_ref['id'], test_notifications.CREATED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT, ) def test_update_consumer(self): consumer_ref = self._create_single_consumer() update_ref = {'consumer': {'description': uuid.uuid4().hex}} PROVIDERS.oauth_api.update_consumer(consumer_ref['id'], update_ref) self._assert_notify_sent( consumer_ref['id'], test_notifications.UPDATED_OPERATION, 'OS-OAUTH1:consumer', ) self._assert_last_audit( consumer_ref['id'], test_notifications.UPDATED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT, ) def test_delete_consumer(self): consumer_ref = self._create_single_consumer() PROVIDERS.oauth_api.delete_consumer(consumer_ref['id']) self._assert_notify_sent( consumer_ref['id'], test_notifications.DELETED_OPERATION, 'OS-OAUTH1:consumer', ) self._assert_last_audit( consumer_ref['id'], test_notifications.DELETED_OPERATION, 'OS-OAUTH1:consumer', cadftaxonomy.SECURITY_ACCOUNT, ) def test_oauth_flow_notifications(self): """Test to ensure notifications are sent for oauth tokens. This test is very similar to test_oauth_flow, however there are additional checks in this test for ensuring that notifications for request token creation, and access token creation/deletion are emitted. 
""" consumer = self._create_single_consumer() consumer_id = consumer['id'] consumer_secret = consumer['secret'] self.consumer = {'key': consumer_id, 'secret': consumer_secret} self.assertIsNotNone(self.consumer['secret']) url, headers = self._create_request_token( self.consumer, self.project_id ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) request_key = credentials['oauth_token'][0] request_secret = credentials['oauth_token_secret'][0] self.request_token = oauth1.Token(request_key, request_secret) self.assertIsNotNone(self.request_token.key) request_key_string = request_key.decode() # Test to ensure the create request token notification is sent self._assert_notify_sent( request_key_string, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:request_token', ) self._assert_last_audit( request_key_string, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:request_token', cadftaxonomy.SECURITY_CREDENTIAL, ) url = self._authorize_request_token(request_key) body = {'roles': [{'id': self.role_id}]} resp = self.put(url, body=body, expected_status=http.client.OK) self.verifier = resp.result['token']['oauth_verifier'] self.assertTrue(all(i in base.VERIFIER_CHARS for i in self.verifier)) self.assertEqual(8, len(self.verifier)) self.request_token.set_verifier(self.verifier) url, headers = self._create_access_token( self.consumer, self.request_token ) content = self.post( url, headers=headers, response_content_type='application/x-www-form-urlencoded', ) credentials = _urllib_parse_qs_text_keys(content.result) access_key = credentials['oauth_token'][0] access_secret = credentials['oauth_token_secret'][0] self.access_token = oauth1.Token(access_key, access_secret) self.assertIsNotNone(self.access_token.key) access_key_string = access_key.decode() # Test to ensure the create access token notification is sent self._assert_notify_sent( access_key_string, 
test_notifications.CREATED_OPERATION, 'OS-OAUTH1:access_token', ) self._assert_last_audit( access_key_string, test_notifications.CREATED_OPERATION, 'OS-OAUTH1:access_token', cadftaxonomy.SECURITY_CREDENTIAL, ) resp = self.delete( '/users/%(user)s/OS-OAUTH1/access_tokens/%(auth)s' % {'user': self.user_id, 'auth': self.access_token.key.decode()} ) self.assertResponseStatus(resp, http.client.NO_CONTENT) # Test to ensure the delete access token notification is sent self._assert_notify_sent( access_key_string, test_notifications.DELETED_OPERATION, 'OS-OAUTH1:access_token', ) self._assert_last_audit( access_key_string, test_notifications.DELETED_OPERATION, 'OS-OAUTH1:access_token', cadftaxonomy.SECURITY_CREDENTIAL, ) class OAuthCADFNotificationTests(OAuthNotificationTests): def setUp(self): """Repeat the tests for CADF notifications.""" super().setUp() self.config_fixture.config(notification_format='cadf') class JsonHomeTests(OAuth1Tests, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'https://docs.openstack.org/api/openstack-identity/3/ext/OS-OAUTH1/1.0' '/rel/consumers': { 'href': '/OS-OAUTH1/consumers', }, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_oauth2.py0000664000175000017500000024527000000000000022702 0ustar00zuulzuul00000000000000# Copyright 2022 openStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from base64 import b64encode import http from http import client from unittest import mock from urllib import parse from cryptography.hazmat.primitives.serialization import Encoding import fixtures from oslo_log import log from oslo_serialization import jsonutils from keystone.api.os_oauth2 import AccessTokenResource from keystone.common import provider_api from keystone.common import utils from keystone import conf from keystone import exception from keystone.federation.utils import RuleProcessor from keystone.tests import unit from keystone.tests.unit import test_v3 from keystone.token.provider import Manager PROVIDERS = provider_api.ProviderAPIs LOG = log.getLogger(__name__) CONF = conf.CONF class FakeUserAppCredListCreateResource(mock.Mock): pass class OAuth2AuthnMethodsTests(test_v3.OAuth2RestfulTestCase): ACCESS_TOKEN_URL = '/OS-OAUTH2/token' def setUp(self): super().setUp() self.config_fixture.config( group='oauth2', oauth2_authn_methods=['client_secret_basic', 'tls_client_auth'], ) def _get_access_token( self, headers, data, expected_status, client_cert_content=None ): data = parse.urlencode(data).encode() kwargs = { 'headers': headers, 'noauth': True, 'convert': False, 'body': data, 'expected_status': expected_status, } if client_cert_content: kwargs.update( {'environ': {'SSL_CLIENT_CERT': client_cert_content}} ) resp = self.post(self.ACCESS_TOKEN_URL, **kwargs) return resp def _create_certificates(self): return unit.create_certificate( subject_dn=unit.create_dn( country_name='jp', state_or_province_name='tokyo', locality_name='musashino', organizational_unit_name='test', ) ) def _get_cert_content(self, cert): return cert.public_bytes(Encoding.PEM).decode('ascii') @mock.patch.object(AccessTokenResource, '_client_secret_basic') def test_secret_basic_header(self, mock_client_secret_basic): """client_secret_basic is used if a client sercret is found.""" client_id = 'client_id' client_secret = 'client_secret' b64str = ( 
b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} _ = self._get_access_token( headers=headers, data=data, expected_status=client.OK ) mock_client_secret_basic.assert_called_once_with( client_id, client_secret ) @mock.patch.object(AccessTokenResource, '_client_secret_basic') def test_secret_basic_form(self, mock_client_secret_basic): """client_secret_basic is used if a client sercret is found.""" client_id = 'client_id' client_secret = 'client_secret' headers = { 'Content-Type': 'application/x-www-form-urlencoded', } data = { 'grant_type': 'client_credentials', 'client_id': client_id, 'client_secret': client_secret, } _ = self._get_access_token( headers=headers, data=data, expected_status=client.OK ) mock_client_secret_basic.assert_called_once_with( client_id, client_secret ) @mock.patch.object(AccessTokenResource, '_client_secret_basic') def test_secret_basic_header_and_form(self, mock_client_secret_basic): """A header is used if secrets are found in a header and body.""" client_id_h = 'client_id_h' client_secret_h = 'client_secret_h' client_id_d = 'client_id_d' client_secret_d = 'client_secret_d' b64str = ( b64encode(f'{client_id_h}:{client_secret_h}'.encode()) .decode() .strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = { 'grant_type': 'client_credentials', 'client_id': client_id_d, 'client_secret': client_secret_d, } _ = self._get_access_token( headers=headers, data=data, expected_status=client.OK ) mock_client_secret_basic.assert_called_once_with( client_id_h, client_secret_h ) @mock.patch.object(AccessTokenResource, '_tls_client_auth') def test_client_cert(self, mock_tls_client_auth): """tls_client_auth is used if a certificate is found.""" client_id = 'client_id' client_cert, _ = self._create_certificates() cert_content = 
self._get_cert_content(client_cert) headers = { 'Content-Type': 'application/x-www-form-urlencoded', } data = {'grant_type': 'client_credentials', 'client_id': client_id} _ = self._get_access_token( headers=headers, data=data, expected_status=client.OK, client_cert_content=cert_content, ) mock_tls_client_auth.assert_called_once_with(client_id, cert_content) @mock.patch.object(AccessTokenResource, '_tls_client_auth') def test_secret_basic_and_client_cert(self, mock_tls_client_auth): """tls_client_auth is used if a certificate and secret are found.""" client_id_s = 'client_id_s' client_secret = 'client_secret' client_id_c = 'client_id_c' client_cert, _ = self._create_certificates() cert_content = self._get_cert_content(client_cert) b64str = ( b64encode(f'{client_id_s}:{client_secret}'.encode()) .decode() .strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = { 'grant_type': 'client_credentials', 'client_id': client_id_c, } _ = self._get_access_token( headers=headers, data=data, expected_status=client.OK, client_cert_content=cert_content, ) mock_tls_client_auth.assert_called_once_with(client_id_c, cert_content) class OAuth2SecretBasicTests(test_v3.OAuth2RestfulTestCase): APP_CRED_CREATE_URL = '/users/%(user_id)s/application_credentials' APP_CRED_LIST_URL = '/users/%(user_id)s/application_credentials' APP_CRED_DELETE_URL = ( '/users/%(user_id)s/application_credentials/%(app_cred_id)s' ) APP_CRED_SHOW_URL = ( '/users/%(user_id)s/application_credentials/%(app_cred_id)s' ) ACCESS_TOKEN_URL = '/OS-OAUTH2/token' def setUp(self): super().setUp() log.set_defaults( logging_context_format_string='%(asctime)s.%(msecs)03d %(' 'color)s%(levelname)s %(name)s [^[[' '01;36m%(request_id)s ^[[00;36m%(' 'project_name)s %(user_name)s%(' 'color)s] ^[[01;35m%(instance)s%(' 'color)s%(message)s^[[00m', default_log_levels=log.DEBUG, ) CONF.log_opt_values(LOG, log.DEBUG) LOG.debug(f'is_debug_enabled: 
{log.is_debug_enabled(CONF)}') LOG.debug(f'get_default_log_levels: {log.get_default_log_levels()}') self.config_fixture.config( group='oauth2', oauth2_authn_methods=['client_secret_basic'], ) def _assert_error_resp(self, error_resp, error_msg, error_description): resp_keys = ('error', 'error_description') for key in resp_keys: self.assertIsNotNone(error_resp.get(key, None)) self.assertEqual(error_msg, error_resp.get('error')) self.assertEqual( error_description, error_resp.get('error_description') ) def _create_app_cred(self, user_id, app_cred_name): resp = self.post( self.APP_CRED_CREATE_URL % {'user_id': user_id}, body={'application_credential': {'name': app_cred_name}}, ) LOG.debug(f'resp: {resp}') app_ref = resp.result['application_credential'] return app_ref def _delete_app_cred(self, user_id, app_cred_id): resp = self.delete( self.APP_CRED_CREATE_URL % {'user_id': user_id, 'app_cred_id': app_cred_id} ) LOG.debug(f'resp: {resp}') def _get_access_token( self, app_cred, b64str, headers, data, expected_status ): if b64str is None: client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()) .decode() .strip() ) if headers is None: headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } if data is None: data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() resp = self.post( self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, expected_status=expected_status, ) return resp def _get_access_token_method_not_allowed(self, app_cred, http_func): client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() resp = http_func( 
self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, expected_status=client.METHOD_NOT_ALLOWED, ) LOG.debug(f'response: {resp}') json_resp = jsonutils.loads(resp.body) return json_resp def test_get_access_token(self): """Test case when an access token can be successfully obtain.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) resp = self._get_access_token( app_cred, b64str=None, headers=None, data=None, expected_status=client.OK, ) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) def test_get_access_token_form(self): """Test case when there is no client authorization.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) headers = { 'Content-Type': 'application/x-www-form-urlencoded', } data = { 'grant_type': 'client_credentials', 'client_id': app_cred.get('id'), 'client_secret': app_cred.get('secret'), } resp = self._get_access_token( app_cred, b64str=None, headers=headers, data=data, expected_status=client.OK, ) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) def test_get_access_token_auth_type_is_not_basic(self): """Test case when auth_type is not basic.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) client_id = app_cred.get('id') base = ( 'username="%s", realm="%s", nonce="%s", uri="%s", ' 'response="%s"' % (client_id, 'realm', 'nonce', 'path', 'responding') ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Digest {base}', } error = 'invalid_client' error_description = 'Client authentication failed.' 
resp = self._get_access_token( app_cred, b64str=None, headers=headers, data=None, expected_status=client.UNAUTHORIZED, ) self.assertNotEmpty(resp.headers.get("WWW-Authenticate")) self.assertEqual( 'Keystone uri="http://localhost/v3"', resp.headers.get("WWW-Authenticate"), ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_without_client_id(self): """Test case when there is no client_id.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) client_secret = app_cred.get('secret') b64str = b64encode(f':{client_secret}'.encode()).decode().strip() error = 'invalid_client' error_description = 'Client authentication failed.' resp = self._get_access_token( app_cred, b64str=b64str, headers=None, data=None, expected_status=client.UNAUTHORIZED, ) self.assertNotEmpty(resp.headers.get("WWW-Authenticate")) self.assertEqual( 'Keystone uri="http://localhost/v3"', resp.headers.get("WWW-Authenticate"), ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_without_client_secret(self): """Test case when there is no client_secret.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) client_id = app_cred.get('id') b64str = b64encode(f'{client_id}:'.encode()).decode().strip() error = 'invalid_client' error_description = 'Client authentication failed.' 
resp = self._get_access_token( app_cred, b64str=b64str, headers=None, data=None, expected_status=client.UNAUTHORIZED, ) self.assertNotEmpty(resp.headers.get("WWW-Authenticate")) self.assertEqual( 'Keystone uri="http://localhost/v3"', resp.headers.get("WWW-Authenticate"), ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_without_grant_type(self): """Test case when there is no grant_type.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) data = {} error = 'invalid_request' error_description = 'The parameter grant_type is required.' resp = self._get_access_token( app_cred, b64str=None, headers=None, data=data, expected_status=client.BAD_REQUEST, ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_blank_grant_type(self): """Test case when grant_type is blank.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) data = {'grant_type': ''} error = 'unsupported_grant_type' error_description = ( f'The parameter grant_type {data["grant_type"]} is not supported.' 
) resp = self._get_access_token( app_cred, b64str=None, headers=None, data=data, expected_status=client.BAD_REQUEST, ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_grant_type_is_not_client_credentials(self): """Test case when grant_type is not client_credentials.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) data = {'grant_type': 'not_client_credentials'} error = 'unsupported_grant_type' error_description = ( f'The parameter grant_type {data["grant_type"]} is not supported.' ) resp = self._get_access_token( app_cred, b64str=None, headers=None, data=data, expected_status=client.BAD_REQUEST, ) json_resp = jsonutils.loads(resp.body) LOG.debug(f'error: {json_resp.get("error")}') LOG.debug(f'error_description: {json_resp.get("error_description")}') self.assertEqual(error, json_resp.get('error')) self.assertEqual(error_description, json_resp.get('error_description')) def test_get_access_token_failed_401(self): """Test case when client authentication failed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) error = 'invalid_client' client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() with mock.patch( 'keystone.api._shared.authentication.authenticate_for_token' ) as co_mock: co_mock.side_effect = exception.Unauthorized( 'client is unauthorized' ) resp = self.post( self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, noauth=True, 
expected_status=client.UNAUTHORIZED, ) self.assertNotEmpty(resp.headers.get("WWW-Authenticate")) self.assertEqual( 'Keystone uri="http://localhost/v3"', resp.headers.get("WWW-Authenticate"), ) LOG.debug(f'response: {resp}') json_resp = jsonutils.loads(resp.body) self.assertEqual(error, json_resp.get('error')) LOG.debug(f'error: {json_resp.get("error")}') def test_get_access_token_failed_400(self): """Test case when the called API is incorrect.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) error = 'invalid_request' client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() with mock.patch( 'keystone.api._shared.authentication.authenticate_for_token' ) as co_mock: co_mock.side_effect = exception.ValidationError( 'Auth method is invalid' ) resp = self.post( self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, noauth=True, expected_status=client.BAD_REQUEST, ) LOG.debug(f'response: {resp}') json_resp = jsonutils.loads(resp.body) self.assertEqual(error, json_resp.get('error')) LOG.debug(f'error: {json_resp.get("error")}') def test_get_access_token_failed_500_other(self): """Test case when unexpected error.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) error = 'other_error' client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() with mock.patch( 'keystone.api._shared.authentication.authenticate_for_token' ) as 
co_mock: co_mock.side_effect = exception.UnexpectedError( 'unexpected error.' ) resp = self.post( self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, noauth=True, expected_status=client.INTERNAL_SERVER_ERROR, ) LOG.debug(f'response: {resp}') json_resp = jsonutils.loads(resp.body) self.assertEqual(error, json_resp.get('error')) def test_get_access_token_failed_500(self): """Test case when internal server error.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) error = 'other_error' client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } data = {'grant_type': 'client_credentials'} data = parse.urlencode(data).encode() with mock.patch( 'keystone.api._shared.authentication.authenticate_for_token' ) as co_mock: co_mock.side_effect = Exception('Internal server is invalid') resp = self.post( self.ACCESS_TOKEN_URL, headers=headers, convert=False, body=data, noauth=True, expected_status=client.INTERNAL_SERVER_ERROR, ) LOG.debug(f'response: {resp}') json_resp = jsonutils.loads(resp.body) self.assertEqual(error, json_resp.get('error')) def test_get_access_token_method_get_not_allowed(self): """Test case when the request is get method that is not allowed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) json_resp = self._get_access_token_method_not_allowed( app_cred, self.get ) self.assertEqual('other_error', json_resp.get('error')) self.assertEqual( 'The method is not allowed for the requested URL.', json_resp.get('error_description'), ) def test_get_access_token_method_patch_not_allowed(self): """Test case when the request is patch method that is not allowed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) json_resp = 
self._get_access_token_method_not_allowed( app_cred, self.patch ) self.assertEqual('other_error', json_resp.get('error')) self.assertEqual( 'The method is not allowed for the requested URL.', json_resp.get('error_description'), ) def test_get_access_token_method_put_not_allowed(self): """Test case when the request is put method that is not allowed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) json_resp = self._get_access_token_method_not_allowed( app_cred, self.put ) self.assertEqual('other_error', json_resp.get('error')) self.assertEqual( 'The method is not allowed for the requested URL.', json_resp.get('error_description'), ) def test_get_access_token_method_delete_not_allowed(self): """Test case when the request is delete method that is not allowed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) json_resp = self._get_access_token_method_not_allowed( app_cred, self.delete ) self.assertEqual('other_error', json_resp.get('error')) self.assertEqual( 'The method is not allowed for the requested URL.', json_resp.get('error_description'), ) def test_get_access_token_method_head_not_allowed(self): """Test case when the request is head method that is not allowed.""" client_name = 'client_name_test' app_cred = self._create_app_cred(self.user_id, client_name) client_id = app_cred.get('id') client_secret = app_cred.get('secret') b64str = ( b64encode(f'{client_id}:{client_secret}'.encode()).decode().strip() ) headers = { 'Content-Type': 'application/x-www-form-urlencoded', 'Authorization': f'Basic {b64str}', } self.head( self.ACCESS_TOKEN_URL, headers=headers, convert=False, expected_status=client.METHOD_NOT_ALLOWED, ) class OAuth2CertificateTests(test_v3.OAuth2RestfulTestCase): ACCESS_TOKEN_URL = '/OS-OAUTH2/token' def setUp(self): super().setUp() self.log_fix = self.useFixture(fixtures.FakeLogger(level=log.DEBUG)) self.config_fixture.config( group='oauth2', 
oauth2_authn_methods=['tls_client_auth'] ) self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_mapping' ) ( self.oauth2_user, self.oauth2_user_domain, _, ) = self._create_project_user() *_, self.client_cert, self.client_key = self._create_certificates( client_dn=unit.create_dn( user_id=self.oauth2_user.get('id'), common_name=self.oauth2_user.get('name'), email_address=self.oauth2_user.get('email'), domain_component=self.oauth2_user_domain.get('id'), organization_name=self.oauth2_user_domain.get('name'), ) ) def _create_project_user(self, no_roles=False): new_domain_ref = unit.new_domain_ref() PROVIDERS.resource_api.create_domain( new_domain_ref['id'], new_domain_ref ) new_project_ref = unit.new_project_ref(domain_id=self.domain_id) PROVIDERS.resource_api.create_project( new_project_ref['id'], new_project_ref ) new_user = unit.create_user( PROVIDERS.identity_api, domain_id=new_domain_ref['id'], project_id=new_project_ref['id'], ) if not no_roles: PROVIDERS.assignment_api.create_grant( self.role['id'], user_id=new_user['id'], project_id=new_project_ref['id'], ) return new_user, new_domain_ref, new_project_ref def _create_certificates( self, root_dn=None, server_dn=None, client_dn=None ): root_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='root', ) if root_dn: root_subj = unit.update_dn(root_subj, root_dn) root_cert, root_key = unit.create_certificate(root_subj) keystone_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organization_name='fujitsu', organizational_unit_name='test', common_name='keystone.local', ) if server_dn: keystone_subj = unit.update_dn(keystone_subj, server_dn) ks_cert, ks_key = unit.create_certificate( keystone_subj, ca=root_cert, ca_key=root_key ) client_subj = unit.create_dn( country_name='jp', state_or_province_name='kanagawa', 
locality_name='kawasaki', organizational_unit_name='test', ) if client_dn: client_subj = unit.update_dn(client_subj, client_dn) client_cert, client_key = unit.create_certificate( client_subj, ca=root_cert, ca_key=root_key ) return root_cert, root_key, ks_cert, ks_key, client_cert, client_key def _create_mapping(self, id='oauth2_mapping', dn_rules=None): rules = [] if not dn_rules: dn_rules = [ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] for info in dn_rules: index = 0 local_user = {} remote = [] for k in info: if k == 'user.name': local_user['name'] = '{%s}' % index remote.append({'type': info.get(k)}) index += 1 elif k == 'user.id': local_user['id'] = '{%s}' % index remote.append({'type': info.get(k)}) index += 1 elif k == 'user.email': local_user['email'] = '{%s}' % index remote.append({'type': info.get(k)}) index += 1 elif k == 'user.domain.name' or k == 'user.domain.id': if not local_user.get('domain'): local_user['domain'] = {} if k == 'user.domain.name': local_user['domain']['name'] = '{%s}' % index remote.append({'type': info.get(k)}) index += 1 else: local_user['domain']['id'] = '{%s}' % index remote.append({'type': info.get(k)}) index += 1 else: remote.append({'type': k, 'any_one_of': info.get(k)}) rule = {'local': [{'user': local_user}], 'remote': remote} rules.append(rule) mapping = {'id': id, 'rules': rules} PROVIDERS.federation_api.create_mapping(mapping['id'], mapping) def _get_access_token( self, client_id=None, client_cert_content=None, expected_status=http.client.OK, ): headers = { 'Content-Type': 'application/x-www-form-urlencoded', } data = {'grant_type': 'client_credentials'} if client_id: data.update({'client_id': client_id}) data = parse.urlencode(data).encode() kwargs = { 'headers': headers, 'noauth': True, 'convert': 
False, 'body': data, 'expected_status': expected_status, } if client_cert_content: kwargs.update( {'environ': {'SSL_CLIENT_CERT': client_cert_content}} ) resp = self.post(self.ACCESS_TOKEN_URL, **kwargs) return resp def _get_cert_content(self, cert): return cert.public_bytes(Encoding.PEM).decode('ascii') def assertUnauthorizedResp(self, resp): LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('invalid_client', json_resp['error']) self.assertEqual( 'Client authentication failed.', json_resp['error_description'] ) def test_get_access_token_project_scope(self): """Test case when an access token can be successfully obtain.""" self._create_mapping() user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = 
verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_mapping_config(self): """Test case when an access token can be successfully obtain.""" self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_custom' ) self._create_mapping( id='oauth2_custom', dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_DC', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ], ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id='test_UID', common_name=user.get('name'), domain_component=user_domain.get('name'), organization_name='test_O', ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_mapping' ) def 
test_get_access_token_mapping_multi_ca(self): """Test case when an access token can be successfully obtain.""" self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_custom' ) self._create_mapping( id='oauth2_custom', dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['rootA', 'rootB'], }, { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_DC', 'SSL_CLIENT_ISSUER_DN_CN': ['rootC'], }, ], ) # CA rootA OK user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( root_dn=unit.create_dn(common_name='rootA'), client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ), ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) 
check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) # CA rootB OK user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( root_dn=unit.create_dn(common_name='rootB'), client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ), ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) # CA rootC OK user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( root_dn=unit.create_dn(common_name='rootC'), client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id='test_UID', common_name=user.get('name'), 
domain_component=user_domain.get('name'), organization_name='test_O', ), ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) # CA not found NG user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( root_dn=unit.create_dn(common_name='root_other'), client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ), ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_mapping' ) def 
test_get_access_token_no_default_mapping(self): user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'mapping id %s is not found. ' % 'oauth2_mapping', self.log_fix.output, ) def test_get_access_token_no_custom_mapping(self): self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_custom' ) self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'mapping id %s is not found. 
' % 'oauth2_custom', self.log_fix.output, ) self.config_fixture.config( group='oauth2', oauth2_cert_dn_mapping_id='oauth2_mapping' ) def test_get_access_token_ignore_userid(self): """Test case when an access token can be successfully obtain.""" self._create_mapping( dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id') + "_diff", common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_ignore_username(self): """Test 
case when an access token can be successfully obtain.""" self._create_mapping( dn_rules=[ { 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_ignore_email(self): """Test case when an access token can be successfully obtain.""" self._create_mapping( dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 
'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_ignore_domain_id(self): """Test case when an access token can be successfully obtain.""" self._create_mapping( dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.name': 'SSL_CLIENT_SUBJECT_DN_O', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( 
country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id') + "_diff", organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_ignore_domain_name(self): """Test case when an access token can be successfully obtain.""" self._create_mapping( dn_rules=[ { 'user.name': 'SSL_CLIENT_SUBJECT_DN_CN', 'user.id': 'SSL_CLIENT_SUBJECT_DN_UID', 'user.email': 'SSL_CLIENT_SUBJECT_DN_EMAILADDRESS', 'user.domain.id': 'SSL_CLIENT_SUBJECT_DN_DC', 'SSL_CLIENT_ISSUER_DN_CN': ['root'], } ] ) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), 
email_address=user.get('email'), domain_component=user_domain.get('id'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_ignore_all(self): """Test case when an access token can be successfully obtain.""" self._create_mapping(dn_rules=[{'SSL_CLIENT_ISSUER_DN_CN': ['root']}]) user, user_domain, user_project = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id') + "_diff", common_name=user.get('name') + "_diff", email_address=user.get('email') + "_diff", domain_component=user_domain.get('id') + "_diff", ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertIn('access_token', json_resp) self.assertEqual('Bearer', json_resp['token_type']) self.assertEqual(3600, 
json_resp['expires_in']) verify_resp = self.get( '/auth/tokens', headers={ 'X-Subject-Token': json_resp['access_token'], 'X-Auth-Token': json_resp['access_token'], }, ) self.assertIn('token', verify_resp.result) self.assertIn('oauth2_credential', verify_resp.result['token']) self.assertIn('roles', verify_resp.result['token']) self.assertIn('project', verify_resp.result['token']) self.assertIn('catalog', verify_resp.result['token']) self.assertEqual( user_project.get('id'), verify_resp.result['token']['project']['id'], ) check_oauth2 = verify_resp.result['token']['oauth2_credential'] self.assertEqual( utils.get_certificate_thumbprint(cert_content), check_oauth2['x5t#S256'], ) def test_get_access_token_no_roles_project_scope(self): self._create_mapping() user, user_domain, _ = self._create_project_user(no_roles=True) *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) LOG.debug(resp) def test_get_access_token_no_default_project_id(self): self._create_mapping() user, user_domain, _ = self._create_project_user(no_roles=True) user['default_project_id'] = None *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) _ = 
self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) def test_get_access_token_without_client_id(self): self._create_mapping() cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'failed to get a client_id from the request.', self.log_fix.output, ) def test_get_access_token_without_client_cert(self): self._create_mapping() resp = self._get_access_token( client_id=self.oauth2_user.get('id'), expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'failed to get client credentials from the request.', self.log_fix.output, ) @mock.patch.object(utils, 'get_certificate_subject_dn') def test_get_access_token_failed_to_get_cert_subject_dn( self, mock_get_certificate_subject_dn ): self._create_mapping() mock_get_certificate_subject_dn.side_effect = ( exception.ValidationError('Boom!') ) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'failed to get the subject DN from the certificate.', self.log_fix.output, ) @mock.patch.object(utils, 'get_certificate_issuer_dn') def test_get_access_token_failed_to_get_cert_issuer_dn( self, mock_get_certificate_issuer_dn ): self._create_mapping() mock_get_certificate_issuer_dn.side_effect = exception.ValidationError( 'Boom!' 
) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'failed to get the issuer DN from the certificate.', self.log_fix.output, ) def test_get_access_token_user_not_exist(self): self._create_mapping() cert_content = self._get_cert_content(self.client_cert) user_id_not_exist = 'user_id_not_exist' resp = self._get_access_token( client_id=user_id_not_exist, client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: ' 'the user does not exist. user id: %s' % user_id_not_exist, self.log_fix.output, ) def test_get_access_token_cert_dn_not_match_user_id(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id') + "_diff", common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.' 
% ('user id', user.get('id') + '_diff', user.get('id')), self.log_fix.output, ) def test_get_access_token_cert_dn_not_match_user_name(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name') + "_diff", email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.' % ('user name', user.get('name') + '_diff', user.get('name')), self.log_fix.output, ) def test_get_access_token_cert_dn_not_match_email(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email') + "_diff", domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.' 
% ('user email', user.get('email') + '_diff', user.get('email')), self.log_fix.output, ) def test_get_access_token_cert_dn_not_match_domain_id(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id') + "_diff", organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.' % ( 'user domain id', user_domain.get('id') + '_diff', user_domain.get('id'), ), self.log_fix.output, ) def test_get_access_token_cert_dn_not_match_domain_name(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name') + "_diff", ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: %s check failed. ' 'DN value: %s, DB value: %s.' 
% ( 'user domain name', user_domain.get('name') + '_diff', user_domain.get('name'), ), self.log_fix.output, ) def test_get_access_token_cert_dn_missing_user_id(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) def test_get_access_token_cert_dn_missing_user_name(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), email_address=user.get('email'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) def test_get_access_token_cert_dn_missing_email(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', 
user_id=user.get('id'), common_name=user.get('name'), domain_component=user_domain.get('id'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) def test_get_access_token_cert_dn_missing_domain_id(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), organization_name=user_domain.get('name'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) def test_get_access_token_cert_dn_missing_domain_name(self): self._create_mapping() user, user_domain, _ = self._create_project_user() *_, client_cert, _ = self._create_certificates( client_dn=unit.create_dn( country_name='jp', state_or_province_name='kanagawa', locality_name='kawasaki', organizational_unit_name='test', user_id=user.get('id'), common_name=user.get('name'), email_address=user.get('email'), domain_component=user_domain.get('id'), ) ) cert_content = self._get_cert_content(client_cert) resp = self._get_access_token( client_id=user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) self.assertUnauthorizedResp(resp) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', 
self.log_fix.output, ) @mock.patch.object(Manager, 'issue_token') def test_get_access_token_issue_token_ks_error_400(self, mock_issue_token): self._create_mapping() err_msg = 'Boom!' mock_issue_token.side_effect = exception.ValidationError(err_msg) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.BAD_REQUEST, ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('invalid_request', json_resp['error']) self.assertEqual(err_msg, json_resp['error_description']) self.assertIn(err_msg, self.log_fix.output) @mock.patch.object(Manager, 'issue_token') def test_get_access_token_issue_token_ks_error_401(self, mock_issue_token): self._create_mapping() err_msg = 'Boom!' mock_issue_token.side_effect = exception.Unauthorized(err_msg) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.UNAUTHORIZED, ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('invalid_client', json_resp['error']) self.assertEqual( 'The request you have made requires authentication.', json_resp['error_description'], ) @mock.patch.object(Manager, 'issue_token') def test_get_access_token_issue_token_ks_error_other( self, mock_issue_token ): self._create_mapping() err_msg = 'Boom!' 
mock_issue_token.side_effect = exception.NotImplemented(err_msg) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=exception.NotImplemented.code, ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('other_error', json_resp['error']) self.assertEqual( 'An unknown error occurred and failed to get an OAuth2.0 ' 'access token.', json_resp['error_description'], ) @mock.patch.object(Manager, 'issue_token') def test_get_access_token_issue_token_other_exception( self, mock_issue_token ): self._create_mapping() err_msg = 'Boom!' mock_issue_token.side_effect = Exception(err_msg) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.INTERNAL_SERVER_ERROR, ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('other_error', json_resp['error']) self.assertEqual(err_msg, json_resp['error_description']) @mock.patch.object(RuleProcessor, 'process') def test_get_access_token_process_other_exception(self, mock_process): self._create_mapping() err_msg = 'Boom!' 
mock_process.side_effect = Exception(err_msg) cert_content = self._get_cert_content(self.client_cert) resp = self._get_access_token( client_id=self.oauth2_user.get('id'), client_cert_content=cert_content, expected_status=http.client.INTERNAL_SERVER_ERROR, ) LOG.debug(resp) json_resp = jsonutils.loads(resp.body) self.assertEqual('other_error', json_resp['error']) self.assertEqual(err_msg, json_resp['error_description']) self.assertIn( 'Get OAuth2.0 Access Token API: mapping rule process failed.', self.log_fix.output, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_os_revoke.py0000664000175000017500000002010300000000000023456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import http.client from unittest import mock import uuid import freezegun from oslo_db import exception as oslo_db_exception from oslo_utils import timeutils from testtools import matchers from keystone.common import provider_api from keystone.common import utils from keystone.models import revoke_model from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs def _future_time_string(): expire_delta = datetime.timedelta(seconds=1000) future_time = timeutils.utcnow() + expire_delta return utils.isotime(future_time) class OSRevokeTests(test_v3.RestfulTestCase, test_v3.JsonHomeTestMixin): JSON_HOME_DATA = { 'https://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0' '/rel/events': { 'href': '/OS-REVOKE/events', }, } def test_get_empty_list(self): resp = self.get('/OS-REVOKE/events') self.assertEqual([], resp.json_body['events']) def _blank_event(self): return {} # The two values will be the same with the exception of # 'issued_before' which is set when the event is recorded. def assertReportedEventMatchesRecorded(self, event, sample, before_time): after_time = timeutils.utcnow() event_issued_before = timeutils.normalize_time( timeutils.parse_isotime(event['issued_before']) ) self.assertLessEqual( before_time, event_issued_before, 'invalid event issued_before time; %s is not later than %s.' % ( utils.isotime(event_issued_before, subsecond=True), utils.isotime(before_time, subsecond=True), ), ) self.assertLessEqual( event_issued_before, after_time, 'invalid event issued_before time; %s is not earlier than %s.' 
% ( utils.isotime(event_issued_before, subsecond=True), utils.isotime(after_time, subsecond=True), ), ) del event['issued_before'] del event['revoked_at'] self.assertEqual(sample, event) def test_revoked_list_self_url(self): revoked_list_url = '/OS-REVOKE/events' resp = self.get(revoked_list_url) links = resp.json_body['links'] self.assertThat(links['self'], matchers.EndsWith(revoked_list_url)) def test_revoked_token_in_list(self): audit_id = uuid.uuid4().hex sample = self._blank_event() sample['audit_id'] = str(audit_id) before_time = timeutils.utcnow().replace(microsecond=0) PROVIDERS.revoke_api.revoke_by_audit_id(audit_id) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_disabled_project_in_list(self): project_id = uuid.uuid4().hex sample = dict() sample['project_id'] = str(project_id) before_time = timeutils.utcnow().replace(microsecond=0) PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(project_id=project_id) ) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_disabled_domain_in_list(self): domain_id = uuid.uuid4().hex sample = dict() sample['domain_id'] = str(domain_id) before_time = timeutils.utcnow().replace(microsecond=0) PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(domain_id=domain_id) ) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) self.assertReportedEventMatchesRecorded(events[0], sample, before_time) def test_list_since_invalid(self): self.get( '/OS-REVOKE/events?since=blah', expected_status=http.client.BAD_REQUEST, ) def test_list_since_valid(self): resp = self.get('/OS-REVOKE/events?since=2013-02-27T18:30:59.999999Z') events = resp.json_body['events'] self.assertEqual(0, len(events)) def 
test_since_future_time_no_events(self): domain_id = uuid.uuid4().hex sample = dict() sample['domain_id'] = str(domain_id) PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(domain_id=domain_id) ) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertEqual(1, len(events)) resp = self.get('/OS-REVOKE/events?since=%s' % _future_time_string()) events = resp.json_body['events'] self.assertEqual([], events) def test_revoked_at_in_list(self): time = timeutils.utcnow() with freezegun.freeze_time(time) as frozen_datetime: revoked_at = timeutils.utcnow() # Given or not, `revoked_at` will always be set in the backend. PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(revoked_at=revoked_at) ) frozen_datetime.tick(delta=datetime.timedelta(seconds=1)) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertThat(events, matchers.HasLength(1)) # Strip off the microseconds from `revoked_at`. self.assertTimestampEqual( utils.isotime(revoked_at), events[0]['revoked_at'] ) def test_access_token_id_not_in_event(self): ref = {'description': uuid.uuid4().hex} resp = self.post('/OS-OAUTH1/consumers', body={'consumer': ref}) consumer_id = resp.result['consumer']['id'] PROVIDERS.oauth_api.delete_consumer(consumer_id) resp = self.get('/OS-REVOKE/events') events = resp.json_body['events'] self.assertThat(events, matchers.HasLength(1)) event = events[0] self.assertEqual(consumer_id, event['OS-OAUTH1:consumer_id']) # `OS-OAUTH1:access_token_id` is None and won't be returned to # end user. self.assertNotIn('OS-OAUTH1:access_token_id', event) def test_retries_on_deadlock(self): patcher = mock.patch( 'sqlalchemy.orm.query.Query.delete', autospec=True ) # NOTE(mnikolaenko): raise 2 deadlocks and back to normal work of # method. Two attempts is enough to check that retry decorator works. 
# Otherwise it will take very much time to pass this test class FakeDeadlock: def __init__(self, mock_patcher): self.deadlock_count = 2 self.mock_patcher = mock_patcher self.patched = True def __call__(self, *args, **kwargs): if self.deadlock_count > 1: self.deadlock_count -= 1 else: self.mock_patcher.stop() self.patched = False raise oslo_db_exception.DBDeadlock sql_delete_mock = patcher.start() side_effect = FakeDeadlock(patcher) sql_delete_mock.side_effect = side_effect try: PROVIDERS.revoke_api.revoke( revoke_model.RevokeEvent(user_id=uuid.uuid4().hex) ) finally: if side_effect.patched: patcher.stop() call_count = sql_delete_mock.call_count # initial attempt + 1 retry revoke_attempt_count = 2 self.assertEqual(call_count, revoke_attempt_count) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_policy.py0000664000175000017500000000462100000000000022770 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import http.client import json import uuid from keystone.common import provider_api from keystone.tests import unit from keystone.tests.unit import test_v3 PROVIDERS = provider_api.ProviderAPIs class PolicyTestCase(test_v3.RestfulTestCase): """Test policy CRUD.""" def setUp(self): super().setUp() self.policy = unit.new_policy_ref() self.policy_id = self.policy['id'] PROVIDERS.policy_api.create_policy(self.policy_id, self.policy.copy()) # policy crud tests def test_create_policy(self): """Call ``POST /policies``.""" ref = unit.new_policy_ref() r = self.post('/policies', body={'policy': ref}) return self.assertValidPolicyResponse(r, ref) def test_list_head_policies(self): """Call ``GET & HEAD /policies``.""" resource_url = '/policies' r = self.get(resource_url) self.assertValidPolicyListResponse(r, ref=self.policy) self.head(resource_url, expected_status=http.client.OK) def test_get_head_policy(self): """Call ``GET & HEAD /policies/{policy_id}``.""" resource_url = f'/policies/{self.policy_id}' r = self.get(resource_url) self.assertValidPolicyResponse(r, self.policy) self.head(resource_url, expected_status=http.client.OK) def test_update_policy(self): """Call ``PATCH /policies/{policy_id}``.""" self.policy['blob'] = json.dumps( { 'data': uuid.uuid4().hex, } ) r = self.patch( f'/policies/{self.policy_id}', body={'policy': self.policy}, ) self.assertValidPolicyResponse(r, self.policy) def test_delete_policy(self): """Call ``DELETE /policies/{policy_id}``.""" self.delete(f'/policies/{self.policy_id}') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_resource.py0000664000175000017500000023004000000000000023314 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from testtools import matchers from keystone.common import provider_api import keystone.conf from keystone.credential.providers import fernet as credential_fernet from keystone import exception from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.tests.unit import test_v3 from keystone.tests.unit import utils as test_utils CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class ResourceTestCase(test_v3.RestfulTestCase, test_v3.AssignmentTestMixin): """Test domains and projects.""" def setUp(self): super().setUp() self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'credential', credential_fernet.MAX_ACTIVE_KEYS, ) ) # Domain CRUD tests def test_create_domain(self): """Call ``POST /domains``.""" ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': ref}) return self.assertValidDomainResponse(r, ref) def test_create_domain_case_sensitivity(self): """Call `POST /domains`` twice with upper() and lower() cased name.""" ref = unit.new_domain_ref() # ensure the name is lowercase ref['name'] = ref['name'].lower() r = self.post('/domains', body={'domain': ref}) self.assertValidDomainResponse(r, ref) # ensure the name is uppercase ref['name'] = ref['name'].upper() r = self.post('/domains', body={'domain': ref}) self.assertValidDomainResponse(r, ref) def test_create_domain_bad_request(self): """Call ``POST /domains``.""" self.post( '/domains', body={'domain': {}}, expected_status=http.client.BAD_REQUEST, ) def test_create_domain_unsafe(self): """Call ``POST /domains 
with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config( group='resource', domain_name_url_safe='off' ) ref = unit.new_domain_ref(name=unsafe_name) self.post('/domains', body={'domain': ref}) for config_setting in ['new', 'strict']: self.config_fixture.config( group='resource', domain_name_url_safe=config_setting ) ref = unit.new_domain_ref(name=unsafe_name) self.post( '/domains', body={'domain': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_domain_unsafe_default(self): """Check default for unsafe names for ``POST /domains``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_domain_ref(name=unsafe_name) self.post('/domains', body={'domain': ref}) def test_create_domain_creates_is_domain_project(self): """Check a project that acts as a domain is created. Call ``POST /domains``. """ # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Retrieve its correspondent project r = self.get( '/projects/%(project_id)s' % {'project_id': r.result['domain']['id']} ) self.assertValidProjectResponse(r) # The created project has is_domain flag as True self.assertTrue(r.result['project']['is_domain']) # And its parent_id and domain_id attributes are equal self.assertIsNone(r.result['project']['parent_id']) self.assertIsNone(r.result['project']['domain_id']) def test_create_is_domain_project_creates_domain(self): """Call ``POST /projects`` is_domain and check a domain is created.""" # Create a new project that acts as a domain project_ref = unit.new_project_ref(domain_id=None, is_domain=True) r = self.post('/projects', body={'project': project_ref}) self.assertValidProjectResponse(r) # Retrieve its correspondent domain r = self.get( '/domains/{domain_id}'.format(domain_id=r.result['project']['id']) ) self.assertValidDomainResponse(r) self.assertIsNotNone(r.result['domain']) def 
test_create_domain_valid_explicit_id(self): """Call ``POST /domains`` with a valid `explicit_domain_id` set.""" ref = unit.new_domain_ref() explicit_domain_id = '9aea63518f0040c6b4518d8d2242911c' ref['explicit_domain_id'] = explicit_domain_id r = self.post('/domains', body={'domain': ref}) self.assertValidDomainResponse(r, ref) r = self.get(f'/domains/{explicit_domain_id}') self.assertValidDomainResponse(r) self.assertIsNotNone(r.result['domain']) def test_create_second_domain_valid_explicit_id_fails(self): """Call ``POST /domains`` with a valid `explicit_domain_id` set.""" ref = unit.new_domain_ref() explicit_domain_id = '9aea63518f0040c6b4518d8d2242911c' ref['explicit_domain_id'] = explicit_domain_id r = self.post('/domains', body={'domain': ref}) self.assertValidDomainResponse(r, ref) # second one should fail r = self.post( '/domains', body={'domain': ref}, expected_status=http.client.CONFLICT, ) def test_create_domain_invalid_explicit_ids(self): """Call ``POST /domains`` with various invalid explicit_domain_ids.""" ref = unit.new_domain_ref() bad_ids = [ 'bad!', '', '9aea63518f0040c', '1234567890123456789012345678901234567890', '9aea63518f0040c6b4518d8d2242911c9aea63518f0040c6b45', ] for explicit_domain_id in bad_ids: ref['explicit_domain_id'] = explicit_domain_id self.post( '/domains', body={'domain': {}}, expected_status=http.client.BAD_REQUEST, ) def test_list_head_domains(self): """Call ``GET & HEAD /domains``.""" resource_url = '/domains' r = self.get(resource_url) self.assertValidDomainListResponse( r, ref=self.domain, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_list_limit_for_domains(self): for x in range(6): domain = {'domain': unit.new_domain_ref()} self.post('/domains', body=domain) for expected_length in range(1, 6): self.config_fixture.config( group='resource', list_limit=expected_length ) response = self.get('/domains') domain_list = response.json_body['domains'] self.assertEqual(expected_length, 
len(domain_list)) def test_get_head_domain(self): """Call ``GET /domains/{domain_id}``.""" resource_url = f'/domains/{self.domain_id}' r = self.get(resource_url) self.assertValidDomainResponse(r, self.domain) self.head(resource_url, expected_status=http.client.OK) def test_update_domain(self): """Call ``PATCH /domains/{domain_id}``.""" ref = unit.new_domain_ref() del ref['id'] r = self.patch( f'/domains/{self.domain_id}', body={'domain': ref}, ) self.assertValidDomainResponse(r, ref) def test_update_domain_unsafe(self): """Call ``POST /domains/{domain_id} with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config( group='resource', domain_name_url_safe='off' ) ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] self.patch( f'/domains/{self.domain_id}', body={'domain': ref}, ) unsafe_name = 'i am still not / safe' for config_setting in ['new', 'strict']: self.config_fixture.config( group='resource', domain_name_url_safe=config_setting ) ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] self.patch( f'/domains/{self.domain_id}', body={'domain': ref}, expected_status=http.client.BAD_REQUEST, ) def test_update_domain_unsafe_default(self): """Check default for unsafe names for ``POST /domains``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_domain_ref(name=unsafe_name) del ref['id'] self.patch( f'/domains/{self.domain_id}', body={'domain': ref}, ) def test_update_domain_updates_is_domain_project(self): """Check the project that acts as a domain is updated. Call ``PATCH /domains``. 
""" # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Disable it self.patch( '/domains/%s' % r.result['domain']['id'], body={'domain': {'enabled': False}}, ) # Retrieve its correspondent project r = self.get( '/projects/%(project_id)s' % {'project_id': r.result['domain']['id']} ) self.assertValidProjectResponse(r) # The created project is disabled as well self.assertFalse(r.result['project']['enabled']) def test_disable_domain(self): """Call ``PATCH /domains/{domain_id}`` (set enabled=False).""" # Create a 2nd set of entities in a 2nd domain domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) project2 = unit.new_project_ref(domain_id=domain2['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=domain2['id'], project_id=project2['id'], ) role_member = unit.new_role_ref() PROVIDERS.role_api.create_role(role_member['id'], role_member) PROVIDERS.assignment_api.add_role_to_user_and_project( user2['id'], project2['id'], role_member['id'] ) # First check a user in that domain can authenticate.. 
auth_data = self.build_authentication_request( user_id=user2['id'], password=user2['password'], project_id=project2['id'], ) self.v3_create_token(auth_data) # Now disable the domain domain2['enabled'] = False r = self.patch( '/domains/{domain_id}'.format(domain_id=domain2['id']), body={'domain': {'enabled': False}}, ) self.assertValidDomainResponse(r, domain2) # Try looking up in v3 by name and id auth_data = self.build_authentication_request( user_id=user2['id'], password=user2['password'], project_id=project2['id'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) auth_data = self.build_authentication_request( username=user2['name'], user_domain_id=domain2['id'], password=user2['password'], project_id=project2['id'], ) self.v3_create_token( auth_data, expected_status=http.client.UNAUTHORIZED ) def test_delete_enabled_domain_fails(self): """Call ``DELETE /domains/{domain_id}`` (when domain enabled).""" # Try deleting an enabled domain, which should fail self.delete( '/domains/{domain_id}'.format(domain_id=self.domain['id']), expected_status=exception.ForbiddenAction.code, ) def test_delete_domain(self): """Call ``DELETE /domains/{domain_id}``. The sample data set up already has a user and project that is part of self.domain. Additionally we will create a group and a credential within it. Since we will authenticate in this domain, we create another set of entities in a second domain. Deleting this second domain should delete all these new entities. In addition, all the entities in the regular self.domain should be unaffected by the delete. 
Test Plan: - Create domain2 and a 2nd set of entities - Disable domain2 - Delete domain2 - Check entities in domain2 have been deleted - Check entities in self.domain are unaffected """ # Create a group and a credential in the main domain group = unit.new_group_ref(domain_id=self.domain_id) group = PROVIDERS.identity_api.create_group(group) credential = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) PROVIDERS.credential_api.create_credential( credential['id'], credential ) # Create a 2nd set of entities in a 2nd domain domain2 = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain2['id'], domain2) project2 = unit.new_project_ref(domain_id=domain2['id']) project2 = PROVIDERS.resource_api.create_project( project2['id'], project2 ) user2 = unit.new_user_ref( domain_id=domain2['id'], project_id=project2['id'] ) user2 = PROVIDERS.identity_api.create_user(user2) group2 = unit.new_group_ref(domain_id=domain2['id']) group2 = PROVIDERS.identity_api.create_group(group2) credential2 = unit.new_credential_ref( user_id=user2['id'], project_id=project2['id'] ) PROVIDERS.credential_api.create_credential( credential2['id'], credential2 ) # Now disable the new domain and delete it domain2['enabled'] = False r = self.patch( '/domains/{domain_id}'.format(domain_id=domain2['id']), body={'domain': {'enabled': False}}, ) self.assertValidDomainResponse(r, domain2) self.delete('/domains/{domain_id}'.format(domain_id=domain2['id'])) # Check all the domain2 relevant entities are gone self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain2['id'], ) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, project2['id'], ) self.assertRaises( exception.GroupNotFound, PROVIDERS.identity_api.get_group, group2['id'], ) self.assertRaises( exception.UserNotFound, PROVIDERS.identity_api.get_user, user2['id'], ) self.assertRaises( exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, 
credential2['id'], ) # ...and that all self.domain entities are still here r = PROVIDERS.resource_api.get_domain(self.domain['id']) self.assertDictEqual(self.domain, r) r = PROVIDERS.resource_api.get_project(self.project['id']) self.assertDictEqual(self.project, r) r = PROVIDERS.identity_api.get_group(group['id']) self.assertDictEqual(group, r) r = PROVIDERS.identity_api.get_user(self.user['id']) self.user.pop('password') self.assertDictEqual(self.user, r) r = PROVIDERS.credential_api.get_credential(credential['id']) self.assertDictEqual(credential, r) def test_delete_domain_with_idp(self): # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) domain_id = r.result['domain']['id'] # Create a Idp in the domain self.put( '/OS-FEDERATION/identity_providers/test_idp', body={"identity_provider": {"domain_id": domain_id}}, expected_status=http.client.CREATED, ) # Disable and delete the domain with no error. self.patch( f'/domains/{domain_id}', body={'domain': {'enabled': False}}, ) self.delete('/domains/%s' % domain_id) # The Idp is deleted as well self.get( '/OS-FEDERATION/identity_providers/test_idp', expected_status=http.client.NOT_FOUND, ) def test_delete_domain_deletes_is_domain_project(self): """Check the project that acts as a domain is deleted. Call ``DELETE /domains``. 
""" # Create a new domain domain_ref = unit.new_domain_ref() r = self.post('/domains', body={'domain': domain_ref}) self.assertValidDomainResponse(r, domain_ref) # Retrieve its correspondent project self.get( '/projects/%(project_id)s' % {'project_id': r.result['domain']['id']} ) # Delete the domain self.patch( '/domains/%s' % r.result['domain']['id'], body={'domain': {'enabled': False}}, ) self.delete('/domains/%s' % r.result['domain']['id']) # The created project is deleted as well self.get( '/projects/%(project_id)s' % {'project_id': r.result['domain']['id']}, expected_status=404, ) def test_delete_default_domain(self): # Need to disable it first. self.patch( '/domains/%(domain_id)s' % {'domain_id': CONF.identity.default_domain_id}, body={'domain': {'enabled': False}}, ) self.delete( '/domains/%(domain_id)s' % {'domain_id': CONF.identity.default_domain_id} ) def test_token_revoked_once_domain_disabled(self): """Test token from a disabled domain has been invalidated. Test that a token that was valid for an enabled domain becomes invalid once that domain is disabled. """ domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) user2 = unit.create_user( PROVIDERS.identity_api, domain_id=domain['id'] ) # build a request body auth_body = self.build_authentication_request( user_id=user2['id'], password=user2['password'] ) # sends a request for the user's token token_resp = self.post('/auth/tokens', body=auth_body) subject_token = token_resp.headers.get('x-subject-token') # validates the returned token and it should be valid. self.head( '/auth/tokens', headers={'x-subject-token': subject_token}, expected_status=http.client.OK, ) # now disable the domain domain['enabled'] = False url = "/domains/{domain_id}".format(domain_id=domain['id']) self.patch(url, body={'domain': {'enabled': False}}) # validates the same token again and it should be 'not found' # as the domain has already been disabled. 
self.head( '/auth/tokens', headers={'x-subject-token': subject_token}, expected_status=http.client.NOT_FOUND, ) def test_delete_domain_hierarchy(self): """Call ``DELETE /domains/{domain_id}``.""" domain = unit.new_domain_ref() PROVIDERS.resource_api.create_domain(domain['id'], domain) root_project = unit.new_project_ref(domain_id=domain['id']) root_project = PROVIDERS.resource_api.create_project( root_project['id'], root_project ) leaf_project = unit.new_project_ref( domain_id=domain['id'], parent_id=root_project['id'] ) PROVIDERS.resource_api.create_project(leaf_project['id'], leaf_project) # Need to disable it first. self.patch( '/domains/{domain_id}'.format(domain_id=domain['id']), body={'domain': {'enabled': False}}, ) self.delete('/domains/{domain_id}'.format(domain_id=domain['id'])) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.get_domain, domain['id'], ) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, root_project['id'], ) self.assertRaises( exception.ProjectNotFound, PROVIDERS.resource_api.get_project, leaf_project['id'], ) def test_forbid_operations_on_federated_domain(self): """Make sure one cannot operate on federated domain. This includes operations like create, update, delete on domain identified by id and name where difference variations of id 'Federated' are used. 
""" def create_domains(): for variation in ( 'Federated', 'FEDERATED', 'federated', 'fEderated', ): domain = unit.new_domain_ref() domain['id'] = variation yield domain for domain in create_domains(): self.assertRaises( AssertionError, PROVIDERS.resource_api.create_domain, domain['id'], domain, ) self.assertRaises( AssertionError, PROVIDERS.resource_api.update_domain, domain['id'], domain, ) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.delete_domain, domain['id'], ) # swap 'name' with 'id' and try again, expecting the request to # gracefully fail domain['id'], domain['name'] = domain['name'], domain['id'] self.assertRaises( AssertionError, PROVIDERS.resource_api.create_domain, domain['id'], domain, ) self.assertRaises( AssertionError, PROVIDERS.resource_api.update_domain, domain['id'], domain, ) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.delete_domain, domain['id'], ) def test_forbid_operations_on_defined_federated_domain(self): """Make sure one cannot operate on a user-defined federated domain. This includes operations like create, update, delete. 
""" non_default_name = 'beta_federated_domain' self.config_fixture.config( group='federation', federated_domain_name=non_default_name ) domain = unit.new_domain_ref(name=non_default_name) self.assertRaises( AssertionError, PROVIDERS.resource_api.create_domain, domain['id'], domain, ) self.assertRaises( exception.DomainNotFound, PROVIDERS.resource_api.delete_domain, domain['id'], ) self.assertRaises( AssertionError, PROVIDERS.resource_api.update_domain, domain['id'], domain, ) # Project CRUD tests def test_list_head_projects(self): """Call ``GET & HEAD /projects``.""" resource_url = '/projects' r = self.get(resource_url) self.assertValidProjectListResponse( r, ref=self.project, resource_url=resource_url ) self.head(resource_url, expected_status=http.client.OK) def test_create_project(self): """Call ``POST /projects``.""" ref = unit.new_project_ref(domain_id=self.domain_id) r = self.post('/projects', body={'project': ref}) self.assertValidProjectResponse(r, ref) def test_create_project_bad_request(self): """Call ``POST /projects``.""" self.post( '/projects', body={'project': {}}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_invalid_domain_id(self): """Call ``POST /projects``.""" ref = unit.new_project_ref(domain_id=uuid.uuid4().hex) self.post( '/projects', body={'project': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_unsafe(self): """Call ``POST /projects with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config( group='resource', project_name_url_safe='off' ) ref = unit.new_project_ref(name=unsafe_name) self.post('/projects', body={'project': ref}) for config_setting in ['new', 'strict']: self.config_fixture.config( group='resource', project_name_url_safe=config_setting ) ref = unit.new_project_ref(name=unsafe_name) self.post( '/projects', body={'project': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_unsafe_default(self): """Check default for unsafe names for 
``POST /projects``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_project_ref(name=unsafe_name) self.post('/projects', body={'project': ref}) def test_create_project_with_parent_id_none_and_domain_id_none(self): """Call ``POST /projects``.""" # Grant a domain role for the user collection_url = '/domains/{domain_id}/users/{user_id}/roles'.format( domain_id=self.domain_id, user_id=self.user['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) # Create an authentication request for a domain scoped token auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id, ) # Without parent_id and domain_id passed as None, the domain_id should # be normalized to the domain on the token, when using a domain # scoped token. ref = unit.new_project_ref() r = self.post('/projects', auth=auth, body={'project': ref}) ref['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref) def test_create_project_without_parent_id_and_without_domain_id(self): """Call ``POST /projects``.""" # Grant a domain role for the user collection_url = '/domains/{domain_id}/users/{user_id}/roles'.format( domain_id=self.domain_id, user_id=self.user['id'], ) member_url = '{collection_url}/{role_id}'.format( collection_url=collection_url, role_id=self.role_id, ) self.put(member_url) # Create an authentication request for a domain scoped token auth = self.build_authentication_request( user_id=self.user['id'], password=self.user['password'], domain_id=self.domain_id, ) # Without domain_id and parent_id, the domain_id should be # normalized to the domain on the token, when using a domain # scoped token. 
ref = unit.new_project_ref() r = self.post('/projects', auth=auth, body={'project': ref}) ref['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref) @test_utils.wip('waiting for support for parent_id to imply domain_id') def test_create_project_with_parent_id_and_no_domain_id(self): """Call ``POST /projects``.""" # With only the parent_id, the domain_id should be # normalized to the parent's domain_id ref_child = unit.new_project_ref(parent_id=self.project['id']) r = self.post('/projects', body={'project': ref_child}) self.assertEqual( self.project['domain_id'], r.result['project']['domain_id'] ) ref_child['domain_id'] = self.domain['id'] self.assertValidProjectResponse(r, ref_child) def _create_projects_hierarchy(self, hierarchy_size=1): """Create a single-branched project hierarchy with the specified size. :param hierarchy_size: the desired hierarchy size, default is 1 - a project with one child. :returns projects: a list of the projects in the created hierarchy. """ new_ref = unit.new_project_ref(domain_id=self.domain_id) resp = self.post('/projects', body={'project': new_ref}) projects = [resp.result] for i in range(hierarchy_size): new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[i]['project']['id'], ) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) return projects def _create_project_and_tags(self, num_of_tags=1): """Create a project and a number of tags attached to that project. :param num_of_tags: the desired number of tags created with a specified project. 
:returns: A tuple containing a new project and a list of random tags """ tags = [uuid.uuid4().hex for i in range(num_of_tags)] ref = unit.new_project_ref(domain_id=self.domain_id, tags=tags) resp = self.post('/projects', body={'project': ref}) return resp.result['project'], tags def test_list_project_response_returns_tags(self): """Call ``GET /projects`` should always return tag attributes.""" tagged_project, tags = self._create_project_and_tags() self.get('/projects') ref = unit.new_project_ref(domain_id=self.domain_id) untagged_project = self.post( '/projects', body={'project': ref} ).json_body['project'] resp = self.get('/projects') for project in resp.json_body['projects']: if project['id'] == tagged_project['id']: self.assertIsNotNone(project['tags']) self.assertEqual(project['tags'], tags) if project['id'] == untagged_project['id']: self.assertEqual(project['tags'], []) def test_list_projects_filtering_by_tags(self): """Call ``GET /projects?tags={tags}``.""" project, tags = self._create_project_and_tags(num_of_tags=2) tag_string = ','.join(tags) resp = self.get(f'/projects?tags={tag_string}') self.assertValidProjectListResponse(resp) self.assertEqual(project['id'], resp.result['projects'][0]['id']) def test_list_projects_filtering_by_tags_any(self): """Call ``GET /projects?tags-any={tags}``.""" project, tags = self._create_project_and_tags(num_of_tags=2) project1, tags1 = self._create_project_and_tags(num_of_tags=2) tag_string = tags[0] + ',' + tags1[0] resp = self.get(f'/projects?tags-any={tag_string}') pids = [p['id'] for p in resp.result['projects']] self.assertValidProjectListResponse(resp) self.assertIn(project['id'], pids) self.assertIn(project1['id'], pids) def test_list_projects_filtering_by_not_tags(self): """Call ``GET /projects?not-tags={tags}``.""" project1, tags1 = self._create_project_and_tags(num_of_tags=2) project2, tags2 = self._create_project_and_tags(num_of_tags=2) tag_string = ','.join(tags1) resp = 
self.get(f'/projects?not-tags={tag_string}') self.assertValidProjectListResponse(resp) pids = [p['id'] for p in resp.result['projects']] self.assertNotIn(project1['id'], pids) self.assertIn(project2['id'], pids) def test_list_projects_filtering_by_not_tags_any(self): """Call ``GET /projects?not-tags-any={tags}``.""" project1, tags1 = self._create_project_and_tags(num_of_tags=2) project2, tags2 = self._create_project_and_tags(num_of_tags=2) project3, tags3 = self._create_project_and_tags(num_of_tags=2) tag_string = tags1[0] + ',' + tags2[0] resp = self.get(f'/projects?not-tags-any={tag_string}') self.assertValidProjectListResponse(resp) pids = [p['id'] for p in resp.result['projects']] self.assertNotIn(project1['id'], pids) self.assertNotIn(project2['id'], pids) self.assertIn(project3['id'], pids) def test_list_projects_filtering_multiple_tag_filters(self): """Call ``GET /projects?tags={tags}&tags-any={tags}``.""" project1, tags1 = self._create_project_and_tags(num_of_tags=2) project2, tags2 = self._create_project_and_tags(num_of_tags=2) project3, tags3 = self._create_project_and_tags(num_of_tags=2) tags1_query = ','.join(tags1) resp = self.patch( '/projects/{project_id}'.format(project_id=project3['id']), body={'project': {'tags': tags1}}, ) tags1.append(tags2[0]) resp = self.patch( '/projects/{project_id}'.format(project_id=project1['id']), body={'project': {'tags': tags1}}, ) url = '/projects?tags=%(value1)s&tags-any=%(value2)s' resp = self.get( url % {'value1': tags1_query, 'value2': ','.join(tags2)} ) self.assertValidProjectListResponse(resp) self.assertEqual(len(resp.result['projects']), 1) self.assertIn(project1['id'], resp.result['projects'][0]['id']) def test_list_projects_filtering_multiple_any_tag_filters(self): """Call ``GET /projects?tags-any={tags}¬-tags-any={tags}``.""" project1, tags1 = self._create_project_and_tags() project2, tags2 = self._create_project_and_tags(num_of_tags=2) url = '/projects?tags-any=%(value1)s¬-tags-any=%(value2)s' resp = 
self.get(url % {'value1': tags1[0], 'value2': tags2[0]}) self.assertValidProjectListResponse(resp) pids = [p['id'] for p in resp.result['projects']] self.assertIn(project1['id'], pids) self.assertNotIn(project2['id'], pids) def test_list_projects_filtering_conflict_tag_filters(self): """Call ``GET /projects?tags={tags}¬-tags={tags}``.""" project, tags = self._create_project_and_tags(num_of_tags=2) tag_string = ','.join(tags) url = '/projects?tags=%(values)s¬-tags=%(values)s' resp = self.get(url % {'values': tag_string}) self.assertValidProjectListResponse(resp) self.assertEqual(len(resp.result['projects']), 0) def test_list_projects_filtering_conflict_any_tag_filters(self): """Call ``GET /projects?tags-any={tags}¬-tags-any={tags}``.""" project, tags = self._create_project_and_tags(num_of_tags=2) tag_string = ','.join(tags) url = '/projects?tags-any=%(values)s¬-tags-any=%(values)s' resp = self.get(url % {'values': tag_string}) self.assertValidProjectListResponse(resp) self.assertEqual(len(resp.result['projects']), 0) def test_list_projects_by_tags_and_name(self): """Call ``GET /projects?tags-any={tags}&name={name}``.""" project, tags = self._create_project_and_tags(num_of_tags=2) ref = {'project': {'name': 'tags and name'}} resp = self.patch( '/projects/{project_id}'.format(project_id=project['id']), body=ref, ) url = '/projects?tags-any=%(values)s&name=%(name)s' resp = self.get(url % {'values': tags[0], 'name': 'tags and name'}) self.assertValidProjectListResponse(resp) pids = [p['id'] for p in resp.result['projects']] self.assertIn(project['id'], pids) resp = self.get(url % {'values': tags[0], 'name': 'foo'}) self.assertValidProjectListResponse(resp) self.assertEqual(len(resp.result['projects']), 0) def test_list_projects_filtering_by_parent_id(self): """Call ``GET /projects?parent_id={project_id}``.""" projects = self._create_projects_hierarchy(hierarchy_size=2) # Add another child to projects[1] - it will be projects[3] new_ref = unit.new_project_ref( 
domain_id=self.domain_id, parent_id=projects[1]['project']['id'] ) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Query for projects[0] immediate children - it will # be only projects[1] r = self.get( '/projects?parent_id=%(project_id)s' % {'project_id': projects[0]['project']['id']} ) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [projects[1]['project']] # projects[0] has projects[1] as child self.assertEqual(expected_list, projects_result) # Query for projects[1] immediate children - it will # be projects[2] and projects[3] r = self.get( '/projects?parent_id=%(project_id)s' % {'project_id': projects[1]['project']['id']} ) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [projects[2]['project'], projects[3]['project']] # projects[1] has projects[2] and projects[3] as children self.assertEqual(expected_list, projects_result) # Query for projects[2] immediate children - it will be an empty list r = self.get( '/projects?parent_id=%(project_id)s' % {'project_id': projects[2]['project']['id']} ) self.assertValidProjectListResponse(r) projects_result = r.result['projects'] expected_list = [] # projects[2] has no child, projects_result must be an empty list self.assertEqual(expected_list, projects_result) def test_create_hierarchical_project(self): """Call ``POST /projects``.""" self._create_projects_hierarchy() def test_get_head_project(self): """Call ``GET & HEAD /projects/{project_id}``.""" resource_url = '/projects/{project_id}'.format( project_id=self.project_id ) r = self.get(resource_url) self.assertValidProjectResponse(r, self.project) self.head(resource_url, expected_status=http.client.OK) def test_get_project_with_parents_as_list_with_invalid_id(self): """Call ``GET /projects/{project_id}?parents_as_list``.""" self.get( f'/projects/{None}?parents_as_list', 
expected_status=http.client.NOT_FOUND, ) self.get( '/projects/%(project_id)s?parents_as_list' % {'project_id': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_get_project_with_subtree_as_list_with_invalid_id(self): """Call ``GET /projects/{project_id}?subtree_as_list``.""" self.get( f'/projects/{None}?subtree_as_list', expected_status=http.client.NOT_FOUND, ) self.get( '/projects/%(project_id)s?subtree_as_list' % {'project_id': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_get_project_with_parents_as_ids(self): """Call ``GET /projects/{project_id}?parents_as_ids``.""" projects = self._create_projects_hierarchy(hierarchy_size=2) # Query for projects[2] parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % {'project_id': projects[2]['project']['id']} ) self.assertValidProjectResponse(r, projects[2]['project']) parents_as_ids = r.result['project']['parents'] # Assert parents_as_ids is a structured dictionary correctly # representing the hierarchy. The request was made using projects[2] # id, hence its parents should be projects[1], projects[0] and the # is_domain_project, which is the root of the hierarchy. 
It should # have the following structure: # { # projects[1]: { # projects[0]: { # is_domain_project: None # } # } # } is_domain_project_id = projects[0]['project']['domain_id'] expected_dict = { projects[1]['project']['id']: { projects[0]['project']['id']: {is_domain_project_id: None} } } self.assertDictEqual(expected_dict, parents_as_ids) # Query for projects[0] parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % {'project_id': projects[0]['project']['id']} ) self.assertValidProjectResponse(r, projects[0]['project']) parents_as_ids = r.result['project']['parents'] # projects[0] has only the project that acts as a domain as parent expected_dict = {is_domain_project_id: None} self.assertDictEqual(expected_dict, parents_as_ids) # Query for is_domain_project parents_as_ids r = self.get( '/projects/%(project_id)s?parents_as_ids' % {'project_id': is_domain_project_id} ) parents_as_ids = r.result['project']['parents'] # the project that acts as a domain has no parents, parents_as_ids # must be None self.assertIsNone(parents_as_ids) def test_get_project_with_parents_as_list_with_full_access(self): """``GET /projects/{project_id}?parents_as_list`` with full access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on each one of those projects; - Check that calling parents_as_list on 'subproject' returns both 'project' and 'parent'. 
""" # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on all the created projects for proj in (parent, project, subproject): self.put( self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'], ) ) # Make the API call r = self.get( '/projects/%(project_id)s?parents_as_list' % {'project_id': subproject['project']['id']} ) self.assertValidProjectResponse(r, subproject['project']) # Assert only 'project' and 'parent' are in the parents list self.assertIn(project, r.result['project']['parents']) self.assertIn(parent, r.result['project']['parents']) self.assertEqual(2, len(r.result['project']['parents'])) def test_get_project_with_parents_as_list_with_partial_access(self): """``GET /projects/{project_id}?parents_as_list`` with partial access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on 'parent' and 'subproject'; - Check that calling parents_as_list on 'subproject' only returns 'parent'. """ # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on parent and subproject for proj in (parent, subproject): self.put( self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'], ) ) # Make the API call r = self.get( '/projects/%(project_id)s?parents_as_list' % {'project_id': subproject['project']['id']} ) self.assertValidProjectResponse(r, subproject['project']) # Assert only 'parent' is in the parents list self.assertIn(parent, r.result['project']['parents']) self.assertEqual(1, len(r.result['project']['parents'])) def test_get_project_with_parents_as_list_and_parents_as_ids(self): """Attempt to list a project's parents as both a list and as IDs. 
This uses ``GET /projects/{project_id}?parents_as_list&parents_as_ids`` which should fail with a Bad Request due to the conflicting query strings. """ projects = self._create_projects_hierarchy(hierarchy_size=2) self.get( '/projects/%(project_id)s?parents_as_list&parents_as_ids' % {'project_id': projects[1]['project']['id']}, expected_status=http.client.BAD_REQUEST, ) def test_get_project_with_include_limits(self): PROVIDERS.assignment_api.create_system_grant_for_user( self.user_id, self.role_id ) system_admin_token = self.get_system_scoped_token() parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on all the created projects for proj in (parent, project, subproject): self.put( self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'], ) ) # create a registered limit and three limits for each project. reg_limit = unit.new_registered_limit_ref( service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) self.post( '/registered_limits', body={'registered_limits': [reg_limit]}, token=system_admin_token, expected_status=http.client.CREATED, ) limit1 = unit.new_limit_ref( project_id=parent['project']['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) limit2 = unit.new_limit_ref( project_id=project['project']['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) limit3 = unit.new_limit_ref( project_id=subproject['project']['id'], service_id=self.service_id, region_id=self.region_id, resource_name='volume', ) self.post( '/limits', body={'limits': [limit1, limit2, limit3]}, token=system_admin_token, expected_status=http.client.CREATED, ) # "include_limits" should work together with "parents_as_list" or # "subtree_as_list". Only using "include_limits" really does nothing. 
r = self.get( '/projects/%(project_id)s?include_limits' % {'project_id': subproject['project']['id']} ) self.assertNotIn('parents', r.result['project']) self.assertNotIn('subtree', r.result['project']) self.assertNotIn('limits', r.result['project']) # using "include_limits" with "parents_as_list" r = self.get( '/projects/%(project_id)s?include_limits&parents_as_list' % {'project_id': subproject['project']['id']} ) self.assertEqual(2, len(r.result['project']['parents'])) for parent in r.result['project']['parents']: self.assertEqual(1, len(parent['project']['limits'])) self.assertEqual( parent['project']['id'], parent['project']['limits'][0]['project_id'], ) self.assertEqual( 10, parent['project']['limits'][0]['resource_limit'] ) # using "include_limits" with "subtree_as_list" r = self.get( '/projects/%(project_id)s?include_limits&subtree_as_list' % {'project_id': parent['project']['id']} ) self.assertEqual(2, len(r.result['project']['subtree'])) for child in r.result['project']['subtree']: self.assertEqual(1, len(child['project']['limits'])) self.assertEqual( child['project']['id'], child['project']['limits'][0]['project_id'], ) self.assertEqual( 10, child['project']['limits'][0]['resource_limit'] ) def test_list_project_is_domain_filter(self): """Call ``GET /projects?is_domain=True/False``.""" # Get the initial number of projects, both acting as a domain as well # as regular. 
r = self.get('/projects?is_domain=True', expected_status=200) initial_number_is_domain_true = len(r.result['projects']) r = self.get('/projects?is_domain=False', expected_status=200) initial_number_is_domain_false = len(r.result['projects']) # Add some more projects acting as domains new_is_domain_project = unit.new_project_ref(is_domain=True) new_is_domain_project = PROVIDERS.resource_api.create_project( new_is_domain_project['id'], new_is_domain_project ) new_is_domain_project2 = unit.new_project_ref(is_domain=True) new_is_domain_project2 = PROVIDERS.resource_api.create_project( new_is_domain_project2['id'], new_is_domain_project2 ) number_is_domain_true = initial_number_is_domain_true + 2 r = self.get('/projects?is_domain=True', expected_status=200) self.assertThat( r.result['projects'], matchers.HasLength(number_is_domain_true) ) self.assertIn( new_is_domain_project['id'], [p['id'] for p in r.result['projects']], ) self.assertIn( new_is_domain_project2['id'], [p['id'] for p in r.result['projects']], ) # Now add a regular project new_regular_project = unit.new_project_ref(domain_id=self.domain_id) new_regular_project = PROVIDERS.resource_api.create_project( new_regular_project['id'], new_regular_project ) number_is_domain_false = initial_number_is_domain_false + 1 # Check we still have the same number of projects acting as domains r = self.get('/projects?is_domain=True', expected_status=200) self.assertThat( r.result['projects'], matchers.HasLength(number_is_domain_true) ) # Check the number of regular projects is correct r = self.get('/projects?is_domain=False', expected_status=200) self.assertThat( r.result['projects'], matchers.HasLength(number_is_domain_false) ) self.assertIn( new_regular_project['id'], [p['id'] for p in r.result['projects']] ) def test_list_project_is_domain_filter_default(self): """Default project list should not see projects acting as domains.""" # Get the initial count of regular projects r = self.get('/projects?is_domain=False', 
expected_status=200) number_is_domain_false = len(r.result['projects']) # Make sure we have at least one project acting as a domain new_is_domain_project = unit.new_project_ref(is_domain=True) new_is_domain_project = PROVIDERS.resource_api.create_project( new_is_domain_project['id'], new_is_domain_project ) r = self.get('/projects', expected_status=200) self.assertThat( r.result['projects'], matchers.HasLength(number_is_domain_false) ) self.assertNotIn(new_is_domain_project, r.result['projects']) def test_get_project_with_subtree_as_ids(self): """Call ``GET /projects/{project_id}?subtree_as_ids``. This test creates a more complex hierarchy to test if the structured dictionary returned by using the ``subtree_as_ids`` query param correctly represents the hierarchy. The hierarchy contains 5 projects with the following structure:: +--A--+ | | +--B--+ C | | D E """ projects = self._create_projects_hierarchy(hierarchy_size=2) # Add another child to projects[0] - it will be projects[3] new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[0]['project']['id'] ) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Add another child to projects[1] - it will be projects[4] new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[1]['project']['id'] ) resp = self.post('/projects', body={'project': new_ref}) self.assertValidProjectResponse(resp, new_ref) projects.append(resp.result) # Query for projects[0] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % {'project_id': projects[0]['project']['id']} ) self.assertValidProjectResponse(r, projects[0]['project']) subtree_as_ids = r.result['project']['subtree'] # The subtree hierarchy from projects[0] should have the following # structure: # { # projects[1]: { # projects[2]: None, # projects[4]: None # }, # projects[3]: None # } expected_dict = { projects[1]['project']['id']: { 
projects[2]['project']['id']: None, projects[4]['project']['id']: None, }, projects[3]['project']['id']: None, } self.assertDictEqual(expected_dict, subtree_as_ids) # Now query for projects[1] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % {'project_id': projects[1]['project']['id']} ) self.assertValidProjectResponse(r, projects[1]['project']) subtree_as_ids = r.result['project']['subtree'] # The subtree hierarchy from projects[1] should have the following # structure: # { # projects[2]: None, # projects[4]: None # } expected_dict = { projects[2]['project']['id']: None, projects[4]['project']['id']: None, } self.assertDictEqual(expected_dict, subtree_as_ids) # Now query for projects[3] subtree_as_ids r = self.get( '/projects/%(project_id)s?subtree_as_ids' % {'project_id': projects[3]['project']['id']} ) self.assertValidProjectResponse(r, projects[3]['project']) subtree_as_ids = r.result['project']['subtree'] # projects[3] has no subtree, subtree_as_ids must be None self.assertIsNone(subtree_as_ids) def test_get_project_with_subtree_as_list_with_full_access(self): """``GET /projects/{project_id}?subtree_as_list`` with full access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on each one of those projects; - Check that calling subtree_as_list on 'parent' returns both 'parent' and 'subproject'. 
""" # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on all the created projects for proj in (parent, project, subproject): self.put( self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'], ) ) # Make the API call r = self.get( '/projects/%(project_id)s?subtree_as_list' % {'project_id': parent['project']['id']} ) self.assertValidProjectResponse(r, parent['project']) # Assert only 'project' and 'subproject' are in the subtree self.assertIn(project, r.result['project']['subtree']) self.assertIn(subproject, r.result['project']['subtree']) self.assertEqual(2, len(r.result['project']['subtree'])) def test_get_project_with_subtree_as_list_with_partial_access(self): """``GET /projects/{project_id}?subtree_as_list`` with partial access. Test plan: - Create 'parent', 'project' and 'subproject' projects; - Assign a user a role on 'parent' and 'subproject'; - Check that calling subtree_as_list on 'parent' returns 'subproject'. """ # Create the project hierarchy parent, project, subproject = self._create_projects_hierarchy(2) # Assign a role for the user on parent and subproject for proj in (parent, subproject): self.put( self.build_role_assignment_link( role_id=self.role_id, user_id=self.user_id, project_id=proj['project']['id'], ) ) # Make the API call r = self.get( '/projects/%(project_id)s?subtree_as_list' % {'project_id': parent['project']['id']} ) self.assertValidProjectResponse(r, parent['project']) # Assert only 'subproject' is in the subtree self.assertIn(subproject, r.result['project']['subtree']) self.assertEqual(1, len(r.result['project']['subtree'])) def test_get_project_with_subtree_as_list_and_subtree_as_ids(self): """Attempt to get a project subtree as both a list and as IDs. This uses ``GET /projects/{project_id}?subtree_as_list&subtree_as_ids`` which should fail with a bad request due to the conflicting query strings. 
""" projects = self._create_projects_hierarchy(hierarchy_size=2) self.get( '/projects/%(project_id)s?subtree_as_list&subtree_as_ids' % {'project_id': projects[1]['project']['id']}, expected_status=http.client.BAD_REQUEST, ) def test_update_project(self): """Call ``PATCH /projects/{project_id}``.""" ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=self.project['parent_id'] ) del ref['id'] r = self.patch( f'/projects/{self.project_id}', body={'project': ref}, ) self.assertValidProjectResponse(r, ref) def test_update_project_unsafe(self): """Call ``POST /projects/{project_id} with unsafe names``.""" unsafe_name = 'i am not / safe' self.config_fixture.config( group='resource', project_name_url_safe='off' ) ref = unit.new_project_ref( name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id'], ) del ref['id'] self.patch( f'/projects/{self.project_id}', body={'project': ref}, ) unsafe_name = 'i am still not / safe' for config_setting in ['new', 'strict']: self.config_fixture.config( group='resource', project_name_url_safe=config_setting ) ref = unit.new_project_ref( name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id'], ) del ref['id'] self.patch( f'/projects/{self.project_id}', body={'project': ref}, expected_status=http.client.BAD_REQUEST, ) def test_update_project_unsafe_default(self): """Check default for unsafe names for ``POST /projects``.""" unsafe_name = 'i am not / safe' # By default, we should be able to create unsafe names ref = unit.new_project_ref( name=unsafe_name, domain_id=self.domain_id, parent_id=self.project['parent_id'], ) del ref['id'] self.patch( f'/projects/{self.project_id}', body={'project': ref}, ) def test_update_project_domain_id(self): """Call ``PATCH /projects/{project_id}`` with domain_id. A projects's `domain_id` is immutable. Ensure that any attempts to update the `domain_id` of a project fails. 
""" project = unit.new_project_ref(domain_id=self.domain['id']) project = PROVIDERS.resource_api.create_project(project['id'], project) project['domain_id'] = CONF.identity.default_domain_id self.patch( '/projects/{project_id}'.format(project_id=project['id']), body={'project': project}, expected_status=exception.ValidationError.code, ) def test_update_project_parent_id(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() leaf_project = projects[1]['project'] leaf_project['parent_id'] = None self.patch( '/projects/{project_id}'.format(project_id=leaf_project['id']), body={'project': leaf_project}, expected_status=http.client.FORBIDDEN, ) def test_update_project_is_domain_not_allowed(self): """Call ``PATCH /projects/{project_id}`` with is_domain. The is_domain flag is immutable. """ project = unit.new_project_ref(domain_id=self.domain['id']) resp = self.post('/projects', body={'project': project}) self.assertFalse(resp.result['project']['is_domain']) project['parent_id'] = resp.result['project']['parent_id'] project['is_domain'] = True self.patch( '/projects/%(project_id)s' % {'project_id': resp.result['project']['id']}, body={'project': project}, expected_status=http.client.BAD_REQUEST, ) def test_disable_leaf_project(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() leaf_project = projects[1]['project'] leaf_project['enabled'] = False r = self.patch( '/projects/{project_id}'.format(project_id=leaf_project['id']), body={'project': leaf_project}, ) self.assertEqual( leaf_project['enabled'], r.result['project']['enabled'] ) def test_disable_not_leaf_project(self): """Call ``PATCH /projects/{project_id}``.""" projects = self._create_projects_hierarchy() root_project = projects[0]['project'] root_project['enabled'] = False self.patch( '/projects/{project_id}'.format(project_id=root_project['id']), body={'project': root_project}, expected_status=http.client.FORBIDDEN, ) def 
test_delete_project(self): """Call ``DELETE /projects/{project_id}``. As well as making sure the delete succeeds, we ensure that any credentials that reference this projects are also deleted, while other credentials are unaffected. """ credential = unit.new_credential_ref( user_id=self.user['id'], project_id=self.project_id ) PROVIDERS.credential_api.create_credential( credential['id'], credential ) # First check the credential for this project is present r = PROVIDERS.credential_api.get_credential(credential['id']) self.assertDictEqual(credential, r) # Create a second credential with a different project project2 = unit.new_project_ref(domain_id=self.domain['id']) PROVIDERS.resource_api.create_project(project2['id'], project2) credential2 = unit.new_credential_ref( user_id=self.user['id'], project_id=project2['id'] ) PROVIDERS.credential_api.create_credential( credential2['id'], credential2 ) # Now delete the project self.delete(f'/projects/{self.project_id}') # Deleting the project should have deleted any credentials # that reference this project self.assertRaises( exception.CredentialNotFound, PROVIDERS.credential_api.get_credential, credential_id=credential['id'], ) # But the credential for project2 is unaffected r = PROVIDERS.credential_api.get_credential(credential2['id']) self.assertDictEqual(credential2, r) def test_delete_not_leaf_project(self): """Call ``DELETE /projects/{project_id}``.""" projects = self._create_projects_hierarchy() self.delete( '/projects/%(project_id)s' % {'project_id': projects[0]['project']['id']}, expected_status=http.client.FORBIDDEN, ) def test_create_project_with_tags(self): project, tags = self._create_project_and_tags(num_of_tags=10) ref = self.get( '/projects/{project_id}'.format(project_id=project['id']), expected_status=http.client.OK, ) self.assertIn('tags', ref.result['project']) for tag in tags: self.assertIn(tag, ref.result['project']['tags']) def test_update_project_with_tags(self): project, tags = 
self._create_project_and_tags(num_of_tags=9) tag = uuid.uuid4().hex project['tags'].append(tag) ref = self.patch( f'/projects/{self.project_id}', body={'project': {'tags': project['tags']}}, ) self.assertIn(tag, ref.result['project']['tags']) def test_create_project_tag(self): tag = uuid.uuid4().hex url = '/projects/%(project_id)s/tags/%(value)s' self.put( url % {'project_id': self.project_id, 'value': tag}, expected_status=http.client.CREATED, ) self.get( url % {'project_id': self.project_id, 'value': tag}, expected_status=http.client.NO_CONTENT, ) def test_create_project_tag_is_case_insensitive(self): case_tags = ['case', 'CASE'] for tag in case_tags: self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': tag}, expected_status=http.client.CREATED, ) resp = self.get( f'/projects/{self.project_id}', expected_status=http.client.OK, ) for tag in case_tags: self.assertIn(tag, resp.result['project']['tags']) def test_get_single_project_tag(self): project, tags = self._create_project_and_tags() self.get( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tags[0]}, expected_status=http.client.NO_CONTENT, ) self.head( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tags[0]}, expected_status=http.client.NO_CONTENT, ) def test_get_project_tag_that_does_not_exist(self): project, _ = self._create_project_and_tags() self.get( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_delete_project_tag(self): project, tags = self._create_project_and_tags() self.delete( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tags[0]}, expected_status=http.client.NO_CONTENT, ) self.get( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': tags[0]}, expected_status=http.client.NOT_FOUND, ) def test_delete_project_tags(self): 
project, tags = self._create_project_and_tags(num_of_tags=5) self.delete( '/projects/{project_id}/tags/'.format(project_id=project['id']), expected_status=http.client.NO_CONTENT, ) self.get( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': tags[0]}, expected_status=http.client.NOT_FOUND, ) resp = self.get( f'/projects/{self.project_id}/tags/', expected_status=http.client.OK, ) self.assertEqual(len(resp.result['tags']), 0) def test_create_project_tag_invalid_project_id(self): self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': uuid.uuid4().hex, 'value': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_create_project_tag_unsafe_name(self): tag = uuid.uuid4().hex + ',' self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': tag}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_tag_already_exists(self): project, tags = self._create_project_and_tags() self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tags[0]}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_tag_over_tag_limit(self): project, _ = self._create_project_and_tags(num_of_tags=80) self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': uuid.uuid4().hex}, expected_status=http.client.BAD_REQUEST, ) def test_create_project_tag_name_over_character_limit(self): tag = 'a' * 256 self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': tag}, expected_status=http.client.BAD_REQUEST, ) def test_delete_tag_invalid_project_id(self): self.delete( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': uuid.uuid4().hex, 'value': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_delete_project_tag_not_found(self): self.delete( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': uuid.uuid4().hex}, 
expected_status=http.client.NOT_FOUND, ) def test_list_project_tags(self): project, tags = self._create_project_and_tags(num_of_tags=5) resp = self.get( '/projects/{project_id}/tags'.format(project_id=project['id']), expected_status=http.client.OK, ) for tag in tags: self.assertIn(tag, resp.result['tags']) def test_check_if_project_tag_exists(self): project, tags = self._create_project_and_tags(num_of_tags=5) self.head( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tags[0]}, expected_status=http.client.NO_CONTENT, ) def test_list_project_tags_for_project_with_no_tags(self): resp = self.get( f'/projects/{self.project_id}/tags', expected_status=http.client.OK, ) self.assertEqual([], resp.result['tags']) def test_check_project_with_no_tags(self): self.head( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': self.project_id, 'value': uuid.uuid4().hex}, expected_status=http.client.NOT_FOUND, ) def test_update_project_tags(self): project, tags = self._create_project_and_tags(num_of_tags=5) resp = self.put( '/projects/{project_id}/tags'.format(project_id=project['id']), body={'tags': tags}, expected_status=http.client.OK, ) self.assertIn(tags[1], resp.result['tags']) def test_update_project_tags_removes_previous_tags(self): tag = uuid.uuid4().hex project, tags = self._create_project_and_tags(num_of_tags=5) self.put( '/projects/%(project_id)s/tags/%(value)s' % {'project_id': project['id'], 'value': tag}, expected_status=http.client.CREATED, ) resp = self.put( '/projects/{project_id}/tags'.format(project_id=project['id']), body={'tags': tags}, expected_status=http.client.OK, ) self.assertNotIn(tag, resp.result['tags']) self.assertIn(tags[1], resp.result['tags']) def test_update_project_tags_unsafe_names(self): project, tags = self._create_project_and_tags(num_of_tags=5) invalid_chars = [',', '/'] for char in invalid_chars: tags[0] = uuid.uuid4().hex + char self.put( '/projects/%(project_id)s/tags' % {'project_id': project['id']}, 
body={'tags': tags}, expected_status=http.client.BAD_REQUEST, ) def test_update_project_tags_with_too_many_tags(self): project, _ = self._create_project_and_tags() tags = [uuid.uuid4().hex for i in range(81)] tags.append(uuid.uuid4().hex) self.put( '/projects/{project_id}/tags'.format(project_id=project['id']), body={'tags': tags}, expected_status=http.client.BAD_REQUEST, ) def test_list_projects_by_user_with_inherited_role(self): """Ensure the cache is invalidated when creating/deleting a project.""" domain_ref = unit.new_domain_ref() resp = self.post('/domains', body={'domain': domain_ref}) domain = resp.result['domain'] user_ref = unit.new_user_ref(domain_id=self.domain_id) resp = self.post('/users', body={'user': user_ref}) user = resp.result['user'] role_ref = unit.new_role_ref() resp = self.post('/roles', body={'role': role_ref}) role = resp.result['role'] self.put( '/OS-INHERIT/domains/%(domain_id)s/users/%(user_id)s/roles/' '%(role_id)s/inherited_to_projects' % { 'domain_id': domain['id'], 'user_id': user['id'], 'role_id': role['id'], } ) resp = self.get('/users/{user}/projects'.format(user=user['id'])) self.assertValidProjectListResponse(resp) self.assertEqual([], resp.result['projects']) project_ref = unit.new_project_ref(domain_id=domain['id']) resp = self.post('/projects', body={'project': project_ref}) project = resp.result['project'] resp = self.get('/users/{user}/projects'.format(user=user['id'])) self.assertValidProjectListResponse(resp) self.assertEqual(project['id'], resp.result['projects'][0]['id']) class StrictTwoLevelLimitsResourceTestCase(ResourceTestCase): def setUp(self): super().setUp() def config_overrides(self): super().config_overrides() self.config_fixture.config( group='unified_limit', enforcement_model='strict_two_level' ) def _create_projects_hierarchy(self, hierarchy_size=1): if hierarchy_size > 1: self.skip_test_overrides( "Strict two level limit enforcement model doesn't allow the" "project tree depth > 2" ) return 
super()._create_projects_hierarchy(hierarchy_size) def test_create_hierarchical_project(self): projects = self._create_projects_hierarchy() # create grandchild project will fail. new_ref = unit.new_project_ref( domain_id=self.domain_id, parent_id=projects[1]['project']['id'] ) self.post( '/projects', body={'project': new_ref}, expected_status=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_v3_trust.py0000664000175000017500000005771200000000000022663 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import http.client import uuid from oslo_utils import timeutils from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.tests import unit from keystone.tests.unit import test_v3 CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestTrustOperations(test_v3.RestfulTestCase): """Test module for create, read, update and delete operations on trusts. This module is specific to tests for trust CRUD operations. All other tests related to trusts that are authentication or authorization specific should live in the keystone/tests/unit/test_v3_auth.py module. 
""" def setUp(self): super().setUp() # create a trustee to delegate stuff to self.trustee_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.trustee_user_id = self.trustee_user['id'] def test_create_trust_bad_request(self): # The server returns a 403 Forbidden rather than a 400 Bad Request, see # bug 1133435 self.post( '/OS-TRUST/trusts', body={'trust': {}}, expected_status=http.client.FORBIDDEN, ) def test_create_trust_with_invalid_expiration_fails(self): # create a new trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) ref['expires_at'] = 'bad' self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) ref['expires_at'] = '' self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) ref['expires_at'] = 'Z' self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_trusts_do_not_implement_updates(self): with self.test_client() as c: # create a new trust token = self.get_scoped_token() ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) r = c.post( '/v3/OS-TRUST/trusts', json={'trust': ref}, headers={'X-Auth-Token': token}, ) trust_id = r.json['trust']['id'] c.patch( f'/v3/OS-TRUST/trusts/{trust_id}', json={'trust': ref}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) c.put( f'/v3/OS-TRUST/trusts/{trust_id}', json={'trust': ref}, headers={'X-Auth-Token': token}, expected_status_code=http.client.METHOD_NOT_ALLOWED, ) def test_trust_crud(self): # create a new trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = 
self.assertValidTrustResponse(r, ref) # get the trust r = self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']) ) self.assertValidTrustResponse(r, ref) # validate roles on the trust r = self.get( '/OS-TRUST/trusts/{trust_id}/roles'.format(trust_id=trust['id']) ) roles = self.assertValidRoleListResponse(r, self.role) self.assertIn(self.role['id'], [x['id'] for x in roles]) self.head( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {'trust_id': trust['id'], 'role_id': self.role['id']}, expected_status=http.client.OK, ) r = self.get( '/OS-TRUST/trusts/%(trust_id)s/roles/%(role_id)s' % {'trust_id': trust['id'], 'role_id': self.role['id']} ) self.assertValidRoleResponse(r, self.role) # list all trusts r = self.get('/OS-TRUST/trusts') self.assertValidTrustListResponse(r, trust) # delete the trust self.delete('/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id'])) # ensure the trust is not found self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']), expected_status=http.client.NOT_FOUND, ) def test_list_trusts(self): # create three trusts with the same trustor and trustee ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) for i in range(3): ref['expires_at'] = ( timeutils.utcnow() .replace(year=2032) .strftime(unit.TIME_FORMAT) ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) self.assertValidTrustResponse(r, ref) # list all trusts list_url = '/OS-TRUST/trusts' r = self.get(list_url) self.head(list_url, expected_status=http.client.OK) trusts = r.result['trusts'] self.assertEqual(3, len(trusts)) self.assertValidTrustListResponse(r) # list all trusts for the trustor list_for_trustor_url = ( '/OS-TRUST/trusts?trustor_user_id=%s' % self.user_id ) r = self.get(list_for_trustor_url) self.head(list_for_trustor_url, expected_status=http.client.OK) trusts = r.result['trusts'] self.assertEqual(3, 
len(trusts)) self.assertValidTrustListResponse(r) # list all trusts for trustee as the trustor list_as_trustor_url = ( '/OS-TRUST/trusts?trustee_user_id=%s' % self.user_id ) r = self.get(list_as_trustor_url) self.head(list_as_trustor_url, expected_status=http.client.OK) trusts = r.result['trusts'] self.assertEqual(0, len(trusts)) # list all trusts as the trustee is forbidden # FIXME(dmendiza): This test is not written to do what the above # comment says it should be doing. The main issue is that it's # still using the trustor credentiasl to make the request. # list_all_as_trustee_url = ( # '/OS-TRUST/trusts?trustee_user_id=%s' % self.trustee_user_id # ) # r = self.get( # list_all_as_trustee_url, # expected_status=http.client.FORBIDDEN # ) # self.head( # list_all_as_trustee_url, # expected_status=http.client.FORBIDDEN # ) def test_create_trust_with_expiration_in_the_past_fails(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, expires='2010-06-04T08:44:31.999999Z', role_ids=[self.role_id], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_delete_trust(self): # create a trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) # delete the trust self.delete('/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id'])) # ensure the trust isn't found self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']), expected_status=http.client.NOT_FOUND, ) def test_create_trust_without_trustee_returns_bad_request(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) # 
trustee_user_id is required to create a trust del ref['trustee_user_id'] self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_trust_without_impersonation_returns_bad_request(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) # impersonation is required to create a trust del ref['impersonation'] self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_trust_with_bad_remaining_uses_returns_bad_request(self): # negative numbers, strings, non-integers, and 0 are not value values for value in [-1, 0, "a bad value", 7.2]: ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, remaining_uses=value, role_ids=[self.role_id], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_trust_with_non_existant_trustee_returns_not_found(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=uuid.uuid4().hex, project_id=self.project_id, role_ids=[self.role_id], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.NOT_FOUND, ) def test_create_trust_with_trustee_as_trustor_returns_forbidden(self): ref = unit.new_trust_ref( trustor_user_id=self.trustee_user_id, trustee_user_id=self.user_id, project_id=self.project_id, role_ids=[self.role_id], ) # NOTE(lbragstad): This fails because the user making the request isn't # the trustor defined in the request. 
self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.FORBIDDEN, ) def test_create_trust_with_non_existant_project_returns_not_found(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=uuid.uuid4().hex, role_ids=[self.role_id], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.NOT_FOUND, ) def test_create_trust_with_non_existant_role_id_returns_not_found(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[uuid.uuid4().hex], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.NOT_FOUND, ) def test_create_trust_with_extra_attributes_fails(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) ref['roles'].append({'fake_key': 'fake_value'}) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_create_trust_with_non_existant_role_name_returns_not_found(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_names=[uuid.uuid4().hex], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.NOT_FOUND, ) def test_create_trust_with_role_name_ambiguous_returns_bad_request(self): # Create second role with the same name role_ref = unit.new_role_ref( name=self.role['name'], domain_id=uuid.uuid4().hex ) self.post('/roles', body={'role': role_ref}) ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_names=[self.role['name']], ) self.post( '/OS-TRUST/trusts', body={'trust': ref}, expected_status=http.client.BAD_REQUEST, ) def test_exercise_trust_scoped_token_without_impersonation(self): # create a new trust ref = 
unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # get a trust-scoped token as the trustee auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) resp = self.v3_create_token(auth_data) resp_body = resp.json_body['token'] self.assertValidProjectScopedTokenResponse(resp, self.trustee_user) self.assertEqual(self.trustee_user['id'], resp_body['user']['id']) self.assertEqual(self.trustee_user['name'], resp_body['user']['name']) self.assertEqual(self.domain['id'], resp_body['user']['domain']['id']) self.assertEqual( self.domain['name'], resp_body['user']['domain']['name'] ) self.assertEqual(self.project['id'], resp_body['project']['id']) self.assertEqual(self.project['name'], resp_body['project']['name']) def test_exercise_trust_scoped_token_with_impersonation(self): # create a new trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=True, expires=dict(minutes=1), role_ids=[self.role_id], ) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # get a trust-scoped token as the trustee auth_data = self.build_authentication_request( user_id=self.trustee_user['id'], password=self.trustee_user['password'], trust_id=trust['id'], ) resp = self.v3_create_token(auth_data) resp_body = resp.json_body['token'] self.assertValidProjectScopedTokenResponse(resp, self.user) self.assertEqual(self.user['id'], resp_body['user']['id']) self.assertEqual(self.user['name'], resp_body['user']['name']) self.assertEqual(self.domain['id'], resp_body['user']['domain']['id']) self.assertEqual( self.domain['name'], 
resp_body['user']['domain']['name'] ) self.assertEqual(self.project['id'], resp_body['project']['id']) self.assertEqual(self.project['name'], resp_body['project']['name']) def test_forbidden_trust_impersonation_in_redelegation(self): """Test forbiddance of impersonation in trust redelegation. Check that trustee not allowed to create a trust (with impersonation set to true) from a redelegated trust (with impersonation set to false) """ # create trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, role_ids=[self.role_id], allow_redelegation=True, ) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) auth_data = self.build_authentication_request( user_id=self.trustee_user_id, password=self.trustee_user['password'], trust_id=trust['id'], ) resp = self.v3_create_token(auth_data) # create third-party user, which will be trustee in trust created from # redelegated trust third_party_trustee = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) third_party_trustee_id = third_party_trustee['id'] # create trust from redelegated trust ref = unit.new_trust_ref( trustor_user_id=self.trustee_user_id, trustee_user_id=third_party_trustee_id, project_id=self.project_id, impersonation=True, role_ids=[self.role_id], ) ref['redelegated_trust_id'] = trust['id'] self.admin_request( path='/v3/OS-TRUST/trusts', body={'trust': ref}, token=resp.headers.get('X-Subject-Token'), method='POST', expected_status=http.client.FORBIDDEN, ) def test_trust_deleted_when_user_deleted(self): # create trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, role_ids=[self.role_id], allow_redelegation=True, ) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # list all trusts r = 
self.get('/OS-TRUST/trusts') self.assertEqual(1, len(r.result['trusts'])) # delete the trustee will delete the trust self.delete( '/users/{user_id}'.format(user_id=trust['trustee_user_id']) ) self.get( '/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']), expected_status=http.client.NOT_FOUND, ) # create another user as the new trustee trustee_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) trustee_user_id = trustee_user['id'] # create the trust again ref['trustee_user_id'] = trustee_user_id resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) r = self.get('/OS-TRUST/trusts') self.assertEqual(1, len(r.result['trusts'])) # delete the trustor will delete the trust self.delete( '/users/{user_id}'.format(user_id=trust['trustor_user_id']) ) # call the backend method directly to bypass authentication since the # user has been deleted. self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.get_trust, trust['id'] ) def test_trust_deleted_when_project_deleted(self): # create trust ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, role_ids=[self.role_id], allow_redelegation=True, ) resp = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(resp) # list all trusts r = self.get('/OS-TRUST/trusts') self.assertEqual(1, len(r.result['trusts'])) # delete the project will delete the trust. self.delete( '/projects/{project_id}'.format(project_id=trust['project_id']) ) # call the backend method directly to bypass authentication since the # user no longer has the assignment on the project. 
self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.get_trust, trust['id'] ) class TrustsWithApplicationCredentials(test_v3.RestfulTestCase): def setUp(self): super().setUp() self.trustee_user = unit.create_user( PROVIDERS.identity_api, domain_id=self.domain_id ) self.trustee_user_id = self.trustee_user['id'] def config_overrides(self): super().config_overrides() self.config_fixture.config( group='auth', methods='password,application_credential' ) def test_create_trust_with_application_credential(self): app_cred = { 'id': uuid.uuid4().hex, 'user_id': self.user_id, 'project_id': self.project_id, 'name': uuid.uuid4().hex, 'roles': [{'id': self.role_id}], 'secret': uuid.uuid4().hex, } app_cred_api = PROVIDERS.application_credential_api app_cred_api.create_application_credential(app_cred) auth_data = self.build_authentication_request( app_cred_id=app_cred['id'], secret=app_cred['secret'] ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) trust_body = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, role_ids=[self.role_id], ) self.post( path='/OS-TRUST/trusts', body={'trust': trust_body}, token=token_data.headers['x-subject-token'], expected_status=http.client.FORBIDDEN, ) def test_delete_trust_with_application_credential(self): ref = unit.new_trust_ref( trustor_user_id=self.user_id, trustee_user_id=self.trustee_user_id, project_id=self.project_id, impersonation=False, expires=dict(minutes=1), role_ids=[self.role_id], ) r = self.post('/OS-TRUST/trusts', body={'trust': ref}) trust = self.assertValidTrustResponse(r, ref) app_cred = { 'id': uuid.uuid4().hex, 'user_id': self.user_id, 'project_id': self.project_id, 'name': uuid.uuid4().hex, 'roles': [{'id': self.role_id}], 'secret': uuid.uuid4().hex, } app_cred_api = PROVIDERS.application_credential_api app_cred_api.create_application_credential(app_cred) auth_data = self.build_authentication_request( 
app_cred_id=app_cred['id'], secret=app_cred['secret'] ) token_data = self.v3_create_token( auth_data, expected_status=http.client.CREATED ) # delete the trust self.delete( path='/OS-TRUST/trusts/{trust_id}'.format(trust_id=trust['id']), token=token_data.headers['x-subject-token'], expected_status=http.client.FORBIDDEN, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_validation.py0000664000175000017500000040770400000000000023224 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import uuid from keystone.application_credential import schema as app_cred_schema from keystone.assignment import schema as assignment_schema from keystone.catalog import schema as catalog_schema from keystone.common import validation from keystone.common.validation import parameter_types from keystone.common.validation import validators from keystone.credential import schema as credential_schema from keystone import exception from keystone.federation import schema as federation_schema from keystone.identity.backends import resource_options as ro from keystone.identity import schema as identity_schema from keystone.limit import schema as limit_schema from keystone.oauth1 import schema as oauth1_schema from keystone.policy import schema as policy_schema from keystone.resource import schema as resource_schema from keystone.tests import unit from keystone.trust import schema as trust_schema """Example model to validate create requests against. Assume that this is the only backend for the create and validate schemas. This is just an example to show how a backend can be used to construct a schema. In Keystone, schemas are built according to the Identity API and the backends available in Keystone. This example does not mean that all schema in Keystone were strictly based on the SQL backends. 
class Entity(sql.ModelBase): __tablename__ = 'entity' attributes = ['id', 'name', 'domain_id', 'description'] id = sql.Column(sql.String(64), primary_key=True) name = sql.Column(sql.String(255), nullable=False) description = sql.Column(sql.Text(), nullable=True) enabled = sql.Column(sql.Boolean, default=True, nullable=False) url = sql.Column(sql.String(225), nullable=True) email = sql.Column(sql.String(64), nullable=True) """ # Test schema to validate create requests against _entity_properties = { 'name': parameter_types.name, 'description': validation.nullable(parameter_types.description), 'enabled': parameter_types.boolean, 'url': validation.nullable(parameter_types.url), 'email': validation.nullable(parameter_types.email), 'id_string': validation.nullable(parameter_types.id_string), } entity_create = { 'type': 'object', 'properties': _entity_properties, 'required': ['name'], 'additionalProperties': True, } entity_create_optional_body = { 'type': 'object', 'properties': _entity_properties, 'additionalProperties': True, } entity_update = { 'type': 'object', 'properties': _entity_properties, 'minProperties': 1, 'additionalProperties': True, } _VALID_ENABLED_FORMATS = [True, False] _INVALID_ENABLED_FORMATS = ['some string', 1, 0, 'True', 'False'] _INVALID_DESC_FORMATS = [False, 1, 2.0] _VALID_URLS = [ 'https://example.com', 'http://EXAMPLE.com/v3', 'http://localhost', 'http://127.0.0.1:5000', 'http://1.1.1.1', 'http://255.255.255.255', 'http://[::1]', 'http://[::1]:35357', 'http://[1::8]', 'http://[fe80::8%25eth0]', 'http://[::1.2.3.4]', 'http://[2001:DB8::1.2.3.4]', 'http://[::a:1.2.3.4]', 'http://[a::b:1.2.3.4]', 'http://[1:2:3:4:5:6:7:8]', 'http://[1:2:3:4:5:6:1.2.3.4]', 'http://[abcd:efAB:CDEF:1111:9999::]', ] _INVALID_URLS = [ False, 'this is not a URL', 1234, 'www.example.com', 'localhost', 'http//something.com', 'https//something.com', ' http://example.com', ] _VALID_FILTERS = [ {'interface': 'admin'}, {'region': 'US-WEST', 'interface': 'internal'}, ] 
_INVALID_FILTERS = ['some string', 1, 0, True, False] _INVALID_NAMES = [True, 24, ' ', '', 'a' * 256, None] class CommonValidationTestCase(unit.BaseTestCase): def test_nullable_type_only(self): bool_without_enum = copy.deepcopy(parameter_types.boolean) bool_without_enum.pop('enum') schema_type_only = { 'type': 'object', 'properties': {'test': validation.nullable(bool_without_enum)}, 'additionalProperties': False, 'required': ['test'], } # Null should be in the types self.assertIn('null', schema_type_only['properties']['test']['type']) # No Enum, and nullable should not have added it. self.assertNotIn('enum', schema_type_only['properties']['test'].keys()) validator = validators.SchemaValidator(schema_type_only) reqs_to_validate = [{'test': val} for val in [True, False, None]] for req in reqs_to_validate: validator.validate(req) def test_nullable_with_enum(self): schema_with_enum = { 'type': 'object', 'properties': { 'test': validation.nullable(parameter_types.boolean) }, 'additionalProperties': False, 'required': ['test'], } # Null should be in enum and type self.assertIn('null', schema_with_enum['properties']['test']['type']) self.assertIn(None, schema_with_enum['properties']['test']['enum']) validator = validators.SchemaValidator(schema_with_enum) reqs_to_validate = [{'test': val} for val in [True, False, None]] for req in reqs_to_validate: validator.validate(req) class EntityValidationTestCase(unit.BaseTestCase): def setUp(self): super().setUp() self.resource_name = 'some resource name' self.description = 'Some valid description' self.valid_enabled = True self.valid_url = 'http://example.com' self.valid_email = 'joe@example.com' self.create_schema_validator = validators.SchemaValidator( entity_create ) self.update_schema_validator = validators.SchemaValidator( entity_update ) def test_create_entity_with_all_valid_parameters_validates(self): """Validate all parameter values against test schema.""" request_to_validate = { 'name': self.resource_name, 'description': 
self.description, 'enabled': self.valid_enabled, 'url': self.valid_url, 'email': self.valid_email, } self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_only_required_valid_parameters_validates(self): """Validate correct for only parameters values against test schema.""" request_to_validate = {'name': self.resource_name} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_name_too_long_raises_exception(self): """Validate long names. Validate that an exception is raised when validating a string of 255+ characters passed in as a name. """ invalid_name = 'a' * 256 request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_name_too_short_raises_exception(self): """Validate short names. Test that an exception is raised when passing a string of length zero as a name parameter. """ request_to_validate = {'name': ''} self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_unicode_name_validates(self): """Test that we successfully validate a unicode string.""" request_to_validate = {'name': 'αβγδ'} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_enabled_format_raises_exception(self): """Validate invalid enabled formats. Test that an exception is raised when passing invalid boolean-like values as `enabled`. """ for format in _INVALID_ENABLED_FORMATS: request_to_validate = { 'name': self.resource_name, 'enabled': format, } self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_valid_enabled_formats_validates(self): """Validate valid enabled formats. Test that we have successful validation on boolean values for `enabled`. 
""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = { 'name': self.resource_name, 'enabled': valid_enabled, } # Make sure validation doesn't raise a validation exception self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_valid_urls_validates(self): """Test that proper urls are successfully validated.""" for valid_url in _VALID_URLS: request_to_validate = { 'name': self.resource_name, 'url': valid_url, } self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_urls_fails(self): """Test that an exception is raised when validating improper urls.""" for invalid_url in _INVALID_URLS: request_to_validate = { 'name': self.resource_name, 'url': invalid_url, } self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_valid_email_validates(self): """Validate email address. Test that we successfully validate properly formatted email addresses. """ request_to_validate = { 'name': self.resource_name, 'email': self.valid_email, } self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_email_fails(self): """Validate invalid email address. Test that an exception is raised when validating improperly formatted email addresses. 
""" request_to_validate = { 'name': self.resource_name, 'email': 'some invalid email value', } self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_valid_id_strings(self): """Validate acceptable id strings.""" valid_id_strings = [str(uuid.uuid4()), uuid.uuid4().hex, 'default'] for valid_id in valid_id_strings: request_to_validate = { 'name': self.resource_name, 'id_string': valid_id, } self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_invalid_id_strings(self): """Exception raised when using invalid id strings.""" long_string = 'A' * 65 invalid_id_strings = ['', long_string] for invalid_id in invalid_id_strings: request_to_validate = { 'name': self.resource_name, 'id_string': invalid_id, } self.assertRaises( exception.SchemaValidationError, self.create_schema_validator.validate, request_to_validate, ) def test_create_entity_with_null_id_string(self): """Validate that None is an acceptable optional string type.""" request_to_validate = {'name': self.resource_name, 'id_string': None} self.create_schema_validator.validate(request_to_validate) def test_create_entity_with_null_string_succeeds(self): """Exception raised when passing None on required id strings.""" request_to_validate = {'name': self.resource_name, 'id_string': None} self.create_schema_validator.validate(request_to_validate) def test_update_entity_with_no_parameters_fails(self): """At least one parameter needs to be present for an update.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate, ) def test_update_entity_with_all_parameters_valid_validates(self): """Simulate updating an entity by ID.""" request_to_validate = { 'name': self.resource_name, 'description': self.description, 'enabled': self.valid_enabled, 'url': self.valid_url, 'email': self.valid_email, } 
self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_a_valid_required_parameter_validates(self): """Succeed if a valid required parameter is provided.""" request_to_validate = {'name': self.resource_name} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_invalid_required_parameter_fails(self): """Fail if a provided required parameter is invalid.""" request_to_validate = {'name': 'a' * 256} self.assertRaises( exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate, ) def test_update_entity_with_a_null_optional_parameter_validates(self): """Optional parameters can be null to removed the value.""" request_to_validate = {'email': None} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_a_required_null_parameter_fails(self): """The `name` parameter can't be null.""" request_to_validate = {'name': None} self.assertRaises( exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate, ) def test_update_entity_with_a_valid_optional_parameter_validates(self): """Succeed with only a single valid optional parameter.""" request_to_validate = {'email': self.valid_email} self.update_schema_validator.validate(request_to_validate) def test_update_entity_with_invalid_optional_parameter_fails(self): """Fail when an optional parameter is invalid.""" request_to_validate = {'email': 0} self.assertRaises( exception.SchemaValidationError, self.update_schema_validator.validate, request_to_validate, ) class ProjectValidationTestCase(unit.BaseTestCase): """Test for V3 Project API validation.""" def setUp(self): super().setUp() self.project_name = 'My Project' create = resource_schema.project_create update = resource_schema.project_update self.create_project_validator = validators.SchemaValidator(create) self.update_project_validator = validators.SchemaValidator(update) def test_validate_project_request(self): """Test 
that we validate a project with `name` in request.""" request_to_validate = {'name': self.project_name} self.create_project_validator.validate(request_to_validate) def test_validate_project_request_without_name_fails(self): """Validate project request fails without name.""" request_to_validate = {'enabled': True} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_request_with_enabled(self): """Validate `enabled` as boolean-like values for projects.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = { 'name': self.project_name, 'enabled': valid_enabled, } self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'name': self.project_name, 'enabled': invalid_enabled, } self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_request_with_valid_description(self): """Test that we validate `description` in create project requests.""" request_to_validate = { 'name': self.project_name, 'description': 'My Project', } self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = {'name': self.project_name, 'description': False} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_request_with_name_too_long(self): """Exception is raised when `name` is too long.""" long_project_name = 'a' * 65 request_to_validate = {'name': long_project_name} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, 
request_to_validate, ) def test_validate_project_create_fails_with_invalid_name(self): """Exception when validating a create request with invalid `name`.""" for invalid_name in _INVALID_NAMES + ['a' * 65]: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_create_with_tags(self): request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', 'bar'], } self.create_project_validator.validate(request_to_validate) def test_validate_project_create_with_tags_invalid_char(self): invalid_chars = [',', '/', ',foo', 'foo/bar'] for char in invalid_chars: tag = uuid.uuid4().hex + char request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', tag], } self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_create_with_tag_name_too_long(self): invalid_name = 'a' * 256 request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', invalid_name], } self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_create_with_too_many_tags(self): tags = [uuid.uuid4().hex for _ in range(81)] request_to_validate = {'name': uuid.uuid4().hex, 'tags': tags} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_request_with_valid_parent_id(self): """Test that we validate `parent_id` in create project requests.""" # parent_id is nullable request_to_validate = {'name': self.project_name, 'parent_id': None} self.create_project_validator.validate(request_to_validate) request_to_validate = { 'name': self.project_name, 'parent_id': uuid.uuid4().hex, } self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_parent_id_fails(self): """Exception is raised when 
`parent_id` as a non-id value.""" request_to_validate = {'name': self.project_name, 'parent_id': False} self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) request_to_validate = { 'name': self.project_name, 'parent_id': 'fake project', } self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) def test_validate_project_update_request(self): """Test that we validate a project update request.""" request_to_validate = {'domain_id': uuid.uuid4().hex} self.update_project_validator.validate(request_to_validate) def test_validate_project_update_request_with_no_parameters_fails(self): """Exception is raised when updating project without parameters.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_update_request_with_name_too_long_fails(self): """Exception raised when updating a project with `name` too long.""" long_project_name = 'a' * 65 request_to_validate = {'name': long_project_name} self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_update_fails_with_invalid_name(self): """Exception when validating an update request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_update_with_tags(self): request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', 'bar'], } self.update_project_validator.validate(request_to_validate) def test_validate_project_update_with_tags_invalid_char(self): invalid_chars = [',', '/'] for char in invalid_chars: tag = uuid.uuid4().hex + char request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', tag], } 
self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_update_with_tag_name_too_long(self): invalid_name = 'a' * 256 request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', invalid_name], } self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_update_with_too_many_tags(self): tags = [uuid.uuid4().hex for _ in range(81)] request_to_validate = {'name': uuid.uuid4().hex, 'tags': tags} self.assertRaises( exception.SchemaValidationError, self.update_project_validator.validate, request_to_validate, ) def test_validate_project_create_request_with_valid_domain_id(self): """Test that we validate `domain_id` in create project requests.""" # domain_id is nullable for domain_id in [None, uuid.uuid4().hex]: request_to_validate = { 'name': self.project_name, 'domain_id': domain_id, } self.create_project_validator.validate(request_to_validate) def test_validate_project_request_with_invalid_domain_id_fails(self): """Exception is raised when `domain_id` is a non-id value.""" for domain_id in [False, 'fake_project']: request_to_validate = { 'name': self.project_name, 'domain_id': domain_id, } self.assertRaises( exception.SchemaValidationError, self.create_project_validator.validate, request_to_validate, ) class DomainValidationTestCase(unit.BaseTestCase): """Test for V3 Domain API validation.""" def setUp(self): super().setUp() self.domain_name = 'My Domain' create = resource_schema.domain_create update = resource_schema.domain_update self.create_domain_validator = validators.SchemaValidator(create) self.update_domain_validator = validators.SchemaValidator(update) def test_validate_domain_request(self): """Make sure we successfully validate a create domain request.""" request_to_validate = {'name': self.domain_name} self.create_domain_validator.validate(request_to_validate) def 
test_validate_domain_request_without_name_fails(self): """Make sure we raise an exception when `name` isn't included.""" request_to_validate = {'enabled': True} self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_request_with_enabled(self): """Validate `enabled` as boolean-like values for domains.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = { 'name': self.domain_name, 'enabled': valid_enabled, } self.create_domain_validator.validate(request_to_validate) def test_validate_domain_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'name': self.domain_name, 'enabled': invalid_enabled, } self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_request_with_valid_description(self): """Test that we validate `description` in create domain requests.""" request_to_validate = { 'name': self.domain_name, 'description': 'My Domain', } self.create_domain_validator.validate(request_to_validate) def test_validate_domain_request_with_invalid_description_fails(self): """Exception is raised when `description` is a non-string value.""" request_to_validate = {'name': self.domain_name, 'description': False} self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_request_with_name_too_long(self): """Exception is raised when `name` is too long.""" long_domain_name = 'a' * 65 request_to_validate = {'name': long_domain_name} self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_create_fails_with_invalid_name(self): """Exception when validating a create request with invalid `name`.""" for invalid_name in 
_INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_create_with_tags(self): request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', 'bar'], } self.create_domain_validator.validate(request_to_validate) def test_validate_domain_create_with_tags_invalid_char(self): invalid_chars = [',', '/'] for char in invalid_chars: tag = uuid.uuid4().hex + char request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', tag], } self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_create_with_tag_name_too_long(self): invalid_name = 'a' * 256 request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', invalid_name], } self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_create_with_too_many_tags(self): tags = [uuid.uuid4().hex for _ in range(81)] request_to_validate = {'name': uuid.uuid4().hex, 'tags': tags} self.assertRaises( exception.SchemaValidationError, self.create_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_request(self): """Test that we validate a domain update request.""" request_to_validate = {'domain_id': uuid.uuid4().hex} self.update_domain_validator.validate(request_to_validate) def test_validate_domain_update_request_with_no_parameters_fails(self): """Exception is raised when updating a domain without parameters.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_request_with_name_too_long_fails(self): """Exception raised when updating a domain with `name` too long.""" long_domain_name = 'a' * 65 request_to_validate = {'name': long_domain_name} self.assertRaises( 
exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_fails_with_invalid_name(self): """Exception when validating an update request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_with_tags(self): request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', 'bar'], } self.update_domain_validator.validate(request_to_validate) def test_validate_domain_update_with_tags_invalid_char(self): invalid_chars = [',', '/'] for char in invalid_chars: tag = uuid.uuid4().hex + char request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', tag], } self.assertRaises( exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_with_tag_name_too_long(self): invalid_name = 'a' * 256 request_to_validate = { 'name': uuid.uuid4().hex, 'tags': ['foo', invalid_name], } self.assertRaises( exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) def test_validate_domain_update_with_too_many_tags(self): tags = [uuid.uuid4().hex for _ in range(81)] request_to_validate = {'name': uuid.uuid4().hex, 'tags': tags} self.assertRaises( exception.SchemaValidationError, self.update_domain_validator.validate, request_to_validate, ) class RoleValidationTestCase(unit.BaseTestCase): """Test for V3 Role API validation.""" def setUp(self): super().setUp() self.role_name = 'My Role' create = assignment_schema.role_create update = assignment_schema.role_update self.create_role_validator = validators.SchemaValidator(create) self.update_role_validator = validators.SchemaValidator(update) def test_validate_role_request(self): """Test we can successfully validate a create role request.""" request_to_validate = {'name': 
self.role_name} self.create_role_validator.validate(request_to_validate) def test_validate_role_create_without_name_raises_exception(self): """Test that we raise an exception when `name` isn't included.""" request_to_validate = {'enabled': True} self.assertRaises( exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate, ) def test_validate_role_create_fails_with_invalid_name(self): """Exception when validating a create request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate, ) def test_validate_role_create_request_with_name_too_long_fails(self): """Exception raised when creating a role with `name` too long.""" long_role_name = 'a' * 256 request_to_validate = {'name': long_role_name} self.assertRaises( exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate, ) def test_validate_role_request_with_valid_description(self): """Test we can validate`description` in create role request.""" request_to_validate = { 'name': self.role_name, 'description': 'My Role', } self.create_role_validator.validate(request_to_validate) def test_validate_role_request_fails_with_invalid_description(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = {'name': self.role_name, 'description': False} self.assertRaises( exception.SchemaValidationError, self.create_role_validator.validate, request_to_validate, ) def test_validate_role_update_request(self): """Test that we validate a role update request.""" request_to_validate = {'name': 'My New Role'} self.update_role_validator.validate(request_to_validate) def test_validate_role_update_fails_with_invalid_name(self): """Exception when validating an update request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} 
self.assertRaises( exception.SchemaValidationError, self.update_role_validator.validate, request_to_validate, ) def test_validate_role_update_request_with_name_too_long_fails(self): """Exception raised when updating a role with `name` too long.""" long_role_name = 'a' * 256 request_to_validate = {'name': long_role_name} self.assertRaises( exception.SchemaValidationError, self.update_role_validator.validate, request_to_validate, ) class PolicyValidationTestCase(unit.BaseTestCase): """Test for V3 Policy API validation.""" def setUp(self): super().setUp() create = policy_schema.policy_create update = policy_schema.policy_update self.create_policy_validator = validators.SchemaValidator(create) self.update_policy_validator = validators.SchemaValidator(update) def test_validate_policy_succeeds(self): """Test that we validate a create policy request.""" request_to_validate = { 'blob': 'some blob information', 'type': 'application/json', } self.create_policy_validator.validate(request_to_validate) def test_validate_policy_without_blob_fails(self): """Exception raised without `blob` in request.""" request_to_validate = {'type': 'application/json'} self.assertRaises( exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate, ) def test_validate_policy_without_type_fails(self): """Exception raised without `type` in request.""" request_to_validate = {'blob': 'some blob information'} self.assertRaises( exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate, ) def test_validate_policy_create_with_extra_parameters_succeeds(self): """Validate policy create with extra parameters.""" request_to_validate = { 'blob': 'some blob information', 'type': 'application/json', 'extra': 'some extra stuff', } self.create_policy_validator.validate(request_to_validate) def test_validate_policy_create_with_invalid_type_fails(self): """Exception raised when `blob` and `type` are boolean.""" for prop in ['blob', 'type']: 
request_to_validate = {prop: False} self.assertRaises( exception.SchemaValidationError, self.create_policy_validator.validate, request_to_validate, ) def test_validate_policy_update_without_parameters_fails(self): """Exception raised when updating policy without parameters.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_policy_validator.validate, request_to_validate, ) def test_validate_policy_update_with_extra_parameters_succeeds(self): """Validate policy update request with extra parameters.""" request_to_validate = { 'blob': 'some blob information', 'type': 'application/json', 'extra': 'some extra stuff', } self.update_policy_validator.validate(request_to_validate) def test_validate_policy_update_succeeds(self): """Test that we validate a policy update request.""" request_to_validate = { 'blob': 'some blob information', 'type': 'application/json', } self.update_policy_validator.validate(request_to_validate) def test_validate_policy_update_with_invalid_type_fails(self): """Exception raised when invalid `type` on policy update.""" for prop in ['blob', 'type']: request_to_validate = {prop: False} self.assertRaises( exception.SchemaValidationError, self.update_policy_validator.validate, request_to_validate, ) class CredentialValidationTestCase(unit.BaseTestCase): """Test for V3 Credential API validation.""" def setUp(self): super().setUp() create = credential_schema.credential_create update = credential_schema.credential_update self.create_credential_validator = validators.SchemaValidator(create) self.update_credential_validator = validators.SchemaValidator(update) def test_validate_credential_succeeds(self): """Test that we validate a credential request.""" request_to_validate = { 'blob': 'some string', 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex, } self.create_credential_validator.validate(request_to_validate) def test_validate_credential_without_blob_fails(self): """Exception raised 
without `blob` in create request.""" request_to_validate = {'type': 'ec2', 'user_id': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate, ) def test_validate_credential_without_user_id_fails(self): """Exception raised without `user_id` in create request.""" request_to_validate = {'blob': 'some credential blob', 'type': 'ec2'} self.assertRaises( exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate, ) def test_validate_credential_without_type_fails(self): """Exception raised without `type` in create request.""" request_to_validate = { 'blob': 'some credential blob', 'user_id': uuid.uuid4().hex, } self.assertRaises( exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate, ) def test_validate_credential_ec2_without_project_id_fails(self): """Validate `project_id` is required for ec2. Test that a SchemaValidationError is raised when type is ec2 and no `project_id` is provided in create request. """ request_to_validate = { 'blob': 'some credential blob', 'type': 'ec2', 'user_id': uuid.uuid4().hex, } self.assertRaises( exception.SchemaValidationError, self.create_credential_validator.validate, request_to_validate, ) def test_validate_credential_with_project_id_succeeds(self): """Test that credential request works for all types.""" cred_types = ['ec2', 'cert', uuid.uuid4().hex] for c_type in cred_types: request_to_validate = { 'blob': 'some blob', 'project_id': uuid.uuid4().hex, 'type': c_type, 'user_id': uuid.uuid4().hex, } # Make sure an exception isn't raised self.create_credential_validator.validate(request_to_validate) def test_validate_credential_non_ec2_without_project_id_succeeds(self): """Validate `project_id` is not required for non-ec2. Test that create request without `project_id` succeeds for any non-ec2 credential. 
""" cred_types = ['cert', uuid.uuid4().hex] for c_type in cred_types: request_to_validate = { 'blob': 'some blob', 'type': c_type, 'user_id': uuid.uuid4().hex, } # Make sure an exception isn't raised self.create_credential_validator.validate(request_to_validate) def test_validate_credential_with_extra_parameters_succeeds(self): """Validate create request with extra parameters.""" request_to_validate = { 'blob': 'some string', 'extra': False, 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex, } self.create_credential_validator.validate(request_to_validate) def test_validate_credential_update_succeeds(self): """Test that a credential request is properly validated.""" request_to_validate = { 'blob': 'some string', 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex, } self.update_credential_validator.validate(request_to_validate) def test_validate_credential_update_without_parameters_fails(self): """Exception is raised on update without parameters.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_credential_validator.validate, request_to_validate, ) def test_validate_credential_update_with_extra_parameters_succeeds(self): """Validate credential update with extra parameters.""" request_to_validate = { 'blob': 'some string', 'extra': False, 'project_id': uuid.uuid4().hex, 'type': 'ec2', 'user_id': uuid.uuid4().hex, } self.update_credential_validator.validate(request_to_validate) class RegionValidationTestCase(unit.BaseTestCase): """Test for V3 Region API validation.""" def setUp(self): super().setUp() self.region_name = 'My Region' create = catalog_schema.region_create update = catalog_schema.region_update self.create_region_validator = validators.SchemaValidator(create) self.update_region_validator = validators.SchemaValidator(update) def test_validate_region_request(self): """Test that we validate a basic region request.""" # Create_region doesn't take any parameters in the 
request so let's # make sure we cover that case. request_to_validate = {} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_request_with_parameters(self): """Test that we validate a region request with parameters.""" request_to_validate = { 'id': 'us-east', 'description': 'US East Region', 'parent_region_id': 'US Region', } self.create_region_validator.validate(request_to_validate) def test_validate_region_create_with_uuid(self): """Test that we validate a region request with a UUID as the id.""" request_to_validate = { 'id': uuid.uuid4().hex, 'description': 'US East Region', 'parent_region_id': uuid.uuid4().hex, } self.create_region_validator.validate(request_to_validate) def test_validate_region_create_fails_with_invalid_region_id(self): """Exception raised when passing invalid `id` in request.""" request_to_validate = {'id': 1234, 'description': 'US East Region'} self.assertRaises( exception.SchemaValidationError, self.create_region_validator.validate, request_to_validate, ) def test_validate_region_create_succeeds_with_extra_parameters(self): """Validate create region request with extra values.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.create_region_validator.validate(request_to_validate) def test_validate_region_create_succeeds_with_no_parameters(self): """Validate create region request with no parameters.""" request_to_validate = {} self.create_region_validator.validate(request_to_validate) def test_validate_region_update_succeeds(self): """Test that we validate a region update request.""" request_to_validate = { 'id': 'us-west', 'description': 'US West Region', 'parent_region_id': 'us-region', } self.update_region_validator.validate(request_to_validate) def test_validate_region_update_succeeds_with_extra_parameters(self): """Validate extra attributes in the region update request.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_region_validator.validate(request_to_validate) 
def test_validate_region_update_fails_with_no_parameters(self): """Exception raised when passing no parameters in a region update.""" # An update request should consist of at least one value to update request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_region_validator.validate, request_to_validate, ) class ServiceValidationTestCase(unit.BaseTestCase): """Test for V3 Service API validation.""" def setUp(self): super().setUp() create = catalog_schema.service_create update = catalog_schema.service_update self.create_service_validator = validators.SchemaValidator(create) self.update_service_validator = validators.SchemaValidator(update) def test_validate_service_create_succeeds(self): """Test that we validate a service create request.""" request_to_validate = { 'name': 'Nova', 'description': 'OpenStack Compute Service', 'enabled': True, 'type': 'compute', } self.create_service_validator.validate(request_to_validate) def test_validate_service_create_succeeds_with_required_parameters(self): """Validate a service create request with the required parameters.""" # The only parameter type required for service creation is 'type' request_to_validate = {'type': 'compute'} self.create_service_validator.validate(request_to_validate) def test_validate_service_create_fails_without_type(self): """Exception raised when trying to create a service without `type`.""" request_to_validate = {'name': 'Nova'} self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_create_succeeds_with_extra_parameters(self): """Test that extra parameters pass validation on create service.""" request_to_validate = { 'other_attr': uuid.uuid4().hex, 'type': uuid.uuid4().hex, } self.create_service_validator.validate(request_to_validate) def test_validate_service_create_succeeds_with_valid_enabled(self): """Validate boolean values as enabled values on service create.""" for valid_enabled in 
_VALID_ENABLED_FORMATS: request_to_validate = { 'enabled': valid_enabled, 'type': uuid.uuid4().hex, } self.create_service_validator.validate(request_to_validate) def test_validate_service_create_fails_with_invalid_enabled(self): """Exception raised when boolean-like parameters as `enabled`. On service create, make sure an exception is raised if `enabled` is not a boolean value. """ for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'enabled': invalid_enabled, 'type': uuid.uuid4().hex, } self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_create_fails_when_name_too_long(self): """Exception raised when `name` is greater than 255 characters.""" long_name = 'a' * 256 request_to_validate = {'type': 'compute', 'name': long_name} self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_create_fails_when_name_too_short(self): """Exception is raised when `name` is too short.""" request_to_validate = {'type': 'compute', 'name': ''} self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_create_fails_when_type_too_long(self): """Exception is raised when `type` is too long.""" long_type_name = 'a' * 256 request_to_validate = {'type': long_type_name} self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_create_fails_when_type_too_short(self): """Exception is raised when `type` is too short.""" request_to_validate = {'type': ''} self.assertRaises( exception.SchemaValidationError, self.create_service_validator.validate, request_to_validate, ) def test_validate_service_update_request_succeeds(self): """Test that we validate a service update request.""" request_to_validate = { 'name': 'Cinder', 'type': 'volume', 
'description': 'OpenStack Block Storage', 'enabled': False, } self.update_service_validator.validate(request_to_validate) def test_validate_service_update_fails_with_no_parameters(self): """Exception raised when updating a service without values.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) def test_validate_service_update_succeeds_with_extra_parameters(self): """Validate updating a service with extra parameters.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_service_validator.validate(request_to_validate) def test_validate_service_update_succeeds_with_valid_enabled(self): """Validate boolean formats as `enabled` on service update.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled} self.update_service_validator.validate(request_to_validate) def test_validate_service_update_fails_with_invalid_enabled(self): """Exception raised when boolean-like values as `enabled`.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'enabled': invalid_enabled} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) def test_validate_service_update_fails_with_name_too_long(self): """Exception is raised when `name` is too long on update.""" long_name = 'a' * 256 request_to_validate = {'name': long_name} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) def test_validate_service_update_fails_with_name_too_short(self): """Exception is raised when `name` is too short on update.""" request_to_validate = {'name': ''} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) def test_validate_service_update_fails_with_type_too_long(self): """Exception is raised when `type` is too long on update.""" long_type_name = 'a' * 256 
request_to_validate = {'type': long_type_name} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) def test_validate_service_update_fails_with_type_too_short(self): """Exception is raised when `type` is too short on update.""" request_to_validate = {'type': ''} self.assertRaises( exception.SchemaValidationError, self.update_service_validator.validate, request_to_validate, ) class EndpointValidationTestCase(unit.BaseTestCase): """Test for V3 Endpoint API validation.""" def setUp(self): super().setUp() create = catalog_schema.endpoint_create update = catalog_schema.endpoint_update self.create_endpoint_validator = validators.SchemaValidator(create) self.update_endpoint_validator = validators.SchemaValidator(update) def test_validate_endpoint_request_succeeds(self): """Test that we validate an endpoint request.""" request_to_validate = { 'enabled': True, 'interface': 'admin', 'region_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_succeeds_with_required_parameters(self): """Validate an endpoint request with only the required parameters.""" # According to the Identity V3 API endpoint creation requires # 'service_id', 'interface', and 'url' request_to_validate = { 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/', } self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_succeeds_with_valid_enabled(self): """Validate an endpoint with boolean values. Validate boolean values as `enabled` in endpoint create requests. 
""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = { 'enabled': valid_enabled, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/', } self.create_endpoint_validator.validate(request_to_validate) def test_validate_create_endpoint_fails_with_invalid_enabled(self): """Exception raised when boolean-like values as `enabled`.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'enabled': invalid_enabled, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_succeeds_with_extra_parameters(self): """Test that extra parameters pass validation on create endpoint.""" request_to_validate = { 'other_attr': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'interface': 'public', 'url': 'https://service.example.com:5000/', } self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_fails_without_service_id(self): """Exception raised when `service_id` isn't in endpoint request.""" request_to_validate = { 'interface': 'public', 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_fails_without_interface(self): """Exception raised when `interface` isn't in endpoint request.""" request_to_validate = { 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_fails_without_url(self): """Exception raised when `url` isn't in endpoint request.""" request_to_validate = { 'service_id': uuid.uuid4().hex, 'interface': 'public', } self.assertRaises( 
exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_succeeds_with_url(self): """Validate `url` attribute in endpoint create request.""" request_to_validate = { 'service_id': uuid.uuid4().hex, 'interface': 'public', } for url in _VALID_URLS: request_to_validate['url'] = url self.create_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_create_fails_with_invalid_url(self): """Exception raised when passing invalid `url` in request.""" request_to_validate = { 'service_id': uuid.uuid4().hex, 'interface': 'public', } for url in _INVALID_URLS: request_to_validate['url'] = url self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_fails_with_invalid_interface(self): """Exception raised with invalid `interface`.""" request_to_validate = { 'interface': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_create_fails_with_invalid_region_id(self): """Exception raised when passing invalid `region(_id)` in request.""" request_to_validate = { 'interface': 'admin', 'region_id': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) request_to_validate = { 'interface': 'admin', 'region': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_update_fails_with_invalid_enabled(self): """Exception raised when `enabled` is boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: 
request_to_validate = {'enabled': invalid_enabled} self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_update_succeeds_with_valid_enabled(self): """Validate `enabled` as boolean values.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled} self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_invalid_interface(self): """Exception raised when invalid `interface` on endpoint update.""" request_to_validate = { 'interface': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_update_request_succeeds(self): """Test that we validate an endpoint update request.""" request_to_validate = { 'enabled': True, 'interface': 'admin', 'region_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_no_parameters(self): """Exception raised when no parameters on endpoint update.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_update_succeeds_with_extra_parameters(self): """Test that extra parameters pass validation on update endpoint.""" request_to_validate = { 'enabled': True, 'interface': 'admin', 'region_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', 'other_attr': uuid.uuid4().hex, } self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_succeeds_with_url(self): """Validate `url` attribute in endpoint update request.""" request_to_validate = { 'service_id': 
uuid.uuid4().hex, 'interface': 'public', } for url in _VALID_URLS: request_to_validate['url'] = url self.update_endpoint_validator.validate(request_to_validate) def test_validate_endpoint_update_fails_with_invalid_url(self): """Exception raised when passing invalid `url` in request.""" request_to_validate = { 'service_id': uuid.uuid4().hex, 'interface': 'public', } for url in _INVALID_URLS: request_to_validate['url'] = url self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) def test_validate_endpoint_update_fails_with_invalid_region_id(self): """Exception raised when passing invalid `region(_id)` in request.""" request_to_validate = { 'interface': 'admin', 'region_id': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) request_to_validate = { 'interface': 'admin', 'region': 1234, 'service_id': uuid.uuid4().hex, 'url': 'https://service.example.com:5000/', } self.assertRaises( exception.SchemaValidationError, self.update_endpoint_validator.validate, request_to_validate, ) class EndpointGroupValidationTestCase(unit.BaseTestCase): """Test for V3 Endpoint Group API validation.""" def setUp(self): super().setUp() create = catalog_schema.endpoint_group_create update = catalog_schema.endpoint_group_update self.create_endpoint_grp_validator = validators.SchemaValidator(create) self.update_endpoint_grp_validator = validators.SchemaValidator(update) def test_validate_endpoint_group_request_succeeds(self): """Test that we validate an endpoint group request.""" request_to_validate = { 'description': 'endpoint group description', 'filters': {'interface': 'admin'}, 'name': 'endpoint_group_name', } self.create_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_create_succeeds_with_req_parameters(self): """Validate required endpoint group 
parameters. This test ensure that validation succeeds with only the required parameters passed for creating an endpoint group. """ request_to_validate = { 'filters': {'interface': 'admin'}, 'name': 'endpoint_group_name', } self.create_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_create_succeeds_with_valid_filters(self): """Validate `filters` in endpoint group create requests.""" request_to_validate = { 'description': 'endpoint group description', 'name': 'endpoint_group_name', } for valid_filters in _VALID_FILTERS: request_to_validate['filters'] = valid_filters self.create_endpoint_grp_validator.validate(request_to_validate) def test_validate_create_endpoint_group_fails_with_invalid_filters(self): """Validate invalid `filters` value in endpoint group parameters. This test ensures that exception is raised when non-dict values is used as `filters` in endpoint group create request. """ request_to_validate = { 'description': 'endpoint group description', 'name': 'endpoint_group_name', } for invalid_filters in _INVALID_FILTERS: request_to_validate['filters'] = invalid_filters self.assertRaises( exception.SchemaValidationError, self.create_endpoint_grp_validator.validate, request_to_validate, ) def test_validate_endpoint_group_create_fails_without_name(self): """Exception raised when `name` isn't in endpoint group request.""" request_to_validate = { 'description': 'endpoint group description', 'filters': {'interface': 'admin'}, } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_grp_validator.validate, request_to_validate, ) def test_validate_endpoint_group_create_fails_without_filters(self): """Exception raised when `filters` isn't in endpoint group request.""" request_to_validate = { 'description': 'endpoint group description', 'name': 'endpoint_group_name', } self.assertRaises( exception.SchemaValidationError, self.create_endpoint_grp_validator.validate, request_to_validate, ) def 
test_validate_endpoint_group_update_request_succeeds(self): """Test that we validate an endpoint group update request.""" request_to_validate = { 'description': 'endpoint group description', 'filters': {'interface': 'admin'}, 'name': 'endpoint_group_name', } self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_fails_with_no_parameters(self): """Exception raised when no parameters on endpoint group update.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_endpoint_grp_validator.validate, request_to_validate, ) def test_validate_endpoint_group_update_succeeds_with_name(self): """Validate request with only `name` in endpoint group update. This test ensures that passing only a `name` passes validation on update endpoint group request. """ request_to_validate = {'name': 'endpoint_group_name'} self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_succeeds_with_valid_filters(self): """Validate `filters` as dict values.""" for valid_filters in _VALID_FILTERS: request_to_validate = {'filters': valid_filters} self.update_endpoint_grp_validator.validate(request_to_validate) def test_validate_endpoint_group_update_fails_with_invalid_filters(self): """Exception raised when passing invalid `filters` in request.""" for invalid_filters in _INVALID_FILTERS: request_to_validate = {'filters': invalid_filters} self.assertRaises( exception.SchemaValidationError, self.update_endpoint_grp_validator.validate, request_to_validate, ) class TrustValidationTestCase(unit.BaseTestCase): """Test for V3 Trust API validation.""" _valid_roles = [ {'name': 'member'}, {'id': uuid.uuid4().hex}, {'id': str(uuid.uuid4())}, {'name': '_member_'}, ] _invalid_roles = [False, True, 123, None] def setUp(self): super().setUp() create = trust_schema.trust_create self.create_trust_validator = validators.SchemaValidator(create) def test_validate_trust_succeeds(self): 
"""Test that we can validate a trust request.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_all_parameters_succeeds(self): """Test that we can validate a trust request with all parameters.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'project_id': uuid.uuid4().hex, 'roles': [{'id': uuid.uuid4().hex}, {'id': uuid.uuid4().hex}], 'expires_at': 'some timestamp', 'remaining_uses': 2, } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_without_trustor_id_fails(self): """Validate trust request fails without `trustor_id`.""" request_to_validate = { 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_without_trustee_id_fails(self): """Validate trust request fails without `trustee_id`.""" request_to_validate = { 'trusor_user_id': uuid.uuid4().hex, 'impersonation': False, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_without_impersonation_fails(self): """Validate trust request fails without `impersonation`.""" request_to_validate = { 'trustee_user_id': uuid.uuid4().hex, 'trustor_user_id': uuid.uuid4().hex, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_with_extra_parameters_succeeds(self): """Test that we can validate a trust request with extra parameters.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'project_id': uuid.uuid4().hex, 'roles': [{'id': uuid.uuid4().hex}, {'id': uuid.uuid4().hex}], 
'expires_at': 'some timestamp', 'remaining_uses': 2, 'extra': 'something extra!', } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_invalid_impersonation_fails(self): """Validate trust request with invalid `impersonation` fails.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': 2, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_with_null_remaining_uses_succeeds(self): """Validate trust request with null `remaining_uses`.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'remaining_uses': None, } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_remaining_uses_succeeds(self): """Validate trust request with `remaining_uses` succeeds.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'remaining_uses': 2, } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_period_in_user_id_string(self): """Validate trust request with a period in the user id string.""" request_to_validate = { 'trustor_user_id': 'john.smith', 'trustee_user_id': 'joe.developer', 'impersonation': False, } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_invalid_expires_at_fails(self): """Validate trust request with invalid `expires_at` fails.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'expires_at': 3, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_with_role_types_succeeds(self): """Validate trust request with `roles` succeeds.""" for role in self._valid_roles: request_to_validate = { 
'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'roles': [role], } self.create_trust_validator.validate(request_to_validate) def test_validate_trust_with_invalid_role_type_fails(self): """Validate trust request with invalid `roles` fails.""" for role in self._invalid_roles: request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'roles': role, } self.assertRaises( exception.SchemaValidationError, self.create_trust_validator.validate, request_to_validate, ) def test_validate_trust_with_list_of_valid_roles_succeeds(self): """Validate trust request with a list of valid `roles`.""" request_to_validate = { 'trustor_user_id': uuid.uuid4().hex, 'trustee_user_id': uuid.uuid4().hex, 'impersonation': False, 'roles': self._valid_roles, } self.create_trust_validator.validate(request_to_validate) class ServiceProviderValidationTestCase(unit.BaseTestCase): """Test for V3 Service Provider API validation.""" def setUp(self): super().setUp() self.valid_auth_url = 'https://' + uuid.uuid4().hex + '.com' self.valid_sp_url = 'https://' + uuid.uuid4().hex + '.com' create = federation_schema.service_provider_create update = federation_schema.service_provider_update self.create_sp_validator = validators.SchemaValidator(create) self.update_sp_validator = validators.SchemaValidator(update) def test_validate_sp_request(self): """Test that we validate `auth_url` and `sp_url` in request.""" request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, } self.create_sp_validator.validate(request_to_validate) def test_validate_sp_request_with_invalid_auth_url_fails(self): """Validate request fails with invalid `auth_url`.""" request_to_validate = { 'auth_url': uuid.uuid4().hex, 'sp_url': self.valid_sp_url, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def 
test_validate_sp_request_with_invalid_sp_url_fails(self): """Validate request fails with invalid `sp_url`.""" request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': uuid.uuid4().hex, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_request_without_auth_url_fails(self): """Validate request fails without `auth_url`.""" request_to_validate = {'sp_url': self.valid_sp_url} self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) request_to_validate = {'auth_url': None, 'sp_url': self.valid_sp_url} self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_request_without_sp_url_fails(self): """Validate request fails without `sp_url`.""" request_to_validate = { 'auth_url': self.valid_auth_url, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': None, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_request_with_enabled(self): """Validate `enabled` as boolean-like values.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, 'enabled': valid_enabled, } self.create_sp_validator.validate(request_to_validate) def test_validate_sp_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, 'enabled': invalid_enabled, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_request_with_valid_description(self): 
"""Test that we validate `description` in create requests.""" request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, 'description': 'My Service Provider', } self.create_sp_validator.validate(request_to_validate) def test_validate_sp_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = { 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, 'description': False, } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_request_with_extra_field_fails(self): """Exception raised when passing extra fields in the body.""" # 'id' can't be passed in the body since it is passed in the URL request_to_validate = { 'id': 'ACME', 'auth_url': self.valid_auth_url, 'sp_url': self.valid_sp_url, 'description': 'My Service Provider', } self.assertRaises( exception.SchemaValidationError, self.create_sp_validator.validate, request_to_validate, ) def test_validate_sp_update_request(self): """Test that we validate a update request.""" request_to_validate = {'description': uuid.uuid4().hex} self.update_sp_validator.validate(request_to_validate) def test_validate_sp_update_request_with_no_parameters_fails(self): """Exception is raised when updating without parameters.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate, ) def test_validate_sp_update_request_with_invalid_auth_url_fails(self): """Exception raised when updating with invalid `auth_url`.""" request_to_validate = {'auth_url': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate, ) request_to_validate = {'auth_url': None} self.assertRaises( exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate, ) def 
test_validate_sp_update_request_with_invalid_sp_url_fails(self): """Exception raised when updating with invalid `sp_url`.""" request_to_validate = {'sp_url': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate, ) request_to_validate = {'sp_url': None} self.assertRaises( exception.SchemaValidationError, self.update_sp_validator.validate, request_to_validate, ) class UserValidationTestCase(unit.BaseTestCase): """Test for V3 User API validation.""" def setUp(self): super().setUp() self.user_name = uuid.uuid4().hex create = identity_schema.user_create update = identity_schema.user_update self.create_user_validator = validators.SchemaValidator(create) self.update_user_validator = validators.SchemaValidator(update) def test_validate_user_create_request_succeeds(self): """Test that validating a user create request succeeds.""" request_to_validate = {'name': self.user_name} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_with_all_valid_parameters_succeeds(self): """Test that validating a user create request succeeds.""" request_to_validate = unit.new_user_ref( domain_id=uuid.uuid4().hex, name=self.user_name ) self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_without_name(self): """Exception raised when validating a user without name.""" request_to_validate = {'email': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_validate_user_create_succeeds_with_valid_enabled_formats(self): """Validate acceptable enabled formats in create user requests.""" for enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'name': self.user_name, 'enabled': enabled} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_with_invalid_enabled_formats(self): """Exception raised when enabled is not an acceptable 
format.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = { 'name': self.user_name, 'enabled': invalid_enabled, } self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_validate_user_create_succeeds_with_extra_attributes(self): """Validate extra parameters on user create requests.""" request_to_validate = { 'name': self.user_name, 'other_attr': uuid.uuid4().hex, } self.create_user_validator.validate(request_to_validate) def test_validate_user_create_succeeds_with_password_of_zero_length(self): """Validate empty password on user create requests.""" request_to_validate = {'name': self.user_name, 'password': ''} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_succeeds_with_null_password(self): """Validate that password is nullable on create user.""" request_to_validate = {'name': self.user_name, 'password': None} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_with_invalid_password_type(self): """Exception raised when user password is of the wrong type.""" request_to_validate = {'name': self.user_name, 'password': True} self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_validate_user_create_succeeds_with_null_description(self): """Validate that description can be nullable on create user.""" request_to_validate = {'name': self.user_name, 'description': None} self.create_user_validator.validate(request_to_validate) def test_validate_user_create_fails_with_invalid_name(self): """Exception when validating a create request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_validate_user_update_succeeds(self): """Validate an update user request.""" 
request_to_validate = {'email': uuid.uuid4().hex} self.update_user_validator.validate(request_to_validate) def test_validate_user_update_fails_with_no_parameters(self): """Exception raised when updating nothing.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_user_validator.validate, request_to_validate, ) def test_validate_user_update_succeeds_with_extra_parameters(self): """Validate user update requests with extra parameters.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_user_validator.validate(request_to_validate) def test_validate_user_update_fails_with_invalid_name(self): """Exception when validating an update request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.update_user_validator.validate, request_to_validate, ) def test_user_create_succeeds_with_empty_options(self): request_to_validate = {'name': self.user_name, 'options': {}} self.create_user_validator.validate(request_to_validate) def test_user_create_options_fails_invalid_option(self): request_to_validate = { 'name': self.user_name, 'options': {'whatever': True}, } self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_user_create_with_options_change_password_required(self): request_to_validate = { 'name': self.user_name, 'options': {ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: True}, } self.create_user_validator.validate(request_to_validate) def test_user_create_options_change_password_required_wrong_type(self): request_to_validate = { 'name': self.user_name, 'options': {ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: 'whatever'}, } self.assertRaises( exception.SchemaValidationError, self.create_user_validator.validate, request_to_validate, ) def test_user_create_options_change_password_required_none(self): request_to_validate = { 'name': self.user_name, 
'options': {ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: None}, } self.create_user_validator.validate(request_to_validate) def test_user_update_with_options_change_password_required(self): request_to_validate = { 'options': {ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: False} } self.update_user_validator.validate(request_to_validate) def test_user_create_with_options_lockout_password(self): request_to_validate = { 'name': self.user_name, 'options': {ro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name: True}, } self.create_user_validator.validate(request_to_validate) def test_user_update_with_options_lockout_password(self): request_to_validate = { 'options': {ro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name: False} } self.update_user_validator.validate(request_to_validate) def test_user_update_with_two_options(self): request_to_validate = { 'options': { ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: True, ro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name: True, } } self.update_user_validator.validate(request_to_validate) def test_user_create_with_two_options(self): request_to_validate = { 'name': self.user_name, 'options': { ro.IGNORE_CHANGE_PASSWORD_OPT.option_name: False, ro.IGNORE_LOCKOUT_ATTEMPT_OPT.option_name: True, }, } self.create_user_validator.validate(request_to_validate) def test_user_create_with_mfa_rules(self): request_to_validate = { 'name': self.user_name, 'options': { ro.MFA_RULES_OPT.option_name: [ [uuid.uuid4().hex, uuid.uuid4().hex], [uuid.uuid4().hex], ] }, } self.create_user_validator.validate(request_to_validate) def test_user_update_with_mfa_rules(self): request_to_validate = { 'options': { ro.MFA_RULES_OPT.option_name: [ [uuid.uuid4().hex, uuid.uuid4().hex], [uuid.uuid4().hex], ] } } self.update_user_validator.validate(request_to_validate) def test_user_create_with_mfa_rules_enabled(self): request_to_validate = { 'name': self.user_name, 'options': {ro.MFA_ENABLED_OPT.option_name: True}, } self.create_user_validator.validate(request_to_validate) def 
test_user_update_mfa_rules_enabled(self): request_to_validate = { 'options': {ro.MFA_ENABLED_OPT.option_name: False} } self.update_user_validator.validate(request_to_validate) def test_user_option_validation_with_invalid_mfa_rules_fails(self): # Test both json schema validation and the validator method in # keystone.identity.backends.resource_options test_cases = [ # Main Element Not an Array (True, TypeError), # Sub-Element Not an Array ([True, False], TypeError), # Sub-element Element not string ([[True], [True, False]], TypeError), # Duplicate sub-array ([['duplicate_array'] for x in range(0, 2)], ValueError), # Empty Sub element ([[uuid.uuid4().hex], []], ValueError), # Duplicate strings in sub-element ([['duplicate' for x in range(0, 2)]], ValueError), ] for ruleset, exception_class in test_cases: request_to_validate = { 'options': {ro.MFA_RULES_OPT.option_name: ruleset} } # JSON Schema Validation self.assertRaises( exception.SchemaValidationError, self.update_user_validator.validate, request_to_validate, ) # Data Store Validation self.assertRaises( exception_class, ro._mfa_rules_validator_list_of_lists_of_strings_no_duplicates, ruleset, ) class GroupValidationTestCase(unit.BaseTestCase): """Test for V3 Group API validation.""" def setUp(self): super().setUp() self.group_name = uuid.uuid4().hex create = identity_schema.group_create update = identity_schema.group_update self.create_group_validator = validators.SchemaValidator(create) self.update_group_validator = validators.SchemaValidator(update) def test_validate_group_create_succeeds(self): """Validate create group requests.""" request_to_validate = {'name': self.group_name} self.create_group_validator.validate(request_to_validate) def test_validate_group_create_succeeds_with_all_parameters(self): """Validate create group requests with all parameters.""" request_to_validate = { 'name': self.group_name, 'description': uuid.uuid4().hex, 'domain_id': uuid.uuid4().hex, } 
self.create_group_validator.validate(request_to_validate) def test_validate_group_create_fails_without_group_name(self): """Exception raised when group name is not provided in request.""" request_to_validate = {'description': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_group_validator.validate, request_to_validate, ) def test_validate_group_create_succeeds_with_extra_parameters(self): """Validate extra attributes on group create requests.""" request_to_validate = { 'name': self.group_name, 'other_attr': uuid.uuid4().hex, } self.create_group_validator.validate(request_to_validate) def test_validate_group_create_fails_with_invalid_name(self): """Exception when validating a create request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.create_group_validator.validate, request_to_validate, ) def test_validate_group_update_succeeds(self): """Validate group update requests.""" request_to_validate = {'description': uuid.uuid4().hex} self.update_group_validator.validate(request_to_validate) def test_validate_group_update_fails_with_no_parameters(self): """Exception raised when no parameters passed in on update.""" request_to_validate = {} self.assertRaises( exception.SchemaValidationError, self.update_group_validator.validate, request_to_validate, ) def test_validate_group_update_succeeds_with_extra_parameters(self): """Validate group update requests with extra parameters.""" request_to_validate = {'other_attr': uuid.uuid4().hex} self.update_group_validator.validate(request_to_validate) def test_validate_group_update_fails_with_invalid_name(self): """Exception when validating an update request with invalid `name`.""" for invalid_name in _INVALID_NAMES: request_to_validate = {'name': invalid_name} self.assertRaises( exception.SchemaValidationError, self.update_group_validator.validate, request_to_validate, ) class 
ChangePasswordValidationTestCase(unit.BaseTestCase): """Test for Change Password API validation.""" def setUp(self): super().setUp() self.original_password = uuid.uuid4().hex self.password = uuid.uuid4().hex change = identity_schema.password_change self.change_password_validator = validators.SchemaValidator(change) def test_validate_password_change_request_succeeds(self): """Test that validating a password change request succeeds.""" request_to_validate = { 'original_password': self.original_password, 'password': self.password, } self.change_password_validator.validate(request_to_validate) def test_validate_password_change_fails_without_all_fields(self): """Test that validating a password change fails without all values.""" request_to_validate = {'original_password': self.original_password} self.assertRaises( exception.SchemaValidationError, self.change_password_validator.validate, request_to_validate, ) request_to_validate = {'password': self.password} self.assertRaises( exception.SchemaValidationError, self.change_password_validator.validate, request_to_validate, ) def test_validate_password_change_fails_with_invalid_values(self): """Test that validating a password change fails with bad values.""" request_to_validate = {'original_password': None, 'password': None} self.assertRaises( exception.SchemaValidationError, self.change_password_validator.validate, request_to_validate, ) request_to_validate = {'original_password': 42, 'password': True} self.assertRaises( exception.SchemaValidationError, self.change_password_validator.validate, request_to_validate, ) class IdentityProviderValidationTestCase(unit.BaseTestCase): """Test for V3 Identity Provider API validation.""" def setUp(self): super().setUp() create = federation_schema.identity_provider_create update = federation_schema.identity_provider_update self.create_idp_validator = validators.SchemaValidator(create) self.update_idp_validator = validators.SchemaValidator(update) def 
test_validate_idp_request_succeeds(self): """Test that we validate an identity provider request.""" request_to_validate = { 'description': 'identity provider description', 'enabled': True, 'remote_ids': [uuid.uuid4().hex, uuid.uuid4().hex], } self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) def test_validate_idp_request_fails_with_invalid_params(self): """Exception raised when unknown parameter is found.""" request_to_validate = {'bogus': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_with_enabled(self): """Validate `enabled` as boolean-like values.""" for valid_enabled in _VALID_ENABLED_FORMATS: request_to_validate = {'enabled': valid_enabled} self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) def test_validate_idp_request_with_invalid_enabled_fails(self): """Exception is raised when `enabled` isn't a boolean-like value.""" for invalid_enabled in _INVALID_ENABLED_FORMATS: request_to_validate = {'enabled': invalid_enabled} self.assertRaises( exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} self.create_idp_validator.validate(request_to_validate) # Exception raised when no property on IdP update. 
self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" request_to_validate = {'description': False} self.assertRaises( exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_with_invalid_remote_id_fails(self): """Exception is raised when `remote_ids` is not a array.""" request_to_validate = {"remote_ids": uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_with_duplicated_remote_id(self): """Exception is raised when the duplicated `remote_ids` is found.""" idp_id = uuid.uuid4().hex request_to_validate = {"remote_ids": [idp_id, idp_id]} self.assertRaises( exception.SchemaValidationError, self.create_idp_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_idp_validator.validate, request_to_validate, ) def test_validate_idp_request_remote_id_nullable(self): """Test that `remote_ids` could be explicitly set to None.""" request_to_validate = {'remote_ids': None} self.create_idp_validator.validate(request_to_validate) self.update_idp_validator.validate(request_to_validate) class FederationProtocolValidationTestCase(unit.BaseTestCase): """Test for V3 Federation Protocol API validation.""" def setUp(self): super().setUp() create = federation_schema.protocol_create update = federation_schema.protocol_update self.create_protocol_validator = validators.SchemaValidator(create) self.update_protocol_validator = validators.SchemaValidator(update) 
def test_validate_protocol_request_succeeds(self): """Test that we validate a protocol request successfully.""" request_to_validate = {'mapping_id': uuid.uuid4().hex} self.create_protocol_validator.validate(request_to_validate) def test_validate_protocol_request_succeeds_with_nonuuid_mapping_id(self): """Test that we allow underscore in mapping_id value.""" request_to_validate = {'mapping_id': 'my_mapping_id'} self.create_protocol_validator.validate(request_to_validate) def test_validate_protocol_request_fails_with_invalid_params(self): """Exception raised when unknown parameter is found.""" request_to_validate = {'bogus': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.create_protocol_validator.validate, request_to_validate, ) def test_validate_protocol_request_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} # 'mapping_id' is required. self.assertRaises( exception.SchemaValidationError, self.create_protocol_validator.validate, request_to_validate, ) def test_validate_protocol_request_fails_with_invalid_mapping_id(self): """Exception raised when mapping_id is not string.""" request_to_validate = {'mapping_id': 12334} self.assertRaises( exception.SchemaValidationError, self.create_protocol_validator.validate, request_to_validate, ) def test_validate_protocol_request_succeeds_on_update(self): """Test that we validate a protocol update request successfully.""" request_to_validate = {'mapping_id': uuid.uuid4().hex} self.update_protocol_validator.validate(request_to_validate) def test_validate_update_protocol_request_succeeds_with_nonuuid_id(self): """Test that we allow underscore in mapping_id value when updating.""" request_to_validate = {'mapping_id': 'my_mapping_id'} self.update_protocol_validator.validate(request_to_validate) def test_validate_update_protocol_request_fails_with_invalid_params(self): """Exception raised when unknown parameter in protocol update.""" 
request_to_validate = {'bogus': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.update_protocol_validator.validate, request_to_validate, ) def test_validate_update_protocol_with_no_parameters_fails(self): """Test that updating a protocol requires at least one attribute.""" request_to_validate = {} # 'mapping_id' is required. self.assertRaises( exception.SchemaValidationError, self.update_protocol_validator.validate, request_to_validate, ) def test_validate_update_protocol_request_fails_with_invalid_id(self): """Test that updating a protocol with a non-string mapping_id fail.""" for bad_mapping_id in [12345, True]: request_to_validate = {'mapping_id': bad_mapping_id} self.assertRaises( exception.SchemaValidationError, self.update_protocol_validator.validate, request_to_validate, ) class OAuth1ValidationTestCase(unit.BaseTestCase): """Test for V3 Identity OAuth1 API validation.""" def setUp(self): super().setUp() create = oauth1_schema.consumer_create update = oauth1_schema.consumer_update authorize = oauth1_schema.request_token_authorize self.create_consumer_validator = validators.SchemaValidator(create) self.update_consumer_validator = validators.SchemaValidator(update) self.authorize_request_token_validator = validators.SchemaValidator( authorize ) def test_validate_consumer_request_succeeds(self): """Test that we validate a consumer request successfully.""" request_to_validate = { 'description': uuid.uuid4().hex, 'name': uuid.uuid4().hex, } self.create_consumer_validator.validate(request_to_validate) self.update_consumer_validator.validate(request_to_validate) def test_validate_consumer_request_with_no_parameters(self): """Test that schema validation with empty request body.""" request_to_validate = {} self.create_consumer_validator.validate(request_to_validate) # At least one property should be given. 
self.assertRaises( exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate, ) def test_validate_consumer_request_with_invalid_description_fails(self): """Exception is raised when `description` as a non-string value.""" for invalid_desc in _INVALID_DESC_FORMATS: request_to_validate = {'description': invalid_desc} self.assertRaises( exception.SchemaValidationError, self.create_consumer_validator.validate, request_to_validate, ) self.assertRaises( exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate, ) def test_validate_update_consumer_request_fails_with_secret(self): """Exception raised when secret is given.""" request_to_validate = {'secret': uuid.uuid4().hex} self.assertRaises( exception.SchemaValidationError, self.update_consumer_validator.validate, request_to_validate, ) def test_validate_consumer_request_with_none_desc(self): """Test that schema validation with None desc.""" request_to_validate = {'description': None} self.create_consumer_validator.validate(request_to_validate) self.update_consumer_validator.validate(request_to_validate) def test_validate_authorize_request_token(self): request_to_validate = [ {"id": "711aa6371a6343a9a43e8a310fbe4a6f"}, {"name": "test_role"}, ] self.authorize_request_token_validator.validate(request_to_validate) def test_validate_authorize_request_token_with_additional_properties(self): request_to_validate = [ { "id": "711aa6371a6343a9a43e8a310fbe4a6f", "fake_key": "fake_value", } ] self.assertRaises( exception.SchemaValidationError, self.authorize_request_token_validator.validate, request_to_validate, ) def test_validate_authorize_request_token_with_id_and_name(self): request_to_validate = [ {"id": "711aa6371a6343a9a43e8a310fbe4a6f", "name": "admin"} ] self.assertRaises( exception.SchemaValidationError, self.authorize_request_token_validator.validate, request_to_validate, ) def test_validate_authorize_request_token_with_non_id_or_name(self): 
request_to_validate = [{"fake_key": "fake_value"}] self.assertRaises( exception.SchemaValidationError, self.authorize_request_token_validator.validate, request_to_validate, ) class PasswordValidationTestCase(unit.TestCase): def setUp(self): super().setUp() # passwords requires: 1 letter, 1 digit, 7 chars self.config_fixture.config( group='security_compliance', password_regex=(r'^(?=.*\d)(?=.*[a-zA-Z]).{7,}$'), ) def test_password_validate_with_valid_strong_password(self): password = 'mypassword2' validators.validate_password(password) def test_password_validate_with_invalid_strong_password(self): # negative test: None password = None self.assertRaises( exception.PasswordValidationError, validators.validate_password, password, ) # negative test: numeric password = 1234 self.assertRaises( exception.PasswordValidationError, validators.validate_password, password, ) # negative test: boolean password = True self.assertRaises( exception.PasswordValidationError, validators.validate_password, password, ) def test_password_validate_with_invalid_password_regex(self): # invalid regular expression, missing beginning '[' self.config_fixture.config( group='security_compliance', password_regex=r'\S]+' ) password = 'mypassword2' self.assertRaises( exception.PasswordValidationError, validators.validate_password, password, ) # fix regular expression and validate self.config_fixture.config( group='security_compliance', password_regex=r'[\S]+' ) validators.validate_password(password) class LimitValidationTestCase(unit.BaseTestCase): """Test for V3 Limits API validation.""" def setUp(self): super().setUp() create_registered_limits = limit_schema.registered_limit_create update_registered_limits = limit_schema.registered_limit_update create_limits = limit_schema.limit_create update_limits = limit_schema.limit_update self.create_registered_limits_validator = validators.SchemaValidator( create_registered_limits ) self.update_registered_limits_validator = validators.SchemaValidator( 
update_registered_limits ) self.create_limits_validator = validators.SchemaValidator( create_limits ) self.update_limits_validator = validators.SchemaValidator( update_limits ) def test_validate_registered_limit_create_request_succeeds(self): request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, 'description': 'test description', } ] self.create_registered_limits_validator.validate(request_to_validate) def test_validate_registered_limit_create_request_without_optional(self): request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'resource_name': 'volume', 'default_limit': 10, } ] self.create_registered_limits_validator.validate(request_to_validate) def test_validate_registered_limit_update_request_without_region(self): request_to_validate = { 'service_id': uuid.uuid4().hex, 'resource_name': 'volume', 'default_limit': 10, } self.update_registered_limits_validator.validate(request_to_validate) def test_validate_registered_limit_request_with_no_parameters(self): request_to_validate = [] # At least one property should be given. 
self.assertRaises( exception.SchemaValidationError, self.create_registered_limits_validator.validate, request_to_validate, ) def test_validate_registered_limit_create_request_with_invalid_input(self): _INVALID_FORMATS = [ {'service_id': 'fake_id'}, {'region_id': 123}, {'resource_name': 123}, {'resource_name': ''}, {'resource_name': 'a' * 256}, {'default_limit': 'not_int'}, {'default_limit': -10}, {'default_limit': 10000000000000000}, {'description': 123}, {'description': True}, ] for invalid_desc in _INVALID_FORMATS: request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, 'description': 'test description', } ] request_to_validate[0].update(invalid_desc) self.assertRaises( exception.SchemaValidationError, self.create_registered_limits_validator.validate, request_to_validate, ) def test_validate_registered_limit_update_request_with_invalid_input(self): _INVALID_FORMATS = [ {'service_id': 'fake_id'}, {'region_id': 123}, {'resource_name': 123}, {'resource_name': ''}, {'resource_name': 'a' * 256}, {'default_limit': 'not_int'}, {'default_limit': -10}, {'default_limit': 10000000000000000}, {'description': 123}, ] for invalid_desc in _INVALID_FORMATS: request_to_validate = { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, 'description': 'test description', } request_to_validate.update(invalid_desc) self.assertRaises( exception.SchemaValidationError, self.update_registered_limits_validator.validate, request_to_validate, ) def test_validate_registered_limit_create_request_with_addition(self): request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, 'more_key': 'more_value', } ] self.assertRaises( exception.SchemaValidationError, self.create_registered_limits_validator.validate, request_to_validate, ) def test_validate_registered_limit_update_request_with_addition(self): 
request_to_validate = { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, 'more_key': 'more_value', } self.assertRaises( exception.SchemaValidationError, self.update_registered_limits_validator.validate, request_to_validate, ) def test_validate_registered_limit_create_request_without_required(self): for key in ['service_id', 'resource_name', 'default_limit']: request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'default_limit': 10, } ] request_to_validate[0].pop(key) self.assertRaises( exception.SchemaValidationError, self.create_registered_limits_validator.validate, request_to_validate, ) def test_validate_project_limit_create_request_succeeds(self): request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] self.create_limits_validator.validate(request_to_validate) def test_validate_domain_limit_create_request_succeeds(self): request_to_validate = [ { 'domain_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] self.create_limits_validator.validate(request_to_validate) def test_validate_limit_create_request_without_optional(self): request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'resource_name': 'volume', 'resource_limit': 10, } ] self.create_limits_validator.validate(request_to_validate) def test_validate_limit_update_request_succeeds(self): request_to_validate = { 'resource_limit': 10, 'description': 'test description', } self.update_limits_validator.validate(request_to_validate) def test_validate_limit_update_request_without_optional(self): request_to_validate = {'resource_limit': 10} self.update_limits_validator.validate(request_to_validate) def 
test_validate_limit_request_with_no_parameters(self): request_to_validate = [] # At least one property should be given. self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_limit_create_request_with_invalid_input(self): _INVALID_FORMATS = [ {'project_id': 'fake_id'}, {'service_id': 'fake_id'}, {'region_id': 123}, {'resource_name': 123}, {'resource_name': ''}, {'resource_name': 'a' * 256}, {'resource_limit': -10}, {'resource_limit': 10000000000000000}, {'resource_limit': 'not_int'}, {'description': 123}, ] for invalid_attribute in _INVALID_FORMATS: request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] request_to_validate[0].update(invalid_attribute) self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_limit_create_request_with_invalid_domain(self): request_to_validate = [ { 'domain_id': 'fake_id', 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_limit_update_request_with_invalid_input(self): _INVALID_FORMATS = [ {'resource_name': 123}, {'resource_limit': 'not_int'}, {'resource_name': ''}, {'resource_name': 'a' * 256}, {'resource_limit': -10}, {'resource_limit': 10000000000000000}, {'description': 123}, ] for invalid_desc in _INVALID_FORMATS: request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] request_to_validate[0].update(invalid_desc) self.assertRaises( exception.SchemaValidationError, 
self.update_limits_validator.validate, request_to_validate, ) def test_validate_limit_create_request_with_addition_input_fails(self): request_to_validate = [ { 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'more_key': 'more_value', } ] self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_limit_update_request_with_addition_input_fails(self): request_to_validate = { 'id': uuid.uuid4().hex, 'resource_limit': 10, 'more_key': 'more_value', } self.assertRaises( exception.SchemaValidationError, self.update_limits_validator.validate, request_to_validate, ) def test_validate_project_limit_create_request_without_required_fails( self, ): for key in [ 'project_id', 'service_id', 'resource_name', 'resource_limit', ]: request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, } ] request_to_validate[0].pop(key) self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_domain_limit_create_request_without_required_fails(self): for key in [ 'domain_id', 'service_id', 'resource_name', 'resource_limit', ]: request_to_validate = [ { 'domain_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, } ] request_to_validate[0].pop(key) self.assertRaises( exception.SchemaValidationError, self.create_limits_validator.validate, request_to_validate, ) def test_validate_limit_create_request_with_both_project_and_domain(self): request_to_validate = [ { 'project_id': uuid.uuid4().hex, 'domain_id': uuid.uuid4().hex, 'service_id': uuid.uuid4().hex, 'region_id': 'RegionOne', 'resource_name': 'volume', 'resource_limit': 10, 'description': 'test description', } ] self.assertRaises( exception.SchemaValidationError, 
self.create_limits_validator.validate, request_to_validate, ) class ApplicationCredentialValidatorTestCase(unit.TestCase): _valid_roles = [ {'name': 'member'}, {'id': uuid.uuid4().hex}, {'id': str(uuid.uuid4())}, {'name': '_member_'}, ] _invalid_roles = [True, 123, None, {'badkey': 'badval'}] def setUp(self): super().setUp() create = app_cred_schema.application_credential_create self.create_app_cred_validator = validators.SchemaValidator(create) def test_validate_app_cred_request(self): request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [{'name': 'member'}], 'expires_at': 'tomorrow', } self.create_app_cred_validator.validate(request_to_validate) def test_validate_app_cred_request_without_name_fails(self): request_to_validate = { 'description': 'My App Cred', 'roles': [{'name': 'member'}], 'expires_at': 'tomorrow', } self.assertRaises( exception.SchemaValidationError, self.create_app_cred_validator.validate, request_to_validate, ) def test_validate_app_cred_with_invalid_expires_at_fails(self): request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [{'name': 'member'}], 'expires_at': 3, } self.assertRaises( exception.SchemaValidationError, self.create_app_cred_validator.validate, request_to_validate, ) def test_validate_app_cred_with_null_expires_at_succeeds(self): request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [{'name': 'member'}], } self.create_app_cred_validator.validate(request_to_validate) def test_validate_app_cred_with_unrestricted_flag_succeeds(self): request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [{'name': 'member'}], 'unrestricted': True, } self.create_app_cred_validator.validate(request_to_validate) def test_validate_app_cred_with_secret_succeeds(self): request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [{'name': 'member'}], 'secret': 'secretsecretsecretsecret', } 
self.create_app_cred_validator.validate(request_to_validate) def test_validate_app_cred_invalid_roles_fails(self): for role in self._invalid_roles: request_to_validate = { 'name': 'myappcred', 'description': 'My App Cred', 'roles': [role], } self.assertRaises( exception.SchemaValidationError, self.create_app_cred_validator.validate, request_to_validate, ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/test_versions.py0000664000175000017500000012055700000000000022740 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy import functools import http.client import random from oslo_serialization import jsonutils from testtools import matchers as tt_matchers import webob from keystone.api import discovery from keystone.common import json_home from keystone.tests import unit v3_MEDIA_TYPES = [ { "base": "application/json", "type": "application/vnd.openstack.identity-v3+json", } ] v3_EXPECTED_RESPONSE = { "id": "v3.14", "status": "stable", "updated": "2020-04-07T00:00:00Z", "links": [ { "rel": "self", "href": "", # Will get filled in after initialization } ], "media-types": v3_MEDIA_TYPES, } v3_VERSION_RESPONSE = {"version": v3_EXPECTED_RESPONSE} VERSIONS_RESPONSE = { "versions": { "values": [ v3_EXPECTED_RESPONSE, ] } } _build_ec2tokens_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EC2', extension_version='1.0', ) REVOCATIONS_RELATION = json_home.build_v3_extension_resource_relation( 'OS-PKI', '1.0', 'revocations' ) _build_simple_cert_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-SIMPLE-CERT', extension_version='1.0', ) _build_trust_relation = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-TRUST', extension_version='1.0', ) _build_federation_rel = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-FEDERATION', extension_version='1.0', ) _build_oauth1_rel = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-OAUTH1', extension_version='1.0', ) _build_ep_policy_rel = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-ENDPOINT-POLICY', extension_version='1.0', ) _build_ep_filter_rel = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-EP-FILTER', extension_version='1.0', ) _build_os_inherit_rel = functools.partial( json_home.build_v3_extension_resource_relation, extension_name='OS-INHERIT', 
extension_version='1.0', ) TRUST_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-TRUST', '1.0', 'trust_id' ) IDP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-FEDERATION', '1.0', 'idp_id' ) PROTOCOL_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-FEDERATION', '1.0', 'protocol_id' ) MAPPING_ID_PARAM_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-FEDERATION', '1.0', 'mapping_id' ) SP_ID_PARAMETER_RELATION = json_home.build_v3_extension_parameter_relation( 'OS-FEDERATION', '1.0', 'sp_id' ) CONSUMER_ID_PARAMETER_RELATION = ( json_home.build_v3_extension_parameter_relation( 'OS-OAUTH1', '1.0', 'consumer_id' ) ) REQUEST_TOKEN_ID_PARAMETER_RELATION = ( json_home.build_v3_extension_parameter_relation( 'OS-OAUTH1', '1.0', 'request_token_id' ) ) ACCESS_TOKEN_ID_PARAMETER_RELATION = ( json_home.build_v3_extension_parameter_relation( 'OS-OAUTH1', '1.0', 'access_token_id' ) ) ENDPOINT_GROUP_ID_PARAMETER_RELATION = ( json_home.build_v3_extension_parameter_relation( 'OS-EP-FILTER', '1.0', 'endpoint_group_id' ) ) BASE_IDP_PROTOCOL = '/OS-FEDERATION/identity_providers/{idp_id}/protocols' BASE_EP_POLICY = '/policies/{policy_id}/OS-ENDPOINT-POLICY' BASE_EP_FILTER_PREFIX = '/OS-EP-FILTER' BASE_EP_FILTER = BASE_EP_FILTER_PREFIX + '/endpoint_groups/{endpoint_group_id}' BASE_ACCESS_TOKEN = ( '/users/{user_id}/OS-OAUTH1/access_tokens/{access_token_id}' ) FEDERATED_AUTH_URL = ( '/OS-FEDERATION/identity_providers/{idp_id}' '/protocols/{protocol_id}/auth' ) FEDERATED_IDP_SPECIFIC_WEBSSO = ( '/auth/OS-FEDERATION/identity_providers/' '{idp_id}/protocols/{protocol_id}/websso' ) APPLICATION_CREDENTIAL = ( '/users/{user_id}/application_credentials/{application_credential_id}' ) APPLICATION_CREDENTIALS = '/users/{user_id}/application_credentials' APPLICATION_CREDENTIAL_RELATION = json_home.build_v3_parameter_relation( 'application_credential_id' ) ACCESS_RULE = 
'/users/{user_id}/access_rules/{access_rule_id}' ACCESS_RULES = '/users/{user_id}/access_rules' ACCESS_RULE_RELATION = json_home.build_v3_parameter_relation('access_rule_id') V3_JSON_HOME_RESOURCES = { json_home.build_v3_resource_relation('auth_tokens'): { 'href': '/auth/tokens' }, json_home.build_v3_resource_relation('auth_catalog'): { 'href': '/auth/catalog' }, json_home.build_v3_resource_relation('auth_projects'): { 'href': '/auth/projects' }, json_home.build_v3_resource_relation('auth_domains'): { 'href': '/auth/domains' }, json_home.build_v3_resource_relation('auth_system'): { 'href': '/auth/system' }, json_home.build_v3_resource_relation('credential'): { 'href-template': '/credentials/{credential_id}', 'href-vars': { 'credential_id': json_home.build_v3_parameter_relation( 'credential_id' ) }, }, json_home.build_v3_resource_relation('credentials'): { 'href': '/credentials' }, json_home.build_v3_resource_relation('system_user_role'): { 'href-template': '/system/users/{user_id}/roles/{role_id}', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('system_user_roles'): { 'href-template': '/system/users/{user_id}/roles', 'href-vars': {'user_id': json_home.Parameters.USER_ID}, }, json_home.build_v3_resource_relation('system_group_role'): { 'href-template': '/system/groups/{group_id}/roles/{role_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('system_group_roles'): { 'href-template': '/system/groups/{group_id}/roles', 'href-vars': {'group_id': json_home.Parameters.GROUP_ID}, }, json_home.build_v3_resource_relation('domain'): { 'href-template': '/domains/{domain_id}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, }, }, json_home.build_v3_resource_relation('domain_group_role'): { 'href-template': '/domains/{domain_id}/groups/{group_id}/roles/{role_id}', 'href-vars': { 
'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('domain_group_roles'): { 'href-template': '/domains/{domain_id}/groups/{group_id}/roles', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }, }, json_home.build_v3_resource_relation('domain_user_role'): { 'href-template': '/domains/{domain_id}/users/{user_id}/roles/{role_id}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('domain_user_roles'): { 'href-template': '/domains/{domain_id}/users/{user_id}/roles', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('domains'): {'href': '/domains'}, json_home.build_v3_resource_relation('endpoint'): { 'href-template': '/endpoints/{endpoint_id}', 'href-vars': { 'endpoint_id': json_home.build_v3_parameter_relation( 'endpoint_id' ), }, }, json_home.build_v3_resource_relation('endpoints'): {'href': '/endpoints'}, _build_ec2tokens_relation(resource_name='ec2tokens'): { 'href': '/ec2tokens' }, _build_ec2tokens_relation(resource_name='user_credential'): { 'href-template': '/users/{user_id}/credentials/OS-EC2/{credential_id}', 'href-vars': { 'credential_id': json_home.build_v3_parameter_relation( 'credential_id' ), 'user_id': json_home.Parameters.USER_ID, }, }, _build_ec2tokens_relation(resource_name='user_credentials'): { 'href-template': '/users/{user_id}/credentials/OS-EC2', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, REVOCATIONS_RELATION: {'href': '/auth/tokens/OS-PKI/revoked'}, 'https://docs.openstack.org/api/openstack-identity/3/ext/OS-REVOKE/1.0/rel' '/events': {'href': '/OS-REVOKE/events'}, _build_simple_cert_relation(resource_name='ca_certificate'): { 
'href': '/OS-SIMPLE-CERT/ca' }, _build_simple_cert_relation(resource_name='certificates'): { 'href': '/OS-SIMPLE-CERT/certificates' }, _build_trust_relation(resource_name='trust'): { 'href-template': '/OS-TRUST/trusts/{trust_id}', 'href-vars': { 'trust_id': TRUST_ID_PARAMETER_RELATION, }, }, _build_trust_relation(resource_name='trust_role'): { 'href-template': '/OS-TRUST/trusts/{trust_id}/roles/{role_id}', 'href-vars': { 'role_id': json_home.Parameters.ROLE_ID, 'trust_id': TRUST_ID_PARAMETER_RELATION, }, }, _build_trust_relation(resource_name='trust_roles'): { 'href-template': '/OS-TRUST/trusts/{trust_id}/roles', 'href-vars': { 'trust_id': TRUST_ID_PARAMETER_RELATION, }, }, _build_trust_relation(resource_name='trusts'): { 'href': '/OS-TRUST/trusts' }, 'https://docs.openstack.org/api/openstack-identity/3/ext/s3tokens/1.0/rel/' 's3tokens': {'href': '/s3tokens'}, json_home.build_v3_resource_relation('group'): { 'href-template': '/groups/{group_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, }, }, json_home.build_v3_resource_relation('group_user'): { 'href-template': '/groups/{group_id}/users/{user_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('group_users'): { 'href-template': '/groups/{group_id}/users', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, }, }, json_home.build_v3_resource_relation('groups'): {'href': '/groups'}, json_home.build_v3_resource_relation('policies'): {'href': '/policies'}, json_home.build_v3_resource_relation('policy'): { 'href-template': '/policies/{policy_id}', 'href-vars': { 'policy_id': json_home.build_v3_parameter_relation('policy_id'), }, }, json_home.build_v3_resource_relation('project'): { 'href-template': '/projects/{project_id}', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, }, }, json_home.build_v3_resource_relation('project_group_role'): { 'href-template': 
'/projects/{project_id}/groups/{group_id}/roles/{role_id}', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('project_group_roles'): { 'href-template': '/projects/{project_id}/groups/{group_id}/roles', 'href-vars': { 'group_id': json_home.Parameters.GROUP_ID, 'project_id': json_home.Parameters.PROJECT_ID, }, }, json_home.build_v3_resource_relation('project_tags'): { 'href-template': '/projects/{project_id}/tags/{value}', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'value': json_home.Parameters.TAG_VALUE, }, }, json_home.build_v3_resource_relation('project_user_role'): { 'href-template': '/projects/{project_id}/users/{user_id}/roles/{role_id}', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('project_user_roles'): { 'href-template': '/projects/{project_id}/users/{user_id}/roles', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('projects'): {'href': '/projects'}, json_home.build_v3_resource_relation('region'): { 'href-template': '/regions/{region_id}', 'href-vars': { 'region_id': json_home.build_v3_parameter_relation('region_id'), }, }, json_home.build_v3_resource_relation('regions'): {'href': '/regions'}, json_home.build_v3_resource_relation('role'): { 'href-template': '/roles/{role_id}', 'href-vars': { 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('implied_roles'): { 'href-template': '/roles/{prior_role_id}/implies', 'href-vars': {'prior_role_id': json_home.Parameters.ROLE_ID}, }, json_home.build_v3_resource_relation('implied_role'): { 'href-template': '/roles/{prior_role_id}/implies/{implied_role_id}', 'href-vars': { 'prior_role_id': 
json_home.Parameters.ROLE_ID, 'implied_role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('role_inferences'): { 'href': '/role_inferences', }, json_home.build_v3_resource_relation('role_assignments'): { 'href': '/role_assignments' }, json_home.build_v3_resource_relation('roles'): {'href': '/roles'}, json_home.build_v3_resource_relation('service'): { 'href-template': '/services/{service_id}', 'href-vars': { 'service_id': json_home.build_v3_parameter_relation('service_id') }, }, json_home.build_v3_resource_relation('services'): {'href': '/services'}, json_home.build_v3_resource_relation('user'): { 'href-template': '/users/{user_id}', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('user_change_password'): { 'href-template': '/users/{user_id}/password', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('user_groups'): { 'href-template': '/users/{user_id}/groups', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('user_projects'): { 'href-template': '/users/{user_id}/projects', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, json_home.build_v3_resource_relation('users'): {'href': '/users'}, json_home.build_v3_extension_resource_relation( 'OS-OAUTH2', '1.0', 'token' ): {'href': '/OS-OAUTH2/token'}, _build_federation_rel(resource_name='domains'): {'href': '/auth/domains'}, _build_federation_rel(resource_name='websso'): { 'href-template': '/auth/OS-FEDERATION/websso/{protocol_id}', 'href-vars': { 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }, }, _build_federation_rel(resource_name='projects'): { 'href': '/auth/projects' }, _build_federation_rel(resource_name='saml2'): { 'href': '/auth/OS-FEDERATION/saml2' }, _build_federation_rel(resource_name='ecp'): { 'href': '/auth/OS-FEDERATION/saml2/ecp' }, _build_federation_rel(resource_name='metadata'): { 'href': 
'/OS-FEDERATION/saml2/metadata' }, _build_federation_rel(resource_name='identity_providers'): { 'href': '/OS-FEDERATION/identity_providers' }, _build_federation_rel(resource_name='service_providers'): { 'href': '/OS-FEDERATION/service_providers' }, _build_federation_rel(resource_name='mappings'): { 'href': '/OS-FEDERATION/mappings' }, _build_federation_rel(resource_name='identity_provider'): { 'href-template': '/OS-FEDERATION/identity_providers/{idp_id}', 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, }, }, _build_federation_rel(resource_name='identity_providers_websso'): { 'href-template': FEDERATED_IDP_SPECIFIC_WEBSSO, 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }, }, _build_federation_rel(resource_name='service_provider'): { 'href-template': '/OS-FEDERATION/service_providers/{sp_id}', 'href-vars': { 'sp_id': SP_ID_PARAMETER_RELATION, }, }, _build_federation_rel(resource_name='mapping'): { 'href-template': '/OS-FEDERATION/mappings/{mapping_id}', 'href-vars': { 'mapping_id': MAPPING_ID_PARAM_RELATION, }, }, _build_federation_rel(resource_name='identity_provider_protocol'): { 'href-template': BASE_IDP_PROTOCOL + '/{protocol_id}', 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }, }, _build_federation_rel(resource_name='identity_provider_protocols'): { 'href-template': BASE_IDP_PROTOCOL, 'href-vars': {'idp_id': IDP_ID_PARAMETER_RELATION}, }, _build_federation_rel(resource_name='identity_provider_protocol_auth'): { 'href-template': FEDERATED_AUTH_URL, 'href-vars': { 'idp_id': IDP_ID_PARAMETER_RELATION, 'protocol_id': PROTOCOL_ID_PARAM_RELATION, }, }, _build_oauth1_rel(resource_name='access_tokens'): { 'href': '/OS-OAUTH1/access_token' }, _build_oauth1_rel(resource_name='request_tokens'): { 'href': '/OS-OAUTH1/request_token' }, _build_oauth1_rel(resource_name='consumers'): { 'href': '/OS-OAUTH1/consumers' }, _build_oauth1_rel(resource_name='authorize_request_token'): 
{ 'href-template': '/OS-OAUTH1/authorize/{request_token_id}', 'href-vars': { 'request_token_id': REQUEST_TOKEN_ID_PARAMETER_RELATION, }, }, _build_oauth1_rel(resource_name='consumer'): { 'href-template': '/OS-OAUTH1/consumers/{consumer_id}', 'href-vars': { 'consumer_id': CONSUMER_ID_PARAMETER_RELATION, }, }, _build_oauth1_rel(resource_name='user_access_token'): { 'href-template': BASE_ACCESS_TOKEN, 'href-vars': { 'user_id': json_home.Parameters.USER_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }, }, _build_oauth1_rel(resource_name='user_access_tokens'): { 'href-template': '/users/{user_id}/OS-OAUTH1/access_tokens', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, }, }, _build_oauth1_rel(resource_name='user_access_token_role'): { 'href-template': BASE_ACCESS_TOKEN + '/roles/{role_id}', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, 'role_id': json_home.Parameters.ROLE_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }, }, _build_oauth1_rel(resource_name='user_access_token_roles'): { 'href-template': BASE_ACCESS_TOKEN + '/roles', 'href-vars': { 'user_id': json_home.Parameters.USER_ID, 'access_token_id': ACCESS_TOKEN_ID_PARAMETER_RELATION, }, }, _build_ep_policy_rel(resource_name='endpoint_policy'): { 'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/policy', 'href-vars': { 'endpoint_id': json_home.Parameters.ENDPOINT_ID, }, }, _build_ep_policy_rel(resource_name='endpoint_policy_association'): { 'href-template': BASE_EP_POLICY + '/endpoints/{endpoint_id}', 'href-vars': { 'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'policy_id': json_home.Parameters.POLICY_ID, }, }, _build_ep_policy_rel(resource_name='policy_endpoints'): { 'href-template': BASE_EP_POLICY + '/endpoints', 'href-vars': { 'policy_id': json_home.Parameters.POLICY_ID, }, }, _build_ep_policy_rel( resource_name='region_and_service_policy_association' ): { 'href-template': ( BASE_EP_POLICY + '/services/{service_id}/regions/{region_id}' ), 'href-vars': { 
'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, 'region_id': json_home.Parameters.REGION_ID, }, }, _build_ep_policy_rel(resource_name='service_policy_association'): { 'href-template': BASE_EP_POLICY + '/services/{service_id}', 'href-vars': { 'policy_id': json_home.Parameters.POLICY_ID, 'service_id': json_home.Parameters.SERVICE_ID, }, }, _build_ep_filter_rel(resource_name='endpoint_group'): { 'href-template': '/OS-EP-FILTER/endpoint_groups/{endpoint_group_id}', 'href-vars': { 'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }, }, _build_ep_filter_rel( resource_name='endpoint_group_to_project_association' ): { 'href-template': BASE_EP_FILTER + '/projects/{project_id}', 'href-vars': { 'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, 'project_id': json_home.Parameters.PROJECT_ID, }, }, _build_ep_filter_rel(resource_name='endpoint_groups'): { 'href': '/OS-EP-FILTER/endpoint_groups' }, _build_ep_filter_rel(resource_name='endpoint_projects'): { 'href-template': '/OS-EP-FILTER/endpoints/{endpoint_id}/projects', 'href-vars': { 'endpoint_id': json_home.Parameters.ENDPOINT_ID, }, }, _build_ep_filter_rel(resource_name='endpoints_in_endpoint_group'): { 'href-template': BASE_EP_FILTER + '/endpoints', 'href-vars': { 'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }, }, _build_ep_filter_rel(resource_name='project_endpoint_groups'): { 'href-template': ( BASE_EP_FILTER_PREFIX + '/projects/{project_id}' + '/endpoint_groups' ), 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, }, }, _build_ep_filter_rel(resource_name='project_endpoint'): { 'href-template': ( '/OS-EP-FILTER/projects/{project_id}/endpoints/{endpoint_id}' ), 'href-vars': { 'endpoint_id': json_home.Parameters.ENDPOINT_ID, 'project_id': json_home.Parameters.PROJECT_ID, }, }, _build_ep_filter_rel(resource_name='project_endpoints'): { 'href-template': '/OS-EP-FILTER/projects/{project_id}/endpoints', 'href-vars': { 'project_id': 
json_home.Parameters.PROJECT_ID, }, }, _build_ep_filter_rel( resource_name='projects_associated_with_endpoint_group' ): { 'href-template': BASE_EP_FILTER + '/projects', 'href-vars': { 'endpoint_group_id': ENDPOINT_GROUP_ID_PARAMETER_RELATION, }, }, _build_os_inherit_rel( resource_name='domain_user_role_inherited_to_projects' ): { 'href-template': '/OS-INHERIT/domains/{domain_id}/users/' '{user_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, }, _build_os_inherit_rel( resource_name='domain_group_role_inherited_to_projects' ): { 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/' '{group_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, _build_os_inherit_rel( resource_name='domain_user_roles_inherited_to_projects' ): { 'href-template': '/OS-INHERIT/domains/{domain_id}/users/' '{user_id}/roles/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'user_id': json_home.Parameters.USER_ID, }, }, _build_os_inherit_rel( resource_name='domain_group_roles_inherited_to_projects' ): { 'href-template': '/OS-INHERIT/domains/{domain_id}/groups/' '{group_id}/roles/inherited_to_projects', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group_id': json_home.Parameters.GROUP_ID, }, }, _build_os_inherit_rel( resource_name='project_user_role_inherited_to_projects' ): { 'href-template': '/OS-INHERIT/projects/{project_id}/users/' '{user_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'role_id': json_home.Parameters.ROLE_ID, 'user_id': json_home.Parameters.USER_ID, }, }, _build_os_inherit_rel( resource_name='project_group_role_inherited_to_projects' ): { 'href-template': 
'/OS-INHERIT/projects/{project_id}/groups/' '{group_id}/roles/{role_id}/inherited_to_projects', 'href-vars': { 'project_id': json_home.Parameters.PROJECT_ID, 'group_id': json_home.Parameters.GROUP_ID, 'role_id': json_home.Parameters.ROLE_ID, }, }, json_home.build_v3_resource_relation('domain_config'): { 'href-template': '/domains/{domain_id}/config', 'href-vars': {'domain_id': json_home.Parameters.DOMAIN_ID}, }, json_home.build_v3_resource_relation('domain_config_group'): { 'href-template': '/domains/{domain_id}/config/{group}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': json_home.build_v3_parameter_relation('config_group'), }, }, json_home.build_v3_resource_relation('domain_config_option'): { 'href-template': '/domains/{domain_id}/config/{group}/{option}', 'href-vars': { 'domain_id': json_home.Parameters.DOMAIN_ID, 'group': json_home.build_v3_parameter_relation('config_group'), 'option': json_home.build_v3_parameter_relation('config_option'), }, }, json_home.build_v3_resource_relation('domain_config_default'): { 'href': '/domains/config/default' }, json_home.build_v3_resource_relation('domain_config_default_group'): { 'href-template': '/domains/config/{group}/default', 'href-vars': { 'group': json_home.build_v3_parameter_relation('config_group') }, }, json_home.build_v3_resource_relation('domain_config_default_option'): { 'href-template': '/domains/config/{group}/{option}/default', 'href-vars': { 'group': json_home.build_v3_parameter_relation('config_group'), 'option': json_home.build_v3_parameter_relation('config_option'), }, }, json_home.build_v3_resource_relation('registered_limits'): { 'hints': {'status': 'experimental'}, 'href': '/registered_limits', }, json_home.build_v3_resource_relation('registered_limit'): { 'href-template': '/registered_limits/{registered_limit_id}', 'href-vars': { 'registered_limit_id': json_home.build_v3_parameter_relation( 'registered_limit_id' ) }, 'hints': {'status': 'experimental'}, }, 
json_home.build_v3_resource_relation('limits'): { 'hints': {'status': 'experimental'}, 'href': '/limits', }, json_home.build_v3_resource_relation('limit'): { 'href-template': '/limits/{limit_id}', 'href-vars': { 'limit_id': json_home.build_v3_parameter_relation('limit_id') }, 'hints': {'status': 'experimental'}, }, json_home.build_v3_resource_relation('limit_model'): { 'href': '/limits/model', 'hints': {'status': 'experimental'}, }, json_home.build_v3_resource_relation('application_credentials'): { 'href-template': APPLICATION_CREDENTIALS, 'href-vars': { 'user_id': json_home.build_v3_parameter_relation('user_id') }, }, json_home.build_v3_resource_relation('application_credential'): { 'href-template': APPLICATION_CREDENTIAL, 'href-vars': { 'application_credential_id': APPLICATION_CREDENTIAL_RELATION, 'user_id': json_home.build_v3_parameter_relation('user_id'), }, }, json_home.build_v3_resource_relation('access_rules'): { 'href-template': ACCESS_RULES, 'href-vars': { 'user_id': json_home.build_v3_parameter_relation('user_id') }, }, json_home.build_v3_resource_relation('access_rule'): { 'href-template': ACCESS_RULE, 'href-vars': { 'access_rule_id': ACCESS_RULE_RELATION, 'user_id': json_home.build_v3_parameter_relation('user_id'), }, }, } class TestClient: def __init__(self, app=None, token=None): self.app = app self.token = token def request(self, method, path, headers=None, body=None): if headers is None: headers = {} if self.token: headers.setdefault('X-Auth-Token', self.token) req = webob.Request.blank(path) req.method = method for k, v in headers.items(): req.headers[k] = v if body: req.body = body return req.get_response(self.app) def get(self, path, headers=None): return self.request('GET', path=path, headers=headers) def post(self, path, headers=None, body=None): return self.request('POST', path=path, headers=headers, body=body) def put(self, path, headers=None, body=None): return self.request('PUT', path=path, headers=headers, body=body) class 
_VersionsEqual(tt_matchers.MatchesListwise): def __init__(self, expected): super().__init__( [ tt_matchers.KeysEqual(expected), tt_matchers.KeysEqual(expected['versions']), tt_matchers.HasLength(len(expected['versions']['values'])), tt_matchers.ContainsAll(expected['versions']['values']), ] ) def match(self, other): return super().match( [ other, other['versions'], other['versions']['values'], other['versions']['values'], ] ) class VersionTestCase(unit.TestCase): def setUp(self): super().setUp() self.load_backends() self.public_app = self.loadapp('public') self.public_port = random.randint(40000, 60000) self.config_fixture.config( public_endpoint='http://localhost:%d' % self.public_port ) def config_overrides(self): super().config_overrides() def _paste_in_port(self, response, port): for link in response['links']: if link['rel'] == 'self': link['href'] = port def test_public_versions(self): client = TestClient(self.public_app) resp = client.get('/') self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = VERSIONS_RESPONSE for version in expected['versions']['values']: if version['id'].startswith('v3'): self._paste_in_port( version, 'http://localhost:%s/v3/' % self.public_port ) self.assertThat(data, _VersionsEqual(expected)) def test_use_site_url_if_endpoint_unset(self): self.config_fixture.config(public_endpoint=None) for app in (self.public_app,): client = TestClient(app) resp = client.get('/') self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = VERSIONS_RESPONSE for version in expected['versions']['values']: # localhost happens to be the site url for tests if version['id'].startswith('v3'): self._paste_in_port(version, 'http://localhost/v3/') self.assertThat(data, _VersionsEqual(expected)) def test_public_version_v3(self): client = TestClient(self.public_app) resp = client.get('/v3/') self.assertEqual(http.client.OK, resp.status_int) data = jsonutils.loads(resp.body) expected = v3_VERSION_RESPONSE 
self._paste_in_port( expected['version'], 'http://localhost:%s/v3/' % self.public_port ) self.assertEqual(expected, data) def test_use_site_url_if_endpoint_unset_v3(self): self.config_fixture.config(public_endpoint=None) for app in (self.public_app,): client = TestClient(app) resp = client.get('/v3/') self.assertEqual(http.client.OK, resp.status_int) data = jsonutils.loads(resp.body) expected = v3_VERSION_RESPONSE self._paste_in_port(expected['version'], 'http://localhost/v3/') self.assertEqual(expected, data) def test_v2_disabled(self): # NOTE(morgan): This test should be kept, v2.0 is removed and should # never return, this prevents regression[s]/v2.0 discovery doc # slipping back in. client = TestClient(self.public_app) # request to /v2.0 should fail resp = client.get('/v2.0/') # NOTE(morgan): getting a 418 here is indicative of a 404, but from # the flask app itself (not a handled 404 such as UserNotFound) self.assertEqual(418, resp.status_int) # request to /v3 should pass resp = client.get('/v3/') self.assertEqual(http.client.OK, resp.status_int) data = jsonutils.loads(resp.body) expected = v3_VERSION_RESPONSE self._paste_in_port( expected['version'], 'http://localhost:%s/v3/' % self.public_port ) self.assertEqual(expected, data) # only v3 information should be displayed by requests to / v3_only_response = {"versions": {"values": [v3_EXPECTED_RESPONSE]}} self._paste_in_port( v3_only_response['versions']['values'][0], 'http://localhost:%s/v3/' % self.public_port, ) resp = client.get('/') self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) self.assertEqual(v3_only_response, data) def _test_json_home(self, path, exp_json_home_data): client = TestClient(self.public_app) resp = client.get(path, headers={'Accept': 'application/json-home'}) self.assertThat(resp.status, tt_matchers.Equals('200 OK')) self.assertThat( resp.headers['Content-Type'], tt_matchers.Equals('application/json-home'), ) maxDiff = self.maxDiff self.maxDiff = None # 
NOTE(morgan): Changed from tt_matchers.Equals to make it easier to # determine issues. Reset maxDiff to the original value at the end # of the assert. self.assertDictEqual(exp_json_home_data, jsonutils.loads(resp.body)) self.maxDiff = maxDiff def test_json_home_v3(self): # If the request is /v3 and the Accept header is application/json-home # then the server responds with a JSON Home document. exp_json_home_data = {'resources': V3_JSON_HOME_RESOURCES} self._test_json_home('/v3', exp_json_home_data) def test_json_home_root(self): # If the request is / and the Accept header is application/json-home # then the server responds with a JSON Home document. exp_json_home_data = copy.deepcopy( {'resources': V3_JSON_HOME_RESOURCES} ) json_home.translate_urls(exp_json_home_data, '/v3') self._test_json_home('/', exp_json_home_data) def test_accept_type_handling(self): # Accept headers with multiple types and qvalues are handled. def make_request(accept_types=None): client = TestClient(self.public_app) headers = None if accept_types: headers = {'Accept': accept_types} resp = client.get('/v3', headers=headers) self.assertThat(resp.status, tt_matchers.Equals('200 OK')) return resp.headers['Content-Type'] JSON = discovery.MimeTypes.JSON JSON_HOME = discovery.MimeTypes.JSON_HOME JSON_MATCHER = tt_matchers.Equals(JSON) JSON_HOME_MATCHER = tt_matchers.Equals(JSON_HOME) # Default is JSON. self.assertThat(make_request(), JSON_MATCHER) # Can request JSON and get JSON. self.assertThat(make_request(JSON), JSON_MATCHER) # Can request JSONHome and get JSONHome. self.assertThat(make_request(JSON_HOME), JSON_HOME_MATCHER) # If request JSON, JSON Home get JSON. accept_types = f'{JSON}, {JSON_HOME}' self.assertThat(make_request(accept_types), JSON_MATCHER) # If request JSON Home, JSON get JSON. accept_types = f'{JSON_HOME}, {JSON}' self.assertThat(make_request(accept_types), JSON_MATCHER) # If request JSON Home, JSON;q=0.5 get JSON Home. 
accept_types = f'{JSON_HOME}, {JSON};q=0.5' self.assertThat(make_request(accept_types), JSON_HOME_MATCHER) # If request some unknown mime-type, get JSON. self.assertThat(make_request(self.getUniqueString()), JSON_MATCHER) class VersionSingleAppTestCase(unit.TestCase): """Test running with a single application loaded. These are important because when Keystone is running in Apache httpd there's only one application loaded for each instance. """ def setUp(self): super().setUp() self.load_backends() self.public_port = random.randint(40000, 60000) self.config_fixture.config( public_endpoint='http://localhost:%d' % self.public_port ) def config_overrides(self): super().config_overrides() def _paste_in_port(self, response, port): for link in response['links']: if link['rel'] == 'self': link['href'] = port def _test_version(self, app_name): app = self.loadapp(app_name) client = TestClient(app) resp = client.get('/') self.assertEqual(300, resp.status_int) data = jsonutils.loads(resp.body) expected = VERSIONS_RESPONSE url_with_port = 'http://localhost:%s/v3/' % self.public_port for version in expected['versions']['values']: # TODO(morgan): Eliminate the need to do the "paste-in-port" part # of the tests. Ultimately, this is very hacky and shows we are # not setting up the test case sanely. if version['id'].startswith('v3'): self._paste_in_port(version, url_with_port) # Explicitly check that a location header is set and it is pointing # to v3 (The preferred location for now)! 
self.assertIn('Location', resp.headers) self.assertEqual(url_with_port, resp.headers['Location']) self.assertThat(data, _VersionsEqual(expected)) def test_public(self): self._test_version('public') def test_admin(self): self._test_version('admin') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/tests/0000775000175000017500000000000000000000000020607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/tests/__init__.py0000664000175000017500000000000000000000000022706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/tests/test_core.py0000664000175000017500000000576300000000000023163 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys import warnings from oslo_log import log from sqlalchemy import exc from testtools import matchers from keystone.tests import unit LOG = log.getLogger(__name__) class BaseTestTestCase(unit.BaseTestCase): def test_unexpected_exit(self): # if a test calls sys.exit it raises rather than exiting. 
self.assertThat( lambda: sys.exit(), matchers.raises(unit.UnexpectedExit) ) class TestOverrideSkipping(unit.BaseTestCase): class TestParent(unit.BaseTestCase): def test_in_parent(self): pass class TestChild(TestParent): def test_in_parent(self): self.skip_test_overrides('some message') def test_not_in_parent(self): self.skip_test_overrides('some message') def test_skip_test_override_success(self): # NOTE(dstanek): let's run the test and see what happens test = self.TestChild('test_in_parent') result = test.run() # NOTE(dstanek): reach into testtools to ensure the test succeeded self.assertEqual([], result.decorated.errors) def test_skip_test_override_fails_for_missing_parent_test_case(self): # NOTE(dstanek): let's run the test and see what happens test = self.TestChild('test_not_in_parent') result = test.run() # NOTE(dstanek): reach into testtools to ensure the test failed # the way we expected observed_error = result.decorated.errors[0] observed_error_msg = observed_error[1] expected_error_msg = ( "'test_not_in_parent' is not a previously defined test method" ) self.assertIn(expected_error_msg, observed_error_msg) class TestTestCase(unit.TestCase): def test_bad_log(self): # If the arguments are invalid for the string in a log it raises an # exception during testing. 
self.assertThat( lambda: LOG.warning('String %(p1)s %(p2)s', {'p1': 'something'}), matchers.raises(KeyError), ) def test_sa_warning(self): self.assertThat( lambda: warnings.warn('test sa warning error', exc.SAWarning), matchers.raises(exc.SAWarning), ) def test_deprecation_warnings_are_raised_as_exceptions_in_tests(self): self.assertThat( lambda: warnings.warn('this is deprecated', DeprecationWarning), matchers.raises(DeprecationWarning), ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/tests/test_utils.py0000664000175000017500000000241500000000000023362 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import unittest from testtools import matchers from testtools import testcase from keystone.tests.unit import utils class TestWipDecorator(testcase.TestCase): def test_raises_SkipError_when_broken_test_fails(self): @utils.wip('waiting on bug #000000') def test(): raise Exception('i expected a failure - this is a WIP') e = self.assertRaises(unittest.SkipTest, test) self.assertThat(str(e), matchers.Contains('#000000')) def test_raises_AssertionError_when_test_passes(self): @utils.wip('waiting on bug #000000') def test(): pass # literally e = self.assertRaises(AssertionError, test) self.assertThat(str(e), matchers.Contains('#000000')) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/token/0000775000175000017500000000000000000000000020565 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/token/__init__.py0000664000175000017500000000000000000000000022664 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/token/test_fernet_provider.py0000664000175000017500000011107700000000000025402 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import datetime import hashlib import os from unittest import mock import uuid import fixtures from oslo_log import log from oslo_utils import timeutils from keystone import auth from keystone.common import fernet_utils from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.federation import constants as federation_constants from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import default_fixtures from keystone.tests.unit import ksfixtures from keystone.tests.unit.ksfixtures import database from keystone.token import provider from keystone.token.providers import fernet from keystone.token import token_formatters CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestFernetTokenProvider(unit.TestCase): def setUp(self): super().setUp() self.provider = fernet.Provider() def test_invalid_token_raises_token_not_found(self): token_id = uuid.uuid4().hex self.assertRaises( exception.TokenNotFound, self.provider.validate_token, token_id ) def test_log_warning_when_token_exceeds_max_token_size_default(self): self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO)) token = token_model.TokenModel() token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.expires_at = utils.isotime( provider.default_expire_time(), subsecond=True ) token.methods = ['password'] token.audit_id = provider.random_urlsafe_str() token_id, issued_at = self.provider.generate_id_and_issued_at(token) expected_output = ( f'Fernet token created with length of {len(token_id)} characters, ' 'which exceeds 255 characters' ) self.assertIn(expected_output, self.logging.output) def test_log_warning_when_token_exceeds_max_token_size_override(self): self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO)) 
self.config_fixture.config(max_token_size=250) token = token_model.TokenModel() token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.expires_at = utils.isotime( provider.default_expire_time(), subsecond=True ) token.methods = ['password'] token.audit_id = provider.random_urlsafe_str() token_id, issued_at = self.provider.generate_id_and_issued_at(token) expected_output = ( f'Fernet token created with length of {len(token_id)} characters, ' 'which exceeds 250 characters' ) self.assertIn(expected_output, self.logging.output) def test_no_warning_when_token_does_not_exceed_max_token_size(self): self.config_fixture.config(max_token_size=300) self.logging = self.useFixture(fixtures.FakeLogger(level=log.INFO)) token = token_model.TokenModel() token.user_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.project_id = '0123456789abcdef0123456789abcdef0123456789abcdef' token.expires_at = utils.isotime( provider.default_expire_time(), subsecond=True ) token.methods = ['password'] token.audit_id = provider.random_urlsafe_str() token_id, issued_at = self.provider.generate_id_and_issued_at(token) expected_output = ( f'Fernet token created with length of {len(token_id)} characters, ' 'which exceeds 255 characters' ) self.assertNotIn(expected_output, self.logging.output) class TestValidate(unit.TestCase): def setUp(self): super().setUp() self.useFixture(database.Database()) self.load_backends() PROVIDERS.resource_api.create_domain( default_fixtures.ROOT_DOMAIN['id'], default_fixtures.ROOT_DOMAIN ) def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', provider='fernet') def test_validate_v3_token_simple(self): # Check the fields in the token result when use validate_v3_token # with a simple token. 
domain_ref = unit.new_domain_ref() domain_ref = PROVIDERS.resource_api.create_domain( domain_ref['id'], domain_ref ) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = PROVIDERS.identity_api.create_user(user_ref) method_names = ['password'] token = PROVIDERS.token_provider_api.issue_token( user_ref['id'], method_names ) token = PROVIDERS.token_provider_api.validate_token(token.id) self.assertIsInstance(token.audit_ids, list) self.assertIsInstance(token.expires_at, str) self.assertIsInstance(token.issued_at, str) self.assertEqual(method_names, token.methods) self.assertEqual(user_ref['id'], token.user_id) self.assertEqual(user_ref['name'], token.user['name']) self.assertDictEqual(domain_ref, token.user_domain) self.assertEqual( user_ref['password_expires_at'], token.user['password_expires_at'] ) def _test_validate_v3_token_federted_info(self, group_ids): # Check the user fields in the token result when use validate_v3_token # when the token has federated info. domain_ref = unit.new_domain_ref() domain_ref = PROVIDERS.resource_api.create_domain( domain_ref['id'], domain_ref ) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = PROVIDERS.identity_api.create_user(user_ref) method_names = ['mapped'] idp_id = uuid.uuid4().hex idp_ref = { 'id': idp_id, 'description': uuid.uuid4().hex, 'enabled': True, } self.federation_api.create_idp(idp_id, idp_ref) protocol = uuid.uuid4().hex auth_context_params = { 'user_id': user_ref['id'], 'user_name': user_ref['name'], 'group_ids': group_ids, federation_constants.IDENTITY_PROVIDER: idp_id, federation_constants.PROTOCOL: protocol, } auth_context = auth.core.AuthContext(**auth_context_params) token = PROVIDERS.token_provider_api.issue_token( user_ref['id'], method_names, auth_context=auth_context ) token = PROVIDERS.token_provider_api.validate_token(token.id) self.assertEqual(user_ref['id'], token.user_id) self.assertEqual(user_ref['name'], token.user['name']) self.assertDictEqual(domain_ref, token.user_domain) 
exp_group_ids = [{'id': group_id} for group_id in group_ids] self.assertEqual(exp_group_ids, token.federated_groups) self.assertEqual(idp_id, token.identity_provider_id) self.assertEqual(protocol, token.protocol_id) def test_validate_v3_token_federated_info(self): # Check the user fields in the token result when use validate_v3_token # when the token has federated info. group_ids = [ uuid.uuid4().hex, ] self._test_validate_v3_token_federted_info(group_ids) def test_validate_v3_token_federated_info_empty_group(self): # check when federated users got empty group ids self._test_validate_v3_token_federted_info([]) def test_validate_v3_token_trust(self): # Check the trust fields in the token result when use validate_v3_token # when the token has trust info. domain_ref = unit.new_domain_ref() domain_ref = PROVIDERS.resource_api.create_domain( domain_ref['id'], domain_ref ) user_ref = unit.new_user_ref(domain_ref['id']) user_ref = PROVIDERS.identity_api.create_user(user_ref) trustor_user_ref = unit.new_user_ref(domain_ref['id']) trustor_user_ref = PROVIDERS.identity_api.create_user(trustor_user_ref) project_ref = unit.new_project_ref(domain_id=domain_ref['id']) project_ref = PROVIDERS.resource_api.create_project( project_ref['id'], project_ref ) role_ref = unit.new_role_ref() role_ref = PROVIDERS.role_api.create_role(role_ref['id'], role_ref) PROVIDERS.assignment_api.create_grant( role_ref['id'], user_id=user_ref['id'], project_id=project_ref['id'], ) PROVIDERS.assignment_api.create_grant( role_ref['id'], user_id=trustor_user_ref['id'], project_id=project_ref['id'], ) trustor_user_id = trustor_user_ref['id'] trustee_user_id = user_ref['id'] trust_ref = unit.new_trust_ref( trustor_user_id, trustee_user_id, project_id=project_ref['id'], role_ids=[ role_ref['id'], ], ) trust_ref = PROVIDERS.trust_api.create_trust( trust_ref['id'], trust_ref, trust_ref['roles'] ) method_names = ['password'] token = PROVIDERS.token_provider_api.issue_token( user_ref['id'], method_names, 
project_id=project_ref['id'], trust_id=trust_ref['id'], ) token = PROVIDERS.token_provider_api.validate_token(token.id) self.assertEqual(trust_ref['id'], token.trust_id) self.assertFalse(token.trust['impersonation']) self.assertEqual(user_ref['id'], token.trustee['id']) self.assertEqual(trustor_user_ref['id'], token.trustor['id']) def test_validate_v3_token_validation_error_exc(self): # When the token format isn't recognized, TokenNotFound is raised. # A uuid string isn't a valid Fernet token. token_id = uuid.uuid4().hex self.assertRaises( exception.TokenNotFound, PROVIDERS.token_provider_api.validate_token, token_id, ) class TestValidateWithoutCache(TestValidate): def config_overrides(self): super().config_overrides() self.config_fixture.config(group='token', caching=False) self.config_fixture.config(group='token', cache_on_issue=False) class TestTokenFormatter(unit.TestCase): def test_restore_padding(self): # 'a' will result in '==' padding, 'aa' will result in '=' padding, and # 'aaa' will result in no padding. binary_to_test = [b'a', b'aa', b'aaa'] for binary in binary_to_test: # base64.urlsafe_b64encode takes bytes and returns # bytes. encoded_string = base64.urlsafe_b64encode(binary) encoded_string = encoded_string.decode('utf-8') # encoded_string is now str. 
encoded_str_without_padding = encoded_string.rstrip('=') self.assertFalse(encoded_str_without_padding.endswith('=')) encoded_str_with_padding_restored = ( token_formatters.TokenFormatter.restore_padding( encoded_str_without_padding ) ) self.assertEqual(encoded_string, encoded_str_with_padding_restored) def test_create_validate_federated_unscoped_token_non_uuid_user_id(self): exp_user_id = hashlib.sha256().hexdigest() exp_methods = ['password'] exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True) exp_audit_ids = [provider.random_urlsafe_str()] exp_federated_group_ids = [{'id': uuid.uuid4().hex}] exp_idp_id = uuid.uuid4().hex exp_protocol_id = uuid.uuid4().hex token_formatter = token_formatters.TokenFormatter() token = token_formatter.create_token( user_id=exp_user_id, expires_at=exp_expires_at, audit_ids=exp_audit_ids, payload_class=token_formatters.FederatedUnscopedPayload, methods=exp_methods, federated_group_ids=exp_federated_group_ids, identity_provider_id=exp_idp_id, protocol_id=exp_protocol_id, ) ( user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, issued_at, expires_at, ) = token_formatter.validate_token(token) self.assertEqual(exp_user_id, user_id) self.assertTrue(isinstance(user_id, str)) self.assertEqual(exp_methods, methods) self.assertEqual(exp_audit_ids, audit_ids) self.assertEqual(exp_federated_group_ids, federated_group_ids) self.assertEqual(exp_idp_id, identity_provider_id) self.assertEqual(exp_protocol_id, protocol_id) def test_create_validate_federated_scoped_token_non_uuid_user_id(self): exp_user_id = hashlib.sha256().hexdigest() exp_methods = ['password'] exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True) exp_audit_ids = [provider.random_urlsafe_str()] exp_federated_group_ids = [{'id': uuid.uuid4().hex}] exp_idp_id = uuid.uuid4().hex exp_protocol_id = uuid.uuid4().hex exp_project_id = uuid.uuid4().hex 
token_formatter = token_formatters.TokenFormatter() token = token_formatter.create_token( user_id=exp_user_id, expires_at=exp_expires_at, audit_ids=exp_audit_ids, payload_class=token_formatters.FederatedProjectScopedPayload, methods=exp_methods, federated_group_ids=exp_federated_group_ids, identity_provider_id=exp_idp_id, protocol_id=exp_protocol_id, project_id=exp_project_id, ) ( user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, issued_at, expires_at, ) = token_formatter.validate_token(token) self.assertEqual(exp_user_id, user_id) self.assertTrue(isinstance(user_id, str)) self.assertEqual(exp_methods, methods) self.assertEqual(exp_audit_ids, audit_ids) self.assertEqual(exp_project_id, project_id) self.assertEqual(exp_federated_group_ids, federated_group_ids) self.assertEqual(exp_idp_id, identity_provider_id) self.assertEqual(exp_protocol_id, protocol_id) class TestPayloads(unit.TestCase): def assertTimestampsEqual(self, expected, actual): # The timestamp that we get back when parsing the payload may not # exactly match the timestamp that was put in the payload due to # conversion to and from a float. exp_time = timeutils.parse_isotime(expected) actual_time = timeutils.parse_isotime(actual) # the granularity of timestamp string is microseconds and it's only the # last digit in the representation that's different, so use a delta # just above nanoseconds. 
return self.assertCloseEnoughForGovernmentWork( exp_time, actual_time, delta=1e-05 ) def test_strings_can_be_converted_to_bytes(self): s = provider.random_urlsafe_str() self.assertIsInstance(s, str) b = token_formatters.BasePayload.random_urlsafe_str_to_bytes(s) self.assertIsInstance(b, bytes) def test_uuid_hex_to_byte_conversions(self): payload_cls = token_formatters.BasePayload expected_hex_uuid = uuid.uuid4().hex uuid_obj = uuid.UUID(expected_hex_uuid) expected_uuid_in_bytes = uuid_obj.bytes actual_uuid_in_bytes = payload_cls.convert_uuid_hex_to_bytes( expected_hex_uuid ) self.assertEqual(expected_uuid_in_bytes, actual_uuid_in_bytes) actual_hex_uuid = payload_cls.convert_uuid_bytes_to_hex( expected_uuid_in_bytes ) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def test_time_string_to_float_conversions(self): payload_cls = token_formatters.BasePayload original_time_str = utils.isotime(subsecond=True) time_obj = timeutils.parse_isotime(original_time_str) expected_time_float = ( timeutils.normalize_time(time_obj) - datetime.datetime.fromtimestamp( 0, datetime.timezone.utc ).replace(tzinfo=None) ).total_seconds() # NOTE(lbragstad): The token expiration time for Fernet tokens is # passed in the payload of the token. This is different from the token # creation time, which is handled by Fernet and doesn't support # subsecond precision because it is a timestamp integer. self.assertIsInstance(expected_time_float, float) actual_time_float = payload_cls._convert_time_string_to_float( original_time_str ) self.assertIsInstance(actual_time_float, float) self.assertEqual(expected_time_float, actual_time_float) # Generate expected_time_str using the same time float. Using # original_time_str from utils.isotime will occasionally fail due to # floating point rounding differences. 
time_object = datetime.datetime.fromtimestamp( actual_time_float, datetime.timezone.utc ).replace(tzinfo=None) expected_time_str = utils.isotime(time_object, subsecond=True) actual_time_str = payload_cls._convert_float_to_time_string( actual_time_float ) self.assertEqual(expected_time_str, actual_time_str) def test_convert_or_decode_uuid_bytes(self): payload_cls = token_formatters.BasePayload expected_hex_uuid = uuid.uuid4().hex uuid_obj = uuid.UUID(expected_hex_uuid) expected_uuid_in_bytes = uuid_obj.bytes actual_hex_uuid = payload_cls._convert_or_decode( is_stored_as_bytes=True, value=expected_uuid_in_bytes ) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def test_convert_or_decode_binary_type(self): payload_cls = token_formatters.BasePayload expected_hex_uuid = uuid.uuid4().hex actual_hex_uuid = payload_cls._convert_or_decode( is_stored_as_bytes=False, value=expected_hex_uuid.encode('utf-8') ) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def test_convert_or_decode_text_type(self): payload_cls = token_formatters.BasePayload expected_hex_uuid = uuid.uuid4().hex actual_hex_uuid = payload_cls._convert_or_decode( is_stored_as_bytes=False, value=expected_hex_uuid ) self.assertEqual(expected_hex_uuid, actual_hex_uuid) def _test_payload( self, payload_class, exp_user_id=None, exp_methods=None, exp_system=None, exp_project_id=None, exp_domain_id=None, exp_trust_id=None, exp_federated_group_ids=None, exp_identity_provider_id=None, exp_protocol_id=None, exp_access_token_id=None, exp_app_cred_id=None, encode_ids=False, exp_thumbprint=None, ): def _encode_id(value): if value is not None and str(value) and encode_ids: return value.encode('utf-8') return value exp_user_id = exp_user_id or uuid.uuid4().hex exp_methods = exp_methods or ['password'] exp_expires_at = utils.isotime(timeutils.utcnow(), subsecond=True) exp_audit_ids = [provider.random_urlsafe_str()] payload = payload_class.assemble( _encode_id(exp_user_id), exp_methods, _encode_id(exp_system), 
_encode_id(exp_project_id), exp_domain_id, exp_expires_at, exp_audit_ids, exp_trust_id, _encode_id(exp_federated_group_ids), _encode_id(exp_identity_provider_id), exp_protocol_id, _encode_id(exp_access_token_id), _encode_id(exp_app_cred_id), exp_thumbprint, ) ( user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) = payload_class.disassemble(payload) self.assertEqual(exp_user_id, user_id) self.assertEqual(exp_methods, methods) self.assertTimestampsEqual(exp_expires_at, expires_at) self.assertEqual(exp_audit_ids, audit_ids) self.assertEqual(exp_system, system) self.assertEqual(exp_project_id, project_id) self.assertEqual(exp_domain_id, domain_id) self.assertEqual(exp_federated_group_ids, federated_group_ids) self.assertEqual(exp_identity_provider_id, identity_provider_id) self.assertEqual(exp_protocol_id, protocol_id) self.assertEqual(exp_trust_id, trust_id) self.assertEqual(exp_access_token_id, access_token_id) self.assertEqual(exp_app_cred_id, app_cred_id) def test_unscoped_payload(self): self._test_payload(token_formatters.UnscopedPayload) def test_system_scoped_payload(self): self._test_payload( token_formatters.SystemScopedPayload, exp_system='all' ) def test_project_scoped_payload(self): self._test_payload( token_formatters.ProjectScopedPayload, exp_project_id=uuid.uuid4().hex, ) def test_domain_scoped_payload(self): self._test_payload( token_formatters.DomainScopedPayload, exp_domain_id=uuid.uuid4().hex, ) def test_domain_scoped_payload_with_default_domain(self): self._test_payload( token_formatters.DomainScopedPayload, exp_domain_id=CONF.identity.default_domain_id, ) def test_trust_scoped_payload(self): self._test_payload( token_formatters.TrustScopedPayload, exp_project_id=uuid.uuid4().hex, exp_trust_id=uuid.uuid4().hex, ) def test_unscoped_payload_with_non_uuid_user_id(self): self._test_payload( token_formatters.UnscopedPayload, 
exp_user_id='someNonUuidUserId' ) def test_unscoped_payload_with_16_char_non_uuid_user_id(self): self._test_payload( token_formatters.UnscopedPayload, exp_user_id='0123456789abcdef' ) def test_project_scoped_payload_with_non_uuid_ids(self): self._test_payload( token_formatters.ProjectScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId', ) def test_project_scoped_payload_with_16_char_non_uuid_ids(self): self._test_payload( token_formatters.ProjectScopedPayload, exp_user_id='0123456789abcdef', exp_project_id='0123456789abcdef', ) def test_project_scoped_payload_with_binary_encoded_ids(self): self._test_payload( token_formatters.ProjectScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId', encode_ids=True, ) def test_domain_scoped_payload_with_non_uuid_user_id(self): self._test_payload( token_formatters.DomainScopedPayload, exp_user_id='nonUuidUserId', exp_domain_id=uuid.uuid4().hex, ) def test_domain_scoped_payload_with_16_char_non_uuid_user_id(self): self._test_payload( token_formatters.DomainScopedPayload, exp_user_id='0123456789abcdef', exp_domain_id=uuid.uuid4().hex, ) def test_trust_scoped_payload_with_non_uuid_ids(self): self._test_payload( token_formatters.TrustScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId', exp_trust_id=uuid.uuid4().hex, ) def test_trust_scoped_payload_with_16_char_non_uuid_ids(self): self._test_payload( token_formatters.TrustScopedPayload, exp_user_id='0123456789abcdef', exp_project_id='0123456789abcdef', exp_trust_id=uuid.uuid4().hex, ) def _test_federated_payload_with_ids(self, exp_user_id, exp_group_id): exp_federated_group_ids = [{'id': exp_group_id}] exp_idp_id = uuid.uuid4().hex exp_protocol_id = uuid.uuid4().hex self._test_payload( token_formatters.FederatedUnscopedPayload, exp_user_id=exp_user_id, exp_federated_group_ids=exp_federated_group_ids, exp_identity_provider_id=exp_idp_id, exp_protocol_id=exp_protocol_id, ) def 
test_federated_payload_with_non_uuid_ids(self): self._test_federated_payload_with_ids( 'someNonUuidUserId', 'someNonUuidGroupId' ) def test_federated_payload_with_16_char_non_uuid_ids(self): self._test_federated_payload_with_ids( '0123456789abcdef', '0123456789abcdef' ) def test_federated_project_scoped_payload(self): exp_federated_group_ids = [{'id': 'someNonUuidGroupId'}] exp_idp_id = uuid.uuid4().hex exp_protocol_id = uuid.uuid4().hex self._test_payload( token_formatters.FederatedProjectScopedPayload, exp_user_id='someNonUuidUserId', exp_methods=['token'], exp_project_id=uuid.uuid4().hex, exp_federated_group_ids=exp_federated_group_ids, exp_identity_provider_id=exp_idp_id, exp_protocol_id=exp_protocol_id, ) def test_federated_domain_scoped_payload(self): exp_federated_group_ids = [{'id': 'someNonUuidGroupId'}] exp_idp_id = uuid.uuid4().hex exp_protocol_id = uuid.uuid4().hex self._test_payload( token_formatters.FederatedDomainScopedPayload, exp_user_id='someNonUuidUserId', exp_methods=['token'], exp_domain_id=uuid.uuid4().hex, exp_federated_group_ids=exp_federated_group_ids, exp_identity_provider_id=exp_idp_id, exp_protocol_id=exp_protocol_id, ) def test_oauth_scoped_payload(self): self._test_payload( token_formatters.OauthScopedPayload, exp_project_id=uuid.uuid4().hex, exp_access_token_id=uuid.uuid4().hex, ) def test_app_cred_scoped_payload_with_non_uuid_ids(self): self._test_payload( token_formatters.ApplicationCredentialScopedPayload, exp_user_id='someNonUuidUserId', exp_project_id='someNonUuidProjectId', exp_app_cred_id='someNonUuidAppCredId', ) def test_app_cred_scoped_payload_with_16_char_non_uuid_ids(self): self._test_payload( token_formatters.ApplicationCredentialScopedPayload, exp_user_id='0123456789abcdef', exp_project_id='0123456789abcdef', exp_app_cred_id='0123456789abcdef', ) class TestFernetKeyRotation(unit.TestCase): def setUp(self): super().setUp() # A collection of all previously-seen signatures of the key # repository's contents. 
self.key_repo_signatures = set() @property def keys(self): """Key files converted to numbers.""" return sorted( int(x) for x in os.listdir(CONF.fernet_tokens.key_repository) ) @property def key_repository_size(self): """The number of keys in the key repository.""" return len(self.keys) @property def key_repository_signature(self): """Create a "thumbprint" of the current key repository. Because key files are renamed, this produces a hash of the contents of the key files, ignoring their filenames. The resulting signature can be used, for example, to ensure that you have a unique set of keys after you perform a key rotation (taking a static set of keys, and simply shuffling them, would fail such a test). """ # Load the keys into a list, keys is list of str. key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) keys = key_utils.load_keys() # Sort the list of keys by the keys themselves (they were previously # sorted by filename). keys.sort() # Create the thumbprint using all keys in the repository. signature = hashlib.sha1() for key in keys: # Need to convert key to bytes for update. signature.update(key.encode('utf-8')) return signature.hexdigest() def assertRepositoryState(self, expected_size): """Validate the state of the key repository.""" self.assertEqual(expected_size, self.key_repository_size) self.assertUniqueRepositoryState() def assertUniqueRepositoryState(self): """Ensure that the current key repo state has not been seen before.""" # This is assigned to a variable because it takes some work to # calculate. signature = self.key_repository_signature # Ensure the signature is not in the set of previously seen signatures. self.assertNotIn(signature, self.key_repo_signatures) # Add the signature to the set of repository signatures to validate # that we don't see it again later. 
self.key_repo_signatures.add(signature) def test_rotation(self): # Initializing a key repository results in this many keys. We don't # support max_active_keys being set any lower. min_active_keys = 2 # Simulate every rotation strategy up to "rotating once a week while # maintaining a year's worth of keys." for max_active_keys in range(min_active_keys, 52 + 1): self.config_fixture.config( group='fernet_tokens', max_active_keys=max_active_keys ) # Ensure that resetting the key repository always results in 2 # active keys. self.useFixture( ksfixtures.KeyRepository( self.config_fixture, 'fernet_tokens', CONF.fernet_tokens.max_active_keys, ) ) # Validate the initial repository state. self.assertRepositoryState(expected_size=min_active_keys) # The repository should be initialized with a staged key (0) and a # primary key (1). The next key is just auto-incremented. exp_keys = [0, 1] next_key_number = exp_keys[-1] + 1 # keep track of next key self.assertEqual(exp_keys, self.keys) # Rotate the keys just enough times to fully populate the key # repository. key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) for rotation in range(max_active_keys - min_active_keys): key_utils.rotate_keys() self.assertRepositoryState(expected_size=rotation + 3) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) # We should have a fully populated key repository now. self.assertEqual(max_active_keys, self.key_repository_size) # Rotate an additional number of times to ensure that we maintain # the desired number of active keys. 
key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) for rotation in range(10): key_utils.rotate_keys() self.assertRepositoryState(expected_size=max_active_keys) exp_keys.pop(1) exp_keys.append(next_key_number) next_key_number += 1 self.assertEqual(exp_keys, self.keys) def test_rotation_disk_write_fail(self): # Make sure that the init key repository contains 2 keys self.assertRepositoryState(expected_size=2) key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) # Simulate the disk full situation mock_open = mock.mock_open() file_handle = mock_open() file_handle.flush.side_effect = IOError('disk full') with mock.patch('keystone.common.fernet_utils.open', mock_open): self.assertRaises(IOError, key_utils.rotate_keys) # Assert that the key repository is unchanged self.assertEqual(self.key_repository_size, 2) with mock.patch('keystone.common.fernet_utils.open', mock_open): self.assertRaises(IOError, key_utils.rotate_keys) # Assert that the key repository is still unchanged, even after # repeated rotation attempts. self.assertEqual(self.key_repository_size, 2) # Rotate the keys normally, without any mocking, to show that the # system can recover. key_utils.rotate_keys() # Assert that the key repository is now expanded. 
self.assertEqual(self.key_repository_size, 3) def test_rotation_empty_file(self): active_keys = 2 self.assertRepositoryState(expected_size=active_keys) empty_file = os.path.join(CONF.fernet_tokens.key_repository, '2') with open(empty_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) # Rotate the keys to overwrite the empty file key_utils.rotate_keys() self.assertTrue(os.path.isfile(empty_file)) keys = key_utils.load_keys() self.assertEqual(3, len(keys)) self.assertTrue(os.path.getsize(empty_file) > 0) def test_non_numeric_files(self): evil_file = os.path.join(CONF.fernet_tokens.key_repository, '99.bak') with open(evil_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) key_utils.rotate_keys() self.assertTrue(os.path.isfile(evil_file)) keys = 0 for x in os.listdir(CONF.fernet_tokens.key_repository): if x == '99.bak': continue keys += 1 self.assertEqual(3, keys) class TestLoadKeys(unit.TestCase): def assertValidFernetKeys(self, keys): # Make sure each key is a non-empty string for key in keys: self.assertGreater(len(key), 0) self.assertIsInstance(key, str) def test_non_numeric_files(self): evil_file = os.path.join(CONF.fernet_tokens.key_repository, '~1') with open(evil_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) keys = key_utils.load_keys() self.assertEqual(2, len(keys)) self.assertValidFernetKeys(keys) def test_empty_files(self): empty_file = os.path.join(CONF.fernet_tokens.key_repository, '2') with open(empty_file, 'w'): pass key_utils = fernet_utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) keys = key_utils.load_keys() self.assertEqual(2, len(keys)) self.assertValidFernetKeys(keys) 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/token/test_jws_provider.py0000664000175000017500000001101200000000000024706 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid from keystone.common import jwt_utils from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.models import token_model from keystone.tests import unit from keystone.tests.unit import ksfixtures from keystone.token import provider from keystone.token.providers import jws CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class TestJWSProvider(unit.TestCase): def setUp(self): super().setUp() self.config_fixture.config(group='token', provider='jws') self.useFixture(ksfixtures.JWSKeyRepository(self.config_fixture)) self.provider = jws.Provider() def test_invalid_token_raises_token_not_found(self): token_id = uuid.uuid4().hex self.assertRaises( exception.TokenNotFound, self.provider.validate_token, token_id ) def test_non_existent_private_key_raises_system_exception(self): private_key = os.path.join( CONF.jwt_tokens.jws_private_key_repository, 'private.pem' ) os.remove(private_key) self.assertRaises(SystemExit, jws.Provider) def test_non_existent_public_key_repo_raises_system_exception(self): for f in os.listdir(CONF.jwt_tokens.jws_public_key_repository): path = 
os.path.join(CONF.jwt_tokens.jws_public_key_repository, f) os.remove(path) os.rmdir(CONF.jwt_tokens.jws_public_key_repository) self.assertRaises(SystemExit, jws.Provider) def test_empty_public_key_repo_raises_system_exception(self): for f in os.listdir(CONF.jwt_tokens.jws_public_key_repository): path = os.path.join(CONF.jwt_tokens.jws_public_key_repository, f) os.remove(path) self.assertRaises(SystemExit, jws.Provider) def test_unable_to_verify_token_with_missing_public_key(self): # create token, signing with private key token = token_model.TokenModel() token.methods = ['password'] token.user_id = uuid.uuid4().hex token.audit_id = provider.random_urlsafe_str() token.expires_at = utils.isotime( provider.default_expire_time(), subsecond=True ) token_id, issued_at = self.provider.generate_id_and_issued_at(token) # remove the public key for the token we just created current_pub_key = os.path.join( CONF.jwt_tokens.jws_public_key_repository, 'public.pem' ) os.remove(current_pub_key) # create additional public keys for _ in range(2): private_key_path = os.path.join( CONF.jwt_tokens.jws_private_key_repository, uuid.uuid4().hex ) pub_key_path = os.path.join( CONF.jwt_tokens.jws_public_key_repository, uuid.uuid4().hex ) jwt_utils.create_jws_keypair(private_key_path, pub_key_path) # validate token and ensure it returns a 404 self.assertRaises( exception.TokenNotFound, self.provider.validate_token, token_id ) def test_verify_token_with_multiple_public_keys_present(self): token = token_model.TokenModel() token.methods = ['password'] token.user_id = uuid.uuid4().hex token.audit_id = provider.random_urlsafe_str() token.expires_at = utils.isotime( provider.default_expire_time(), subsecond=True ) token_id, issued_at = self.provider.generate_id_and_issued_at(token) for _ in range(2): private_key_path = os.path.join( CONF.jwt_tokens.jws_private_key_repository, uuid.uuid4().hex ) pub_key_path = os.path.join( CONF.jwt_tokens.jws_public_key_repository, uuid.uuid4().hex ) 
jwt_utils.create_jws_keypair(private_key_path, pub_key_path) # make sure we iterate through all public keys on disk and we can still # validate the token self.provider.validate_token(token_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/token/test_token_serialization.py0000664000175000017500000000564300000000000026263 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock import uuid from oslo_utils import timeutils from keystone.common.cache import _context_cache from keystone.common import utils as ks_utils from keystone import exception from keystone.models import token_model from keystone.tests.unit import base_classes class TestTokenSerialization(base_classes.TestCaseWithBootstrap): def setUp(self): super().setUp() self.admin_user_id = self.bootstrapper.admin_user_id self.admin_username = self.bootstrapper.admin_username self.admin_password = self.bootstrapper.admin_password self.project_id = self.bootstrapper.project_id self.project_name = self.bootstrapper.project_name self.admin_role_id = self.bootstrapper.admin_role_id self.member_role_id = self.bootstrapper.member_role_id self.reader_role_id = self.bootstrapper.reader_role_id self.token_id = uuid.uuid4().hex issued_at = timeutils.utcnow() self.issued_at = ks_utils.isotime(at=issued_at, subsecond=True) # Reach into the cache registry and pull out an instance of the # 
_TokenModelHandler so that we can interact and test it directly (as # opposed to using PROVIDERS or managers to invoke it). token_handler_id = token_model._TokenModelHandler.identity self.token_handler = _context_cache._registry.get(token_handler_id) self.exp_token = token_model.TokenModel() self.exp_token.user_id = self.admin_user_id self.exp_token.project_id = self.project_id self.exp_token.mint(self.token_id, self.issued_at) def test_serialize_and_deserialize_token_model(self): serialized = self.token_handler.serialize(self.exp_token) token = self.token_handler.deserialize(serialized) self.assertEqual(self.exp_token.user_id, token.user_id) self.assertEqual(self.exp_token.project_id, token.project_id) self.assertEqual(self.exp_token.id, token.id) self.assertEqual(self.exp_token.issued_at, token.issued_at) @mock.patch.object( token_model.TokenModel, '__init__', side_effect=Exception ) def test_error_handling_in_deserialize(self, handler_mock): serialized = self.token_handler.serialize(self.exp_token) self.assertRaises( exception.CacheDeserializationError, self.token_handler.deserialize, serialized, ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/tests/unit/trust/0000775000175000017500000000000000000000000020626 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/trust/__init__.py0000664000175000017500000000000000000000000022725 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/trust/test_backends.py0000664000175000017500000005401100000000000024012 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import uuid from oslo_utils import timeutils from keystone.common import provider_api from keystone import exception from keystone.tests.unit import core PROVIDERS = provider_api.ProviderAPIs class TrustTests: def create_sample_trust(self, new_id, remaining_uses=None): self.trustor = self.user_foo self.trustee = self.user_two expires_at = timeutils.utcnow().replace(year=2032) trust_data = PROVIDERS.trust_api.create_trust( new_id, { 'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.user_two['id'], 'project_id': self.project_bar['id'], 'expires_at': expires_at, 'impersonation': True, 'remaining_uses': remaining_uses, }, roles=[{"id": "member"}, {"id": "other"}, {"id": "browser"}], ) return trust_data def test_delete_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) trust_id = trust_data['id'] self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.get_trust(trust_id) self.assertEqual(new_id, trust_data['id']) PROVIDERS.trust_api.delete_trust(trust_id) self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.get_trust, trust_id ) def test_delete_trust_not_found(self): trust_id = uuid.uuid4().hex self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.delete_trust, trust_id ) def test_get_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) trust_id = trust_data['id'] self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.get_trust(trust_id) self.assertEqual(new_id, trust_data['id']) PROVIDERS.trust_api.delete_trust(trust_data['id']) def 
test_get_deleted_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) self.assertIsNotNone(trust_data) self.assertIsNone(trust_data['deleted_at']) PROVIDERS.trust_api.delete_trust(new_id) self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.get_trust, new_id ) deleted_trust = PROVIDERS.trust_api.get_trust( trust_data['id'], deleted=True ) self.assertEqual(trust_data['id'], deleted_trust['id']) self.assertIsNotNone(deleted_trust.get('deleted_at')) def test_create_trust(self): new_id = uuid.uuid4().hex trust_data = self.create_sample_trust(new_id) self.assertEqual(new_id, trust_data['id']) self.assertEqual(self.trustee['id'], trust_data['trustee_user_id']) self.assertEqual(self.trustor['id'], trust_data['trustor_user_id']) self.assertGreater( timeutils.normalize_time(trust_data['expires_at']), timeutils.utcnow(), ) self.assertEqual( [{'id': 'member'}, {'id': 'other'}, {'id': 'browser'}], trust_data['roles'], ) def test_list_trust_by_trustee(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = PROVIDERS.trust_api.list_trusts_for_trustee( self.trustee['id'] ) self.assertEqual(3, len(trusts)) self.assertEqual(trusts[0]["trustee_user_id"], self.trustee['id']) trusts = PROVIDERS.trust_api.list_trusts_for_trustee( self.trustor['id'] ) self.assertEqual(0, len(trusts)) def test_list_trust_by_trustor(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = PROVIDERS.trust_api.list_trusts_for_trustor( self.trustor['id'] ) self.assertEqual(3, len(trusts)) self.assertEqual(trusts[0]["trustor_user_id"], self.trustor['id']) trusts = PROVIDERS.trust_api.list_trusts_for_trustor( self.trustee['id'] ) self.assertEqual(0, len(trusts)) def test_list_trusts(self): for i in range(3): self.create_sample_trust(uuid.uuid4().hex) trusts = PROVIDERS.trust_api.list_trusts() self.assertEqual(3, len(trusts)) def test_trust_has_remaining_uses_positive(self): # create a trust with limited uses, check that we 
have uses left trust_data = self.create_sample_trust( uuid.uuid4().hex, remaining_uses=5 ) self.assertEqual(5, trust_data['remaining_uses']) # create a trust with unlimited uses, check that we have uses left trust_data = self.create_sample_trust(uuid.uuid4().hex) self.assertIsNone(trust_data['remaining_uses']) def test_trust_has_remaining_uses_negative(self): # try to create a trust with no remaining uses, check that it fails self.assertRaises( exception.ValidationError, self.create_sample_trust, uuid.uuid4().hex, remaining_uses=0, ) # try to create a trust with negative remaining uses, # check that it fails self.assertRaises( exception.ValidationError, self.create_sample_trust, uuid.uuid4().hex, remaining_uses=-12, ) def test_consume_use(self): # consume a trust repeatedly until it has no uses anymore trust_data = self.create_sample_trust( uuid.uuid4().hex, remaining_uses=2 ) PROVIDERS.trust_api.consume_use(trust_data['id']) t = PROVIDERS.trust_api.get_trust(trust_data['id']) self.assertEqual(1, t['remaining_uses']) PROVIDERS.trust_api.consume_use(trust_data['id']) # This was the last use, the trust isn't available anymore self.assertRaises( exception.TrustNotFound, PROVIDERS.trust_api.get_trust, trust_data['id'], ) def test_duplicate_trusts_not_allowed(self): self.trustor = self.user_foo self.trustee = self.user_two trust_data = { 'trustor_user_id': self.trustor['id'], 'trustee_user_id': self.user_two['id'], 'project_id': self.project_bar['id'], 'expires_at': timeutils.parse_isotime('2032-02-18T18:10:00Z'), 'impersonation': True, 'remaining_uses': None, } roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] PROVIDERS.trust_api.create_trust(uuid.uuid4().hex, trust_data, roles) self.assertRaises( exception.Conflict, PROVIDERS.trust_api.create_trust, uuid.uuid4().hex, trust_data, roles, ) def test_flush_expired_trusts(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], 
self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=1 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( date=timeutils.utcnow() ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_all_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_foo['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=5 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( project_id=self.project_bar['id'], trustor_user_id=self.user_foo['id'], trustee_user_id=self.user_two['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref1['id'], trusts[0]['id']) def test_flush_expired_trusts_with_no_project_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], 
self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=1 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( trustor_user_id=self.user_foo['id'], trustee_user_id=self.user_two['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_no_trustor_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=1 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( project_id=self.project_bar['id'], trustee_user_id=self.user_two['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_no_trustee_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 
= core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=1 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( project_id=self.project_bar['id'], trustor_user_id=self.user_foo['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_project_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.user_foo['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=5 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( project_id=self.project_bar['id'], date=timeutils.utcnow() ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_trustee_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = 
core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_foo['id'], self.user_foo['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=5 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( trustee_user_id=self.user_two['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_flush_expired_trusts_with_trustor_id(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=1 ) trust_ref2 = core.new_trust_ref( self.user_two['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=5 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( trustor_user_id=self.user_foo['id'], date=timeutils.utcnow(), ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) def test_non_expired_soft_deleted_trusts(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], 
self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=10 ) trust_ref2 = core.new_trust_ref( self.user_two['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=5 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.delete_trust(trust_ref2['id']) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( date=timeutils.utcnow() ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref1['id'], trusts[0]['id']) def test_non_expired_non_deleted_trusts(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=10 ) trust_ref2 = core.new_trust_ref( self.user_two['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=5 ) trust_ref3 = core.new_trust_ref( self.user_two['id'], self.user_foo['id'], project_id=self.project_bar['id'], ) trust_ref3['expires_at'] = None trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.delete_trust(trust_ref2['id']) trust_data = PROVIDERS.trust_api.create_trust( trust_ref3['id'], trust_ref3, roles ) self.assertIsNotNone(trust_data) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( date=timeutils.utcnow() ) trusts = self.trust_api.list_trusts() 
self.assertEqual(len(trusts), 2) def test_flush_expired_trusts_with_date(self): roles = [{"id": "member"}, {"id": "other"}, {"id": "browser"}] trust_ref1 = core.new_trust_ref( self.user_foo['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref1['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=10 ) trust_ref2 = core.new_trust_ref( self.user_two['id'], self.user_two['id'], project_id=self.project_bar['id'], ) trust_ref2['expires_at'] = timeutils.utcnow() + datetime.timedelta( minutes=30 ) trust_ref3 = core.new_trust_ref( self.user_two['id'], self.user_foo['id'], project_id=self.project_bar['id'], ) trust_ref3['expires_at'] = timeutils.utcnow() - datetime.timedelta( minutes=30 ) trust_data = PROVIDERS.trust_api.create_trust( trust_ref1['id'], trust_ref1, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref2['id'], trust_ref2, roles ) self.assertIsNotNone(trust_data) trust_data = PROVIDERS.trust_api.create_trust( trust_ref3['id'], trust_ref3, roles ) self.assertIsNotNone(trust_data) fake_date = timeutils.utcnow() + datetime.timedelta(minutes=15) PROVIDERS.trust_api.flush_expired_and_soft_deleted_trusts( date=fake_date ) trusts = self.trust_api.list_trusts() self.assertEqual(len(trusts), 1) self.assertEqual(trust_ref2['id'], trusts[0]['id']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/tests/unit/utils.py0000664000175000017500000000761700000000000021172 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Useful utilities for tests.""" import functools import os import time import unittest import uuid TZ = None def timezone(func): @functools.wraps(func) def wrapper(*args, **kwargs): tz_original = os.environ.get('TZ') try: if TZ: os.environ['TZ'] = TZ time.tzset() return func(*args, **kwargs) finally: if TZ: if tz_original: os.environ['TZ'] = tz_original else: if 'TZ' in os.environ: del os.environ['TZ'] time.tzset() return wrapper def new_uuid(): """Return a string UUID.""" return uuid.uuid4().hex def wip(message, expected_exception=Exception, bug=None): """Mark a test as work in progress. Based on code by Nat Pryce: https://gist.github.com/npryce/997195#file-wip-py The test will always be run. If the test fails then a SkipTest exception is raised. If the test passes an AssertionError exception is raised so that the developer knows they made the test pass. This is a reminder to remove the decorator. :param message: a string message to help clarify why the test is marked as a work in progress :param expected_exception: an exception class that will be checked for when @wip verifies an exception is raised. The test will fail if a different exception is raised. 
Default is "any" exception is valid :param bug: (optional) a string for tracking the bug and what bug should cause the @wip decorator to be removed from the testcase Usage: >>> @wip('Expected Error', expected_exception=Exception, bug="#000000") >>> def test(): >>> pass """ if bug: bugstr = " (BugID " + bug + ")" else: bugstr = "" def _wip(f): @functools.wraps(f) def run_test(*args, **kwargs): __e = None try: f(*args, **kwargs) except Exception as __e: # noqa F841 if expected_exception != Exception and not isinstance( __e, expected_exception ): raise AssertionError( 'Work In Progress Test Failed%(bugstr)s with ' 'unexpected exception. Expected "%(expected)s" ' 'got "%(exception)s": %(message)s ' % { 'message': message, 'bugstr': bugstr, 'expected': expected_exception.__class__.__name__, 'exception': __e.__class__.__name__, } ) # NOTE(notmorgan): We got the expected exception we can safely # skip this test. raise unittest.SkipTest( 'Work In Progress Test Failed as ' 'expected%(bugstr)s: %(message)s' % {'message': message, 'bugstr': bugstr} ) raise AssertionError( 'Work In Progress Test Passed%(bugstr)s: ' '%(message)s' % {'message': message, 'bugstr': bugstr} ) return run_test return _wip ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.570113 keystone-26.0.0/keystone/token/0000775000175000017500000000000000000000000016444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/__init__.py0000664000175000017500000000116600000000000020561 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.token import provider # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/provider.py0000664000175000017500000003302000000000000020646 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Token provider interface.""" import base64 import datetime import uuid from oslo_log import log from oslo_utils import timeutils from keystone.common import cache from keystone.common import manager from keystone.common import provider_api from keystone.common import utils import keystone.conf from keystone import exception from keystone.federation import constants from keystone.i18n import _ from keystone.models import token_model from keystone import notifications CONF = keystone.conf.CONF LOG = log.getLogger(__name__) PROVIDERS = provider_api.ProviderAPIs TOKENS_REGION = cache.create_region(name='tokens') MEMOIZE_TOKENS = cache.get_memoization_decorator( group='token', region=TOKENS_REGION ) # NOTE(morganfainberg): This is for compatibility in case someone was relying # on the old location of the UnsupportedTokenVersionException for their code. UnsupportedTokenVersionException = exception.UnsupportedTokenVersionException # supported token versions V3 = token_model.V3 VERSIONS = token_model.VERSIONS # minimum access rules support ACCESS_RULES_MIN_VERSION = token_model.ACCESS_RULES_MIN_VERSION def default_expire_time(): """Determine when a fresh token should expire. Expiration time varies based on configuration (see ``[token] expiration``). :returns: a naive UTC datetime.datetime object """ expire_delta = datetime.timedelta(seconds=CONF.token.expiration) expires_at = timeutils.utcnow() + expire_delta return expires_at.replace(microsecond=0) def random_urlsafe_str(): """Generate a random URL-safe string. :rtype: str """ # chop the padding (==) off the end of the encoding to save space return base64.urlsafe_b64encode(uuid.uuid4().bytes)[:-2].decode('utf-8') class Manager(manager.Manager): """Default pivot point for the token provider backend. See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. 
""" driver_namespace = 'keystone.token.provider' _provides_api = 'token_provider_api' V3 = V3 VERSIONS = VERSIONS def __init__(self): super().__init__(CONF.token.provider) self._register_callback_listeners() def _register_callback_listeners(self): # This is used by the @dependency.provider decorator to register the # provider (token_provider_api) manager to listen for trust deletions. callbacks = { notifications.ACTIONS.deleted: [ ['OS-TRUST:trust', self._drop_token_cache], ['user', self._drop_token_cache], ['domain', self._drop_token_cache], ], notifications.ACTIONS.disabled: [ ['user', self._drop_token_cache], ['domain', self._drop_token_cache], ['project', self._drop_token_cache], ], notifications.ACTIONS.internal: [ [notifications.INVALIDATE_TOKEN_CACHE, self._drop_token_cache], ], } for event, cb_info in callbacks.items(): for resource_type, callback_fns in cb_info: notifications.register_event_callback( event, resource_type, callback_fns ) def _drop_token_cache(self, service, resource_type, operation, payload): """Invalidate the entire token cache. This is a handy private utility method that should be used when consuming notifications that signal invalidating the token cache. 
""" if CONF.token.cache_on_issue or CONF.token.caching: TOKENS_REGION.invalidate() @MEMOIZE_TOKENS def check_revocation_v3(self, token_values): PROVIDERS.revoke_api.check_token(token_values) def check_revocation(self, token): token_values = self.revoke_api.model.build_token_values(token) return self.check_revocation_v3(token_values) def validate_token( self, token_id, window_seconds=0, access_rules_support=None ): if not token_id: raise exception.TokenNotFound(_('No token in the request')) try: token = self._validate_token(token_id) self._is_valid_token(token, window_seconds=window_seconds) self._validate_token_access_rules(token, access_rules_support) return token except exception.Unauthorized as e: LOG.debug('Unable to validate token: %s', e) raise exception.TokenNotFound(token_id=token_id) @MEMOIZE_TOKENS def _validate_token(self, token_id): ( user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, issued_at, expires_at, ) = self.driver.validate_token(token_id) token = token_model.TokenModel() token.user_id = user_id token.methods = methods if len(audit_ids) > 1: token.parent_audit_id = audit_ids.pop() token.audit_id = audit_ids.pop() token.system = system token.domain_id = domain_id token.project_id = project_id token.trust_id = trust_id token.access_token_id = access_token_id token.application_credential_id = app_cred_id token.oauth2_thumbprint = thumbprint token.expires_at = expires_at if federated_group_ids is not None: token.is_federated = True token.identity_provider_id = identity_provider_id token.protocol_id = protocol_id token.federated_groups = federated_group_ids token.mint(token_id, issued_at) return token def _is_valid_token(self, token, window_seconds=0): """Verify the token is valid format and has not expired.""" current_time = timeutils.normalize_time(timeutils.utcnow()) try: expiry = timeutils.parse_isotime(token.expires_at) expiry = 
timeutils.normalize_time(expiry) # add a window in which you can fetch a token beyond expiry expiry += datetime.timedelta(seconds=window_seconds) except Exception: LOG.exception( 'Unexpected error or malformed token ' 'determining token expiry: %s', token, ) raise exception.TokenNotFound(_('Failed to validate token')) if current_time < expiry: self.check_revocation(token) # Token has not expired and has not been revoked. return None else: raise exception.TokenNotFound(_('Failed to validate token')) def _validate_token_access_rules(self, token, access_rules_support=None): if token.application_credential_id: app_cred_api = PROVIDERS.application_credential_api app_cred = app_cred_api.get_application_credential( token.application_credential_id ) if app_cred.get('access_rules') is not None and ( not access_rules_support or (float(access_rules_support) < ACCESS_RULES_MIN_VERSION) ): LOG.exception( 'Attempted to use application credential' ' access rules with a middleware that does not' ' understand them. You must upgrade' ' keystonemiddleware on all services that' ' accept application credentials as an' ' authentication method.' ) raise exception.TokenNotFound(_('Failed to validate token')) def issue_token( self, user_id, method_names, expires_at=None, system=None, project_id=None, domain_id=None, auth_context=None, trust_id=None, app_cred_id=None, thumbprint=None, parent_audit_id=None, ): # NOTE(lbragstad): Grab a blank token object and use composition to # build the token according to the authentication and authorization # context. This cuts down on the amount of logic we have to stuff into # the TokenModel's __init__() method. 
token = token_model.TokenModel() token.methods = method_names token.system = system token.domain_id = domain_id token.project_id = project_id token.trust_id = trust_id token.application_credential_id = app_cred_id token.audit_id = random_urlsafe_str() token.oauth2_thumbprint = thumbprint token.parent_audit_id = parent_audit_id if auth_context: if constants.IDENTITY_PROVIDER in auth_context: token.is_federated = True token.protocol_id = auth_context[constants.PROTOCOL] idp_id = auth_context[constants.IDENTITY_PROVIDER] if isinstance(idp_id, bytes): idp_id = idp_id.decode('utf-8') token.identity_provider_id = idp_id token.user_id = auth_context['user_id'] token.federated_groups = [ {'id': group} for group in auth_context['group_ids'] ] if 'access_token_id' in auth_context: token.access_token_id = auth_context['access_token_id'] if not token.user_id: token.user_id = user_id token.user_domain_id = token.user['domain_id'] if isinstance(expires_at, datetime.datetime): token.expires_at = utils.isotime(expires_at, subsecond=True) if isinstance(expires_at, str): token.expires_at = expires_at elif not expires_at: token.expires_at = utils.isotime( default_expire_time(), subsecond=True ) # NOTE(d34dh0r53): If this token is being issued with an application # credential and the application credential expires before the token # we need to set the token expiration to be the same as the application # credential. See CVE-2022-2447 for more information. 
if app_cred_id is not None: app_cred_api = PROVIDERS.application_credential_api app_cred = app_cred_api.get_application_credential( token.application_credential_id ) token_time = timeutils.normalize_time( timeutils.parse_isotime(token.expires_at) ) if (app_cred['expires_at'] is not None) and ( token_time > app_cred['expires_at'] ): token.expires_at = utils.isotime( app_cred['expires_at'], subsecond=True ) LOG.debug( 'Resetting token expiration to the application' ' credential expiration: %s', token.expires_at, ) token_id, issued_at = self.driver.generate_id_and_issued_at(token) token.mint(token_id, issued_at) # cache the token object and with ID if CONF.token.cache_on_issue or CONF.token.caching: # NOTE(amakarov): here and above TOKENS_REGION is to be passed # to serve as required positional "self" argument. It's ignored, # so I've put it here for convenience - any placeholder is fine. self._validate_token.set(token, self, token.id) return token def invalidate_individual_token_cache(self, token): # NOTE(morganfainberg): invalidate takes the exact same arguments as # the normal method, this means we need to pass "self" in (which gets # stripped off). # FIXME(morganfainberg): Does this cache actually need to be # invalidated? We maintain a cached revocation list, which should be # consulted before accepting a token as valid. For now we will # do the explicit individual token invalidation. 
self._validate_token.invalidate(self, token.id) token_values = self.revoke_api.model.build_token_values(token) self.check_revocation_v3.invalidate(self, token_values) def revoke_token(self, token_id, revoke_chain=False): token = self.validate_token(token_id) project_id = token.project_id if token.project_scoped else None domain_id = token.domain_id if token.domain_scoped else None if revoke_chain: PROVIDERS.revoke_api.revoke_by_audit_chain_id( token.parent_audit_id, project_id=project_id, domain_id=domain_id, ) else: PROVIDERS.revoke_api.revoke_by_audit_id(token.audit_id) # FIXME(morganfainberg): Does this cache actually need to be # invalidated? We maintain a cached revocation list, which should be # consulted before accepting a token as valid. For now we will # do the explicit individual token invalidation. self.invalidate_individual_token_cache(token) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone/token/providers/0000775000175000017500000000000000000000000020461 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/__init__.py0000664000175000017500000000000000000000000022560 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/base.py0000664000175000017500000000556500000000000021760 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc from keystone import exception class Provider(metaclass=abc.ABCMeta): """Interface description for a Token provider.""" @abc.abstractmethod def validate_token(self, token_id): """Validate a given token by its ID and return the token_data. :param token_id: the unique ID of the token :type token_id: str :returns: token data as a tuple in the form of: (user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, issued_at, expires_at) ``user_id`` is the unique ID of the user as a string ``methods`` a list of authentication methods used to obtain the token ``audit_ids`` a list of audit IDs for the token ``system`` a dictionary containing system scope if system-scoped ``domain_id`` the unique ID of the domain if domain-scoped ``project_id`` the unique ID of the project if project-scoped ``trust_id`` the unique identifier of the trust if trust-scoped ``federated_group_ids`` list of federated group IDs ``identity_provider_id`` unique ID of the user's identity provider ``protocol_id`` unique ID of the protocol used to obtain the token ``access_token_id`` the unique ID of the access_token for OAuth1 tokens ``app_cred_id`` the unique ID of the application credential ``param thumbprint`` thumbprint of the certificate for OAuth2.0 mTLS ``issued_at`` a datetime object of when the token was minted ``expires_at`` a datetime object of when the token expires :raises keystone.exception.TokenNotFound: If the token doesn't exist. 
""" raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def generate_id_and_issued_at(self, token): """Generate a token based on the information provided. :param token: A token object containing information about the authorization context of the request. :type token: `keystone.models.token.TokenModel` :returns: tuple containing an ID for the token and the issued at time of the token (token_id, issued_at). """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone/token/providers/fernet/0000775000175000017500000000000000000000000021744 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/fernet/__init__.py0000664000175000017500000000113500000000000024055 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.token.providers.fernet.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/fernet/core.py0000664000175000017500000001002300000000000023242 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystone.common import utils as ks_utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.token.providers import base from keystone.token import token_formatters as tf CONF = keystone.conf.CONF class Provider(base.Provider): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # NOTE(lbragstad): We add these checks here because if the fernet # provider is going to be used and either the `key_repository` is empty # or doesn't exist we should fail, hard. It doesn't make sense to start # keystone and just 500 because we can't do anything with an empty or # non-existant key repository. if not os.path.exists(CONF.fernet_tokens.key_repository): subs = {'key_repo': CONF.fernet_tokens.key_repository} raise SystemExit(_('%(key_repo)s does not exist') % subs) if not os.listdir(CONF.fernet_tokens.key_repository): subs = {'key_repo': CONF.fernet_tokens.key_repository} raise SystemExit( _( '%(key_repo)s does not contain keys, use ' 'keystone-manage fernet_setup to create ' 'Fernet keys.' 
) % subs ) self.token_formatter = tf.TokenFormatter() def _determine_payload_class_from_token(self, token): if token.oauth_scoped: return tf.OauthScopedPayload elif token.trust_scoped: return tf.TrustScopedPayload elif token.is_federated: if token.project_scoped: return tf.FederatedProjectScopedPayload elif token.domain_scoped: return tf.FederatedDomainScopedPayload elif token.unscoped: return tf.FederatedUnscopedPayload elif token.application_credential_id: return tf.ApplicationCredentialScopedPayload elif token.oauth2_thumbprint: return tf.Oauth2CredentialsScopedPayload elif token.project_scoped: return tf.ProjectScopedPayload elif token.domain_scoped: return tf.DomainScopedPayload elif token.system_scoped: return tf.SystemScopedPayload else: return tf.UnscopedPayload def generate_id_and_issued_at(self, token): token_payload_class = self._determine_payload_class_from_token(token) token_id = self.token_formatter.create_token( token.user_id, token.expires_at, token.audit_ids, token_payload_class, methods=token.methods, system=token.system, domain_id=token.domain_id, project_id=token.project_id, trust_id=token.trust_id, federated_group_ids=token.federated_groups, identity_provider_id=token.identity_provider_id, protocol_id=token.protocol_id, access_token_id=token.access_token_id, app_cred_id=token.application_credential_id, thumbprint=token.oauth2_thumbprint, ) creation_datetime_obj = self.token_formatter.creation_time(token_id) issued_at = ks_utils.isotime(at=creation_datetime_obj, subsecond=True) return token_id, issued_at def validate_token(self, token_id): try: return self.token_formatter.validate_token(token_id) except exception.ValidationError as e: raise exception.TokenNotFound(e) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone/token/providers/jws/0000775000175000017500000000000000000000000021264 
5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/jws/__init__.py0000664000175000017500000000113200000000000023372 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.token.providers.jws.core import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/providers/jws/core.py0000664000175000017500000002077200000000000022576 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import os import jwt from oslo_utils import timeutils from keystone.common import utils import keystone.conf from keystone import exception from keystone.i18n import _ from keystone.token.providers import base CONF = keystone.conf.CONF class Provider(base.Provider): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) # NOTE(lbragstad): We add these checks here because if the jws # provider is going to be used and either the `key_repository` is empty # or doesn't exist we should fail, hard. It doesn't make sense to start # keystone and just 500 because we can't do anything with an empty or # non-existant key repository. private_key = os.path.join( CONF.jwt_tokens.jws_private_key_repository, 'private.pem' ) public_key_repo = CONF.jwt_tokens.jws_public_key_repository if not os.path.exists(private_key): subs = {'private_key': private_key} raise SystemExit( _( '%(private_key)s does not exist. You can generate a key pair ' 'using `keystone-manage create_jws_keypair`.' ) % subs ) if not os.path.exists(public_key_repo): subs = {'public_key_repo': public_key_repo} raise SystemExit( _( '%(public_key_repo)s does not exist. Please make sure the ' 'directory exists and is readable by the process running ' 'keystone.' ) % subs ) if len(os.listdir(public_key_repo)) == 0: subs = {'public_key_repo': public_key_repo} msg = _( '%(public_key_repo)s must contain at least one public ' 'key but it is empty. You can generate a key pair using ' '`keystone-manage create_jws_keypair`.' 
) raise SystemExit(msg % subs) self.token_formatter = JWSFormatter() def generate_id_and_issued_at(self, token): return self.token_formatter.create_token( token.user_id, token.expires_at, token.audit_ids, token.methods, system=token.system, domain_id=token.domain_id, project_id=token.project_id, trust_id=token.trust_id, federated_group_ids=token.federated_groups, identity_provider_id=token.identity_provider_id, protocol_id=token.protocol_id, access_token_id=token.access_token_id, app_cred_id=token.application_credential_id, thumbprint=token.oauth2_thumbprint, ) def validate_token(self, token_id): return self.token_formatter.validate_token(token_id) class JWSFormatter: # NOTE(lbragstad): If in the future we expand support for different # algorithms, make this configurable and validate it against a blessed list # of supported algorithms. algorithm = 'ES256' @property def private_key(self): private_key_path = os.path.join( CONF.jwt_tokens.jws_private_key_repository, 'private.pem' ) with open(private_key_path) as f: key = f.read() return key @property def public_keys(self): keys = [] key_repo = CONF.jwt_tokens.jws_public_key_repository for keyfile in os.listdir(key_repo): with open(os.path.join(key_repo, keyfile)) as f: keys.append(f.read()) return keys def create_token( self, user_id, expires_at, audit_ids, methods, system=None, domain_id=None, project_id=None, trust_id=None, federated_group_ids=None, identity_provider_id=None, protocol_id=None, access_token_id=None, app_cred_id=None, thumbprint=None, ): issued_at = utils.isotime(subsecond=True) issued_at_int = self._convert_time_string_to_int(issued_at) expires_at_int = self._convert_time_string_to_int(expires_at) payload = { # public claims 'sub': user_id, 'iat': issued_at_int, 'exp': expires_at_int, # private claims 'openstack_methods': methods, 'openstack_audit_ids': audit_ids, 'openstack_system': system, 'openstack_domain_id': domain_id, 'openstack_project_id': project_id, 'openstack_trust_id': trust_id, 
'openstack_group_ids': federated_group_ids, 'openstack_idp_id': identity_provider_id, 'openstack_protocol_id': protocol_id, 'openstack_access_token_id': access_token_id, 'openstack_app_cred_id': app_cred_id, 'openstack_thumbprint': thumbprint, } # NOTE(lbragstad): Calling .items() on a dictionary in python 2 returns # a list but returns an iterable in python 3. Casting to a list makes # it safe to modify the dictionary while iterating over it, regardless # of the python version. for k, v in list(payload.items()): if v is None: payload.pop(k) token_id = jwt.encode( payload, self.private_key, algorithm=JWSFormatter.algorithm ) return token_id, issued_at def validate_token(self, token_id): payload = self._decode_token_from_id(token_id) user_id = payload['sub'] expires_at_int = payload['exp'] issued_at_int = payload['iat'] methods = payload['openstack_methods'] audit_ids = payload['openstack_audit_ids'] system = payload.get('openstack_system', None) domain_id = payload.get('openstack_domain_id', None) project_id = payload.get('openstack_project_id', None) trust_id = payload.get('openstack_trust_id', None) federated_group_ids = payload.get('openstack_group_ids', None) identity_provider_id = payload.get('openstack_idp_id', None) protocol_id = payload.get('openstack_protocol_id', None) access_token_id = payload.get('openstack_access_token_id', None) app_cred_id = payload.get('openstack_app_cred_id', None) thumbprint = payload.get('openstack_thumbprint', None) issued_at = self._convert_time_int_to_string(issued_at_int) expires_at = self._convert_time_int_to_string(expires_at_int) return ( user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, issued_at, expires_at, ) def _decode_token_from_id(self, token_id): options = dict() options['verify_exp'] = False for public_key in self.public_keys: try: return jwt.decode( token_id, public_key, 
algorithms=JWSFormatter.algorithm, options=options, ) except (jwt.InvalidSignatureError, jwt.DecodeError): pass # nosec: We want to exhaustively try all public keys raise exception.TokenNotFound(token_id=token_id) def _convert_time_string_to_int(self, time_str): time_object = timeutils.parse_isotime(time_str) normalized = timeutils.normalize_time(time_object) epoch = datetime.datetime.fromtimestamp( 0, datetime.timezone.utc ).replace(tzinfo=None) return int((normalized - epoch).total_seconds()) def _convert_time_int_to_string(self, time_int): time_object = datetime.datetime.fromtimestamp( time_int, datetime.timezone.utc ).replace(tzinfo=None) return utils.isotime(at=time_object, subsecond=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/token/token_formatters.py0000664000175000017500000011162700000000000022414 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import base64 import datetime import struct import uuid from cryptography import fernet import msgpack from oslo_log import log from oslo_utils import timeutils from keystone.auth import plugins as auth_plugins from keystone.common import fernet_utils as utils from keystone.common import utils as ks_utils import keystone.conf from keystone import exception from keystone.i18n import _ CONF = keystone.conf.CONF LOG = log.getLogger(__name__) # Fernet byte indexes as computed by pypi/keyless_fernet and defined in # https://github.com/fernet/spec TIMESTAMP_START = 1 TIMESTAMP_END = 9 class TokenFormatter: """Packs and unpacks payloads into tokens for transport.""" @property def crypto(self): """Return a cryptography instance. You can extend this class with a custom crypto @property to provide your own token encoding / decoding. For example, using a different cryptography library (e.g. ``python-keyczar``) or to meet arbitrary security requirements. This @property just needs to return an object that implements ``encrypt(plaintext)`` and ``decrypt(ciphertext)``. """ fernet_utils = utils.FernetUtils( CONF.fernet_tokens.key_repository, CONF.fernet_tokens.max_active_keys, 'fernet_tokens', ) keys = fernet_utils.load_keys() if not keys: raise exception.KeysNotFound() fernet_instances = [fernet.Fernet(key) for key in keys] return fernet.MultiFernet(fernet_instances) def pack(self, payload): """Pack a payload for transport as a token. :type payload: bytes :rtype: str """ # base64 padding (if any) is not URL-safe return self.crypto.encrypt(payload).rstrip(b'=').decode('utf-8') def unpack(self, token): """Unpack a token, and validate the payload. :type token: str :rtype: bytes """ token = TokenFormatter.restore_padding(token) try: return self.crypto.decrypt(token.encode('utf-8')) except fernet.InvalidToken: raise exception.ValidationError( _('Could not recognize Fernet token') ) @classmethod def restore_padding(cls, token): """Restore padding based on token size. 
:param token: token to restore padding on :type token: str :returns: token with correct padding """ # Re-inflate the padding mod_returned = len(token) % 4 if mod_returned: missing_padding = 4 - mod_returned token += '=' * missing_padding return token @classmethod def creation_time(cls, fernet_token): """Return the creation time of a valid Fernet token. :type fernet_token: str """ fernet_token = TokenFormatter.restore_padding(fernet_token) # fernet_token is str # Fernet tokens are base64 encoded, so we need to unpack them first # urlsafe_b64decode() requires bytes token_bytes = base64.urlsafe_b64decode(fernet_token.encode('utf-8')) # slice into the byte array to get just the timestamp timestamp_bytes = token_bytes[TIMESTAMP_START:TIMESTAMP_END] # convert those bytes to an integer # (it's a 64-bit "unsigned long long int" in C) timestamp_int = struct.unpack(">Q", timestamp_bytes)[0] # and with an integer, it's trivial to produce a datetime object issued_at = datetime.datetime.fromtimestamp( timestamp_int, datetime.timezone.utc ).replace(tzinfo=None) return issued_at def create_token( self, user_id, expires_at, audit_ids, payload_class, methods=None, system=None, domain_id=None, project_id=None, trust_id=None, federated_group_ids=None, identity_provider_id=None, protocol_id=None, access_token_id=None, app_cred_id=None, thumbprint=None, ): """Given a set of payload attributes, generate a Fernet token.""" version = payload_class.version payload = payload_class.assemble( user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) versioned_payload = (version,) + payload serialized_payload = msgpack.packb(versioned_payload) token = self.pack(serialized_payload) # NOTE(lbragstad): We should warn against Fernet tokens that are over # 255 characters in length. 
This is mostly due to persisting the tokens # in a backend store of some kind that might have a limit of 255 # characters. Even though Keystone isn't storing a Fernet token # anywhere, we can't say it isn't being stored somewhere else with # those kind of backend constraints. if len(token) > CONF.max_token_size: LOG.info( f'Fernet token created with length of {len(token)} ' f'characters, which exceeds {CONF.max_token_size} characters', ) return token def validate_token(self, token): """Validate a Fernet token and returns the payload attributes. :type token: str """ serialized_payload = self.unpack(token) # TODO(melwitt): msgpack changed their data format in version 1.0, so # in order to support a rolling upgrade, we must pass raw=True to # support the old format. The try-except may be removed once the # N-1 release no longer supports msgpack < 1.0. try: versioned_payload = msgpack.unpackb(serialized_payload) except UnicodeDecodeError: versioned_payload = msgpack.unpackb(serialized_payload, raw=True) version, payload = versioned_payload[0], versioned_payload[1:] for payload_class in _PAYLOAD_CLASSES: if version == payload_class.version: ( user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) = payload_class.disassemble(payload) break else: # If the token_format is not recognized, raise ValidationError. raise exception.ValidationError( _('This is not a recognized Fernet payload version: %s') % version ) # FIXME(lbragstad): Without this, certain token validation tests fail # when running with python 3. Once we get further along in this # refactor, we should be better about handling string encoding/types at # the edges of the application. 
if isinstance(system, bytes): system = system.decode('utf-8') # rather than appearing in the payload, the creation time is encoded # into the token format itself issued_at = TokenFormatter.creation_time(token) issued_at = ks_utils.isotime(at=issued_at, subsecond=True) expires_at = timeutils.parse_isotime(expires_at) expires_at = ks_utils.isotime(at=expires_at, subsecond=True) return ( user_id, methods, audit_ids, system, domain_id, project_id, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, issued_at, expires_at, ) class BasePayload: # each payload variant should have a unique version version: int @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): """Assemble the payload of a token. :param user_id: identifier of the user in the token request :param methods: list of authentication methods used :param system: a string including system scope information :param project_id: ID of the project to scope to :param domain_id: ID of the domain to scope to :param expires_at: datetime of the token's expiration :param audit_ids: list of the token's audit IDs :param trust_id: ID of the trust in effect :param federated_group_ids: list of group IDs from SAML assertion :param identity_provider_id: ID of the user's identity provider :param protocol_id: federated protocol used for authentication :param access_token_id: ID of the secret in OAuth1 authentication :param app_cred_id: ID of the application credential in effect :param thumbprint: thumbprint of the certificate in OAuth2 mTLS :returns: the payload of a token """ raise NotImplementedError() @classmethod def disassemble(cls, payload): """Disassemble an unscoped payload into the component data. 
The tuple consists of:: (user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id,` access_token_id, app_cred_id) * ``methods`` are the auth methods. Fields will be set to None if they didn't apply to this payload type. :param payload: this variant of payload :returns: a tuple of the payloads component data """ raise NotImplementedError() @classmethod def convert_uuid_hex_to_bytes(cls, uuid_string): """Compress UUID formatted strings to bytes. :param uuid_string: uuid string to compress to bytes :returns: a byte representation of the uuid """ uuid_obj = uuid.UUID(uuid_string) return uuid_obj.bytes @classmethod def convert_uuid_bytes_to_hex(cls, uuid_byte_string): """Generate uuid.hex format based on byte string. :param uuid_byte_string: uuid string to generate from :returns: uuid hex formatted string """ uuid_obj = uuid.UUID(bytes=uuid_byte_string) return uuid_obj.hex @classmethod def _convert_time_string_to_float(cls, time_string): """Convert a time formatted string to a float. :param time_string: time formatted string :returns: a timestamp as a float """ time_object = timeutils.parse_isotime(time_string) return ( timeutils.normalize_time(time_object) - datetime.datetime.fromtimestamp( 0, datetime.timezone.utc ).replace(tzinfo=None) ).total_seconds() @classmethod def _convert_float_to_time_string(cls, time_float): """Convert a floating point timestamp to a string. :param time_float: integer representing timestamp :returns: a time formatted strings """ time_object = datetime.datetime.fromtimestamp( time_float, datetime.timezone.utc ).replace(tzinfo=None) return ks_utils.isotime(time_object, subsecond=True) @classmethod def attempt_convert_uuid_hex_to_bytes(cls, value): """Attempt to convert value to bytes or return value. 
:param value: value to attempt to convert to bytes :returns: tuple containing boolean indicating whether user_id was stored as bytes and uuid value as bytes or the original value """ try: return (True, cls.convert_uuid_hex_to_bytes(value)) except (ValueError, TypeError): # ValueError: this might not be a UUID, depending on the # situation (i.e. federation) # TypeError: the provided value may be binary encoded # in which case just return the value (i.e. Python 3) return (False, value) @classmethod def base64_encode(cls, s): """Encode a URL-safe string. :type s: str :rtype: str """ # urlsafe_b64encode() returns bytes so need to convert to # str, might as well do it before stripping. return base64.urlsafe_b64encode(s).decode('utf-8').rstrip('=') @classmethod def random_urlsafe_str_to_bytes(cls, s): """Convert string from :func:`random_urlsafe_str()` to bytes. :type s: str :rtype: bytes """ # urlsafe_b64decode() requires str, unicode isn't accepted. s = str(s) # restore the padding (==) at the end of the string return base64.urlsafe_b64decode(s + '==') @classmethod def _convert_or_decode(cls, is_stored_as_bytes, value): """Convert a value to text type, translating uuid -> hex if required. 
:param is_stored_as_bytes: whether value is already bytes :type is_stored_as_bytes: boolean :param value: value to attempt to convert to bytes :type value: str or bytes :rtype: str """ if is_stored_as_bytes: return cls.convert_uuid_bytes_to_hex(value) elif isinstance(value, bytes): return value.decode('utf-8') return value class UnscopedPayload(BasePayload): version = 0 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return (b_user_id, methods, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) expires_at_str = cls._convert_float_to_time_string(payload[2]) audit_ids = list(map(cls.base64_encode, payload[3])) system = None project_id = None domain_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class DomainScopedPayload(BasePayload): version = 1 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = 
auth_plugins.convert_method_list_to_integer(methods) try: b_domain_id = cls.convert_uuid_hex_to_bytes(domain_id) except ValueError: # the default domain ID is configurable, and probably isn't a UUID if domain_id == CONF.identity.default_domain_id: b_domain_id = domain_id else: raise expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return (b_user_id, methods, b_domain_id, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) try: domain_id = cls.convert_uuid_bytes_to_hex(payload[2]) except ValueError: # the default domain ID is configurable, and probably isn't a UUID if isinstance(payload[2], bytes): payload[2] = payload[2].decode('utf-8') if payload[2] == CONF.identity.default_domain_id: domain_id = payload[2] else: raise expires_at_str = cls._convert_float_to_time_string(payload[3]) audit_ids = list(map(cls.base64_encode, payload[4])) system = None project_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class ProjectScopedPayload(BasePayload): version = 2 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) expires_at_int = 
cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return (b_user_id, methods, b_project_id, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] project_id = cls._convert_or_decode(is_stored_as_bytes, project_id) expires_at_str = cls._convert_float_to_time_string(payload[3]) audit_ids = list(map(cls.base64_encode, payload[4])) system = None domain_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class TrustScopedPayload(BasePayload): version = 3 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) b_trust_id = cls.convert_uuid_hex_to_bytes(trust_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return ( b_user_id, methods, b_project_id, expires_at_int, b_audit_ids, b_trust_id, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] 
project_id = cls._convert_or_decode(is_stored_as_bytes, project_id) expires_at_str = cls._convert_float_to_time_string(payload[3]) audit_ids = list(map(cls.base64_encode, payload[4])) trust_id = cls.convert_uuid_bytes_to_hex(payload[5]) system = None domain_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class FederatedUnscopedPayload(BasePayload): version = 4 @classmethod def pack_group_id(cls, group_dict): return cls.attempt_convert_uuid_hex_to_bytes(group_dict['id']) @classmethod def unpack_group_id(cls, group_id_in_bytes): (is_stored_as_bytes, group_id) = group_id_in_bytes group_id = cls._convert_or_decode(is_stored_as_bytes, group_id) return {'id': group_id} @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_group_ids = list(map(cls.pack_group_id, federated_group_ids)) b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(identity_provider_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return ( b_user_id, methods, b_group_ids, b_idp_id, protocol_id, expires_at_int, b_audit_ids, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) group_ids = list(map(cls.unpack_group_id, payload[2])) (is_stored_as_bytes, idp_id) = payload[3] idp_id = 
cls._convert_or_decode(is_stored_as_bytes, idp_id) protocol_id = payload[4] if isinstance(protocol_id, bytes): protocol_id = protocol_id.decode('utf-8') expires_at_str = cls._convert_float_to_time_string(payload[5]) audit_ids = list(map(cls.base64_encode, payload[6])) system = None project_id = None domain_id = None trust_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, group_ids, idp_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class FederatedScopedPayload(FederatedUnscopedPayload): @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_scope_id = cls.attempt_convert_uuid_hex_to_bytes( project_id or domain_id ) b_group_ids = list(map(cls.pack_group_id, federated_group_ids)) b_idp_id = cls.attempt_convert_uuid_hex_to_bytes(identity_provider_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return ( b_user_id, methods, b_scope_id, b_group_ids, b_idp_id, protocol_id, expires_at_int, b_audit_ids, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, scope_id) = payload[2] scope_id = cls._convert_or_decode(is_stored_as_bytes, scope_id) project_id = ( scope_id if cls.version == FederatedProjectScopedPayload.version else None ) domain_id = ( scope_id if cls.version == FederatedDomainScopedPayload.version else None ) group_ids = list(map(cls.unpack_group_id, payload[3])) (is_stored_as_bytes, idp_id) 
= payload[4] idp_id = cls._convert_or_decode(is_stored_as_bytes, idp_id) protocol_id = payload[5] if isinstance(protocol_id, bytes): protocol_id = protocol_id.decode('utf-8') expires_at_str = cls._convert_float_to_time_string(payload[6]) audit_ids = list(map(cls.base64_encode, payload[7])) system = None trust_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, group_ids, idp_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class FederatedProjectScopedPayload(FederatedScopedPayload): version = 5 class FederatedDomainScopedPayload(FederatedScopedPayload): version = 6 class OauthScopedPayload(BasePayload): version = 7 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) b_access_token_id = cls.attempt_convert_uuid_hex_to_bytes( access_token_id ) return ( b_user_id, methods, b_project_id, b_access_token_id, expires_at_int, b_audit_ids, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] project_id = cls._convert_or_decode(is_stored_as_bytes, project_id) (is_stored_as_bytes, access_token_id) = payload[3] access_token_id = cls._convert_or_decode( is_stored_as_bytes, access_token_id ) expires_at_str = cls._convert_float_to_time_string(payload[4]) audit_ids = 
list(map(cls.base64_encode, payload[5])) system = None domain_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class SystemScopedPayload(BasePayload): version = 8 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) return (b_user_id, methods, system, expires_at_int, b_audit_ids) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) system = payload[2] expires_at_str = cls._convert_float_to_time_string(payload[3]) audit_ids = list(map(cls.base64_encode, payload[4])) project_id = None domain_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class ApplicationCredentialScopedPayload(BasePayload): version = 9 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): 
b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) b_app_cred_id = cls.attempt_convert_uuid_hex_to_bytes(app_cred_id) return ( b_user_id, methods, b_project_id, expires_at_int, b_audit_ids, b_app_cred_id, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] project_id = cls._convert_or_decode(is_stored_as_bytes, project_id) expires_at_str = cls._convert_float_to_time_string(payload[3]) audit_ids = list(map(cls.base64_encode, payload[4])) system = None domain_id = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None (is_stored_as_bytes, app_cred_id) = payload[5] app_cred_id = cls._convert_or_decode(is_stored_as_bytes, app_cred_id) thumbprint = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) class Oauth2CredentialsScopedPayload(BasePayload): version = 10 @classmethod def assemble( cls, user_id, methods, system, project_id, domain_id, expires_at, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ): b_user_id = cls.attempt_convert_uuid_hex_to_bytes(user_id) methods = auth_plugins.convert_method_list_to_integer(methods) b_project_id = cls.attempt_convert_uuid_hex_to_bytes(project_id) b_domain_id = cls.attempt_convert_uuid_hex_to_bytes(domain_id) expires_at_int = cls._convert_time_string_to_float(expires_at) b_audit_ids = 
list(map(cls.random_urlsafe_str_to_bytes, audit_ids)) b_thumbprint = (False, thumbprint) return ( b_user_id, methods, b_project_id, b_domain_id, expires_at_int, b_audit_ids, b_thumbprint, ) @classmethod def disassemble(cls, payload): (is_stored_as_bytes, user_id) = payload[0] user_id = cls._convert_or_decode(is_stored_as_bytes, user_id) methods = auth_plugins.convert_integer_to_method_list(payload[1]) (is_stored_as_bytes, project_id) = payload[2] project_id = cls._convert_or_decode(is_stored_as_bytes, project_id) (is_stored_as_bytes, domain_id) = payload[3] domain_id = cls._convert_or_decode(is_stored_as_bytes, domain_id) expires_at_str = cls._convert_float_to_time_string(payload[4]) audit_ids = list(map(cls.base64_encode, payload[5])) (is_stored_as_bytes, thumbprint) = payload[6] thumbprint = cls._convert_or_decode(is_stored_as_bytes, thumbprint) system = None trust_id = None federated_group_ids = None identity_provider_id = None protocol_id = None access_token_id = None app_cred_id = None return ( user_id, methods, system, project_id, domain_id, expires_at_str, audit_ids, trust_id, federated_group_ids, identity_provider_id, protocol_id, access_token_id, app_cred_id, thumbprint, ) _PAYLOAD_CLASSES = [ UnscopedPayload, DomainScopedPayload, ProjectScopedPayload, TrustScopedPayload, FederatedUnscopedPayload, FederatedProjectScopedPayload, FederatedDomainScopedPayload, OauthScopedPayload, SystemScopedPayload, ApplicationCredentialScopedPayload, Oauth2CredentialsScopedPayload, ] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone/trust/0000775000175000017500000000000000000000000016505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/__init__.py0000664000175000017500000000116400000000000020620 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # 
Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.trust.core import * # noqa ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone/trust/backends/0000775000175000017500000000000000000000000020257 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/backends/__init__.py0000664000175000017500000000000000000000000022356 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/backends/base.py0000664000175000017500000000625700000000000021555 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc from keystone import exception class TrustDriverBase(metaclass=abc.ABCMeta): @abc.abstractmethod def create_trust(self, trust_id, trust, roles): """Create a new trust. :returns: a new trust """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def get_trust(self, trust_id, deleted=False): """Get a trust by the trust id. :param trust_id: the trust identifier :type trust_id: string :param deleted: return the trust even if it is deleted, expired, or has no consumptions left :type deleted: bool """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts(self): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts_for_trustee(self, trustee): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def list_trusts_for_trustor(self, trustor, redelegated_trust_id=None): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_trust(self, trust_id): raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def consume_use(self, trust_id): """Consume one use of a trust. One use of a trust is consumed when the trust was created with a limitation on its uses, provided there are still uses available. :raises keystone.exception.TrustUseLimitReached: If no remaining uses for trust. :raises keystone.exception.TrustNotFound: If the trust doesn't exist. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def delete_trusts_for_project(self, project_id): """Delete all trusts for a project. :param project_id: ID of a project to filter trusts by. """ raise exception.NotImplemented() # pragma: no cover @abc.abstractmethod def flush_expired_and_soft_deleted_trusts( self, project_id=None, trustor_user_id=None, trustee_user_id=None, date=None, ): """Flush expired and non-expired soft deleted trusts from the backend. :param project_id: ID of a project to filter trusts by. 
:param trustor_user_id: ID of a trustor_user_id to filter trusts by. :param trustee_user_id: ID of a trustee_user_id to filter trusts by. :param date: date to filter trusts by. :type date: datetime """ raise exception.NotImplemented() # pragma: no cover ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/backends/sql.py0000664000175000017500000002353100000000000021434 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_utils import timeutils import sqlalchemy from sqlalchemy.ext.hybrid import hybrid_property from keystone.common import sql from keystone import exception from keystone.trust.backends import base # The maximum number of iterations that will be attempted for optimistic # locking on consuming a limited-use trust. 
MAXIMUM_CONSUME_ATTEMPTS = 10 class TrustModel(sql.ModelBase, sql.ModelDictMixinWithExtras): __tablename__ = 'trust' attributes = [ 'id', 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', 'remaining_uses', 'deleted_at', 'redelegated_trust_id', 'redelegation_count', ] id = sql.Column(sql.String(64), primary_key=True) # user id of owner trustor_user_id = sql.Column( sql.String(64), nullable=False, ) # user_id of user allowed to consume this preauth trustee_user_id = sql.Column(sql.String(64), nullable=False) project_id = sql.Column(sql.String(64)) impersonation = sql.Column(sql.Boolean, nullable=False) deleted_at = sql.Column(sql.DateTime) _expires_at = sql.Column('expires_at', sql.DateTime) expires_at_int = sql.Column(sql.DateTimeInt(), nullable=True) remaining_uses = sql.Column(sql.Integer, nullable=True) redelegated_trust_id = sql.Column(sql.String(64), nullable=True) redelegation_count = sql.Column(sql.Integer, nullable=True) extra = sql.Column(sql.JsonBlob()) __table_args__ = ( sql.UniqueConstraint( 'trustor_user_id', 'trustee_user_id', 'project_id', 'impersonation', 'expires_at', name='duplicate_trust_constraint', ), ) @hybrid_property def expires_at(self): return self.expires_at_int or self._expires_at @expires_at.setter # type: ignore[no-redef] def expires_at(self, value): self._expires_at = value self.expires_at_int = value class TrustRole(sql.ModelBase): __tablename__ = 'trust_role' attributes = ['trust_id', 'role_id'] trust_id = sql.Column(sql.String(64), primary_key=True, nullable=False) role_id = sql.Column(sql.String(64), primary_key=True, nullable=False) class Trust(base.TrustDriverBase): @sql.handle_conflicts(conflict_type='trust') def create_trust(self, trust_id, trust, roles): with sql.session_for_write() as session: ref = TrustModel.from_dict(trust) ref['id'] = trust_id if ref.get('expires_at') and ref['expires_at'].tzinfo is not None: ref['expires_at'] = timeutils.normalize_time(ref['expires_at']) session.add(ref) 
added_roles = [] for role in roles: trust_role = TrustRole() trust_role.trust_id = trust_id trust_role.role_id = role['id'] added_roles.append({'id': role['id']}) session.add(trust_role) trust_dict = ref.to_dict() trust_dict['roles'] = added_roles return trust_dict def _add_roles(self, trust_id, session, trust_dict): roles = [] for role in session.query(TrustRole).filter_by(trust_id=trust_id): roles.append({'id': role.role_id}) trust_dict['roles'] = roles def consume_use(self, trust_id): for attempt in range(MAXIMUM_CONSUME_ATTEMPTS): with sql.session_for_write() as session: try: query_result = ( session.query(TrustModel.remaining_uses) .filter_by(id=trust_id) .filter_by(deleted_at=None) .one() ) except sql.NotFound: raise exception.TrustNotFound(trust_id=trust_id) remaining_uses = query_result.remaining_uses if remaining_uses is None: # unlimited uses, do nothing break elif remaining_uses > 0: # NOTE(morganfainberg): use an optimistic locking method # to ensure we only ever update a trust that has the # expected number of remaining uses. rows_affected = ( session.query(TrustModel) .filter_by(id=trust_id) .filter_by(deleted_at=None) .filter_by(remaining_uses=remaining_uses) .update( {'remaining_uses': (remaining_uses - 1)}, synchronize_session=False, ) ) if rows_affected == 1: # Successfully consumed a single limited-use trust. # Since trust_id is the PK on the Trust table, there is # no case we should match more than 1 row in the # update. We either update 1 row or 0 rows. break else: raise exception.TrustUseLimitReached(trust_id=trust_id) else: # NOTE(morganfainberg): In the case the for loop is not prematurely # broken out of, this else block is executed. This means the trust # was not unlimited nor was it consumed (we hit the maximum # iteration limit). This is just an indicator that we were unable # to get the optimistic lock rather than silently failing or # incorrectly indicating a trust was consumed. 
raise exception.TrustConsumeMaximumAttempt(trust_id=trust_id) def get_trust(self, trust_id, deleted=False): with sql.session_for_read() as session: query = session.query(TrustModel).filter_by(id=trust_id) if not deleted: query = query.filter_by(deleted_at=None) ref = query.first() if ref is None: raise exception.TrustNotFound(trust_id=trust_id) if ref.expires_at is not None and not deleted: now = timeutils.utcnow() if now > ref.expires_at: raise exception.TrustNotFound(trust_id=trust_id) # Do not return trusts that can't be used anymore if ref.remaining_uses is not None and not deleted: if ref.remaining_uses <= 0: raise exception.TrustNotFound(trust_id=trust_id) trust_dict = ref.to_dict() self._add_roles(trust_id, session, trust_dict) return trust_dict def list_trusts(self): with sql.session_for_read() as session: trusts = session.query(TrustModel).filter_by(deleted_at=None) return [trust_ref.to_dict() for trust_ref in trusts] def list_trusts_for_trustee(self, trustee_user_id): with sql.session_for_read() as session: trusts = ( session.query(TrustModel) .filter_by(deleted_at=None) .filter_by(trustee_user_id=trustee_user_id) ) return [trust_ref.to_dict() for trust_ref in trusts] def list_trusts_for_trustor( self, trustor_user_id, redelegated_trust_id=None ): with sql.session_for_read() as session: trusts = ( session.query(TrustModel) .filter_by(deleted_at=None) .filter_by(trustor_user_id=trustor_user_id) ) if redelegated_trust_id: trusts = trusts.filter_by( redelegated_trust_id=redelegated_trust_id ) return [trust_ref.to_dict() for trust_ref in trusts] @sql.handle_conflicts(conflict_type='trust') def delete_trust(self, trust_id): with sql.session_for_write() as session: trust_ref = session.get(TrustModel, trust_id) if not trust_ref: raise exception.TrustNotFound(trust_id=trust_id) trust_ref.deleted_at = timeutils.utcnow() def delete_trusts_for_project(self, project_id): with sql.session_for_write() as session: query = session.query(TrustModel) trusts = 
query.filter_by(project_id=project_id) for trust_ref in trusts: trust_ref.deleted_at = timeutils.utcnow() def flush_expired_and_soft_deleted_trusts( self, project_id=None, trustor_user_id=None, trustee_user_id=None, date=None, ): with sql.session_for_write() as session: query = session.query(TrustModel) query = query.filter( sqlalchemy.or_( TrustModel.deleted_at.isnot(None), sqlalchemy.and_( TrustModel.expires_at.isnot(None), TrustModel.expires_at < date, ), ) ) if project_id: query = query.filter_by(project_id=project_id) if trustor_user_id: query = query.filter_by(trustor_user_id=trustor_user_id) if trustee_user_id: query = query.filter_by(trustee_user_id=trustee_user_id) query.delete(synchronize_session=False) session.flush() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/core.py0000664000175000017500000002214300000000000020011 0ustar00zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Main entry point into the Trust service.""" from keystone.common import manager from keystone.common import provider_api import keystone.conf from keystone import exception from keystone.i18n import _ from keystone import notifications CONF = keystone.conf.CONF PROVIDERS = provider_api.ProviderAPIs class Manager(manager.Manager): """Default pivot point for the Trust backend. 
See :mod:`keystone.common.manager.Manager` for more details on how this dynamically calls the backend. """ driver_namespace = 'keystone.trust' _provides_api = 'trust_api' _TRUST = "OS-TRUST:trust" def __init__(self): super().__init__(CONF.trust.driver) notifications.register_event_callback( notifications.ACTIONS.deleted, 'user', self._on_user_delete ) def _on_user_delete(self, service, resource_type, operation, payload): # NOTE(davechen): Only delete the user that is maintained by # keystone will delete the related trust, since we don't know # when a LDAP user or federation user is deleted. user_id = payload['resource_info'] trusts = self.driver.list_trusts_for_trustee(user_id) trusts = trusts + self.driver.list_trusts_for_trustor(user_id) for trust in trusts: self.driver.delete_trust(trust['id']) @staticmethod def _validate_redelegation(redelegated_trust, trust): # Validate against: # 0 < redelegation_count <= max_redelegation_count max_redelegation_count = CONF.trust.max_redelegation_count redelegation_depth = redelegated_trust.get('redelegation_count', 0) if not (0 < redelegation_depth <= max_redelegation_count): raise exception.Forbidden( _( 'Remaining redelegation depth of %(redelegation_depth)d' ' out of allowed range of [0..%(max_count)d]' ) % { 'redelegation_depth': redelegation_depth, 'max_count': max_redelegation_count, } ) # remaining_uses is None remaining_uses = trust.get('remaining_uses') if remaining_uses is not None: raise exception.Forbidden( _( 'Field "remaining_uses" is set to %(value)s' ' while it must not be set in order to redelegate a trust' ), value=remaining_uses, ) # expiry times trust_expiry = trust.get('expires_at') redelegated_expiry = redelegated_trust['expires_at'] if trust_expiry: # redelegated trust is from backend and has no tzinfo if redelegated_expiry < trust_expiry.replace(tzinfo=None): raise exception.Forbidden( _( 'Requested expiration time is more ' 'than redelegated trust can provide' ) ) else: trust['expires_at'] = 
redelegated_expiry # trust roles is a subset of roles of the redelegated trust parent_roles = {role['id'] for role in redelegated_trust['roles']} if not all(role['id'] in parent_roles for role in trust['roles']): raise exception.Forbidden( _('Some of requested roles are not in redelegated trust') ) # forbid to create a trust (with impersonation set to true) from a # redelegated trust (with impersonation set to false) if not redelegated_trust['impersonation'] and trust['impersonation']: raise exception.Forbidden( _( 'Impersonation is not allowed because redelegated trust ' 'does not specify impersonation. Redelegated trust id: %s' ) % redelegated_trust['id'] ) def get_trust_pedigree(self, trust_id): trust = self.driver.get_trust(trust_id) trust_chain = [trust] while trust and trust.get('redelegated_trust_id'): trust = self.driver.get_trust(trust['redelegated_trust_id']) trust_chain.append(trust) return trust_chain def get_trust(self, trust_id, deleted=False): trust = self.driver.get_trust(trust_id, deleted) if trust and trust.get('redelegated_trust_id') and not deleted: trust_chain = self.get_trust_pedigree(trust_id) for parent, child in zip(trust_chain[1:], trust_chain): self._validate_redelegation(parent, child) try: PROVIDERS.identity_api.assert_user_enabled( parent['trustee_user_id'] ) except (AssertionError, exception.NotFound): raise exception.Forbidden( _('One of the trust agents is disabled or deleted') ) return trust def create_trust( self, trust_id, trust, roles, redelegated_trust=None, initiator=None ): """Create a new trust. 
:returns: a new trust """ # Default for initial trust in chain is max_redelegation_count max_redelegation_count = CONF.trust.max_redelegation_count requested_count = trust.get('redelegation_count') redelegatable = ( trust.pop('allow_redelegation', False) and requested_count != 0 ) if not redelegatable: trust['redelegation_count'] = requested_count = 0 remaining_uses = trust.get('remaining_uses') if remaining_uses is not None and remaining_uses <= 0: msg = _('remaining_uses must be a positive integer or null.') raise exception.ValidationError(msg) else: # Validate requested redelegation depth if requested_count and requested_count > max_redelegation_count: raise exception.Forbidden( _( 'Requested redelegation depth of %(requested_count)d ' 'is greater than allowed %(max_count)d' ) % { 'requested_count': requested_count, 'max_count': max_redelegation_count, } ) # Decline remaining_uses if trust.get('remaining_uses') is not None: raise exception.ValidationError( _( 'remaining_uses must not be set if redelegation is ' 'allowed' ) ) if redelegated_trust: trust['redelegated_trust_id'] = redelegated_trust['id'] remaining_count = redelegated_trust['redelegation_count'] - 1 # Validate depth consistency if ( redelegatable and requested_count and requested_count != remaining_count ): msg = _( 'Modifying "redelegation_count" upon redelegation is ' 'forbidden. Omitting this parameter is advised.' ) raise exception.Forbidden(msg) trust.setdefault('redelegation_count', remaining_count) # Check entire trust pedigree validity pedigree = self.get_trust_pedigree(redelegated_trust['id']) for t in pedigree: self._validate_redelegation(t, trust) trust.setdefault('redelegation_count', max_redelegation_count) ref = self.driver.create_trust(trust_id, trust, roles) notifications.Audit.created(self._TRUST, trust_id, initiator=initiator) return ref def delete_trust(self, trust_id, initiator=None): """Remove a trust. :raises keystone.exception.TrustNotFound: If the trust doesn't exist. 
Recursively remove given and redelegated trusts """ trust = self.driver.get_trust(trust_id) trusts = self.driver.list_trusts_for_trustor( trust['trustor_user_id'], redelegated_trust_id=trust_id ) for t in trusts: # recursive call to make sure all notifications are sent try: self.delete_trust(t['id']) except exception.TrustNotFound: # nosec # if trust was deleted by concurrent process # consistency must not suffer pass # end recursion self.driver.delete_trust(trust_id) notifications.Audit.deleted(self._TRUST, trust_id, initiator) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/trust/schema.py0000664000175000017500000000346000000000000020322 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from keystone.common import validation from keystone.common.validation import parameter_types _role_properties = { 'type': 'array', 'items': { 'type': 'object', 'properties': { 'id': parameter_types.id_string, 'name': parameter_types.name, }, 'minProperties': 1, 'maxProperties': 1, 'additionalProperties': False, }, } _trust_properties = { # NOTE(lbragstad): These are set as external_id_string because they have # the ability to be read as LDAP user identifiers, which could be something # other than uuid. 
'trustor_user_id': parameter_types.external_id_string, 'trustee_user_id': parameter_types.external_id_string, 'impersonation': parameter_types.boolean, 'project_id': validation.nullable(parameter_types.id_string), 'remaining_uses': {'type': ['integer', 'null'], 'minimum': 1}, 'expires_at': {'type': ['null', 'string']}, 'allow_redelegation': {'type': ['boolean', 'null']}, 'redelegation_count': {'type': ['integer', 'null'], 'minimum': 0}, 'roles': _role_properties, } trust_create = { 'type': 'object', 'properties': _trust_properties, 'required': ['trustor_user_id', 'trustee_user_id', 'impersonation'], 'additionalProperties': True, } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone/version.py0000664000175000017500000000111400000000000017360 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
def release_string(): return 'v3.14' ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4941142 keystone-26.0.0/keystone.egg-info/0000775000175000017500000000000000000000000017016 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/PKG-INFO0000664000175000017500000000620400000000000020115 0ustar00zuulzuul00000000000000Metadata-Version: 2.1 Name: keystone Version: 26.0.0 Summary: OpenStack Identity Home-page: https://docs.openstack.org/keystone/latest Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ================== OpenStack Keystone ================== .. image:: https://governance.openstack.org/tc/badges/keystone.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on OpenStack Keystone provides authentication, authorization and service discovery mechanisms via HTTP primarily for use by projects in the OpenStack family. It is most commonly deployed as an HTTP interface to existing identity systems, such as LDAP. 
Developer documentation, the source of which is in ``doc/source/``, is published at: https://docs.openstack.org/keystone/latest The API reference and documentation are available at: https://docs.openstack.org/api-ref/identity The canonical client library is available at: https://opendev.org/openstack/python-keystoneclient Documentation for cloud administrators is available at: https://docs.openstack.org/ The source of documentation for cloud administrators is available at: https://opendev.org/openstack/openstack-manuals Information about our team meeting is available at: https://wiki.openstack.org/wiki/Meetings/KeystoneMeeting Release notes is available at: https://docs.openstack.org/releasenotes/keystone Bugs and feature requests are tracked on Launchpad at: https://bugs.launchpad.net/keystone Future design work is tracked at: https://specs.openstack.org/openstack/keystone-specs Contributors are encouraged to join IRC (``#openstack-keystone`` on OFTC): https://wiki.openstack.org/wiki/IRC Source for the project: https://opendev.org/openstack/keystone For information on contributing to Keystone, see ``CONTRIBUTING.rst``. 
Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.8 Classifier: Programming Language :: Python :: 3.9 Classifier: Programming Language :: Python :: 3.10 Classifier: Programming Language :: Python :: 3.11 Requires-Python: >=3.8 Provides-Extra: ldap Provides-Extra: memcache Provides-Extra: test ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/SOURCES.txt0000664000175000017500000021153400000000000020710 0ustar00zuulzuul00000000000000.coveragerc .git-blame-ignore-revs .mailmap .pre-commit-config.yaml .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bindep.txt reno.yaml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/v2-ext/index.rst api-ref/source/v2-ext/ksec2-admin.inc api-ref/source/v2-ext/parameters.yaml api-ref/source/v2-ext/samples/OS-KSEC2/authenticate-request.json api-ref/source/v2-ext/samples/OS-KSEC2/authenticate-response.json api-ref/source/v2-ext/samples/OS-KSEC2/credentials-show-response.json api-ref/source/v2-ext/samples/OS-KSEC2/credentialswithec2-list-response.json api-ref/source/v2-ext/samples/OS-KSEC2/ec2Credentials-create-request.json api-ref/source/v2-ext/samples/OS-KSEC2/ec2Credentials-show-response.json api-ref/source/v3/application-credentials.inc api-ref/source/v3/authenticate-v3.inc api-ref/source/v3/credentials.inc 
api-ref/source/v3/domains-config-v3.inc api-ref/source/v3/domains.inc api-ref/source/v3/groups.inc api-ref/source/v3/index.rst api-ref/source/v3/inherit.inc api-ref/source/v3/os-pki.inc api-ref/source/v3/parameters.yaml api-ref/source/v3/policies.inc api-ref/source/v3/project-tags.inc api-ref/source/v3/projects.inc api-ref/source/v3/regions-v3.inc api-ref/source/v3/roles.inc api-ref/source/v3/service-catalog.inc api-ref/source/v3/status.yaml api-ref/source/v3/system-roles.inc api-ref/source/v3/unified_limits.inc api-ref/source/v3/users.inc api-ref/source/v3-ext/endpoint-policy.inc api-ref/source/v3-ext/ep-filter.inc api-ref/source/v3-ext/federation.inc api-ref/source/v3-ext/index.rst api-ref/source/v3-ext/oauth.inc api-ref/source/v3-ext/oauth2.inc api-ref/source/v3-ext/parameters.yaml api-ref/source/v3-ext/revoke.inc api-ref/source/v3-ext/simple-cert.inc api-ref/source/v3-ext/trust.inc api-ref/source/v3-ext/federation/assertion/assertion.inc api-ref/source/v3-ext/federation/assertion/parameters.yaml api-ref/source/v3-ext/federation/assertion/samples/ecp-saml-assertion-request.json api-ref/source/v3-ext/federation/assertion/samples/ecp-saml-assertion-response.xml api-ref/source/v3-ext/federation/assertion/samples/metadata-response.xml api-ref/source/v3-ext/federation/assertion/samples/saml-assertion-request.json api-ref/source/v3-ext/federation/assertion/samples/saml-assertion-response.xml api-ref/source/v3-ext/federation/auth/auth.inc api-ref/source/v3-ext/federation/auth/parameters.yaml api-ref/source/v3-ext/federation/auth/samples/scoped-token-request.json api-ref/source/v3-ext/federation/auth/samples/scoped-token-response.json api-ref/source/v3-ext/federation/auth/samples/unscoped-token-response.json api-ref/source/v3-ext/federation/identity-provider/idp.inc api-ref/source/v3-ext/federation/identity-provider/parameters.yaml api-ref/source/v3-ext/federation/identity-provider/samples/add-protocol-request.json 
api-ref/source/v3-ext/federation/identity-provider/samples/add-protocol-response.json api-ref/source/v3-ext/federation/identity-provider/samples/get-protocol-response.json api-ref/source/v3-ext/federation/identity-provider/samples/get-response.json api-ref/source/v3-ext/federation/identity-provider/samples/list-protocol-response.json api-ref/source/v3-ext/federation/identity-provider/samples/list-response.json api-ref/source/v3-ext/federation/identity-provider/samples/register-request.json api-ref/source/v3-ext/federation/identity-provider/samples/register-response.json api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-request.json api-ref/source/v3-ext/federation/identity-provider/samples/update-protocol-response.json api-ref/source/v3-ext/federation/identity-provider/samples/update-request.json api-ref/source/v3-ext/federation/identity-provider/samples/update-response.json api-ref/source/v3-ext/federation/mapping/mapping.inc api-ref/source/v3-ext/federation/mapping/parameters.yaml api-ref/source/v3-ext/federation/mapping/samples/create-request.json api-ref/source/v3-ext/federation/mapping/samples/create-response.json api-ref/source/v3-ext/federation/mapping/samples/get-response.json api-ref/source/v3-ext/federation/mapping/samples/list-response.json api-ref/source/v3-ext/federation/mapping/samples/update-request.json api-ref/source/v3-ext/federation/mapping/samples/update-response.json api-ref/source/v3-ext/federation/projects-domains/parameters.yaml api-ref/source/v3-ext/federation/projects-domains/projects-domains.inc api-ref/source/v3-ext/federation/projects-domains/samples/domain-list-response.json api-ref/source/v3-ext/federation/projects-domains/samples/project-list-response.json api-ref/source/v3-ext/federation/service-provider/parameters.yaml api-ref/source/v3-ext/federation/service-provider/sp.inc api-ref/source/v3-ext/federation/service-provider/samples/get-response.json 
api-ref/source/v3-ext/federation/service-provider/samples/list-response.json api-ref/source/v3-ext/federation/service-provider/samples/register-request.json api-ref/source/v3-ext/federation/service-provider/samples/register-response.json api-ref/source/v3-ext/federation/service-provider/samples/update-request.json api-ref/source/v3-ext/federation/service-provider/samples/update-response.json api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/policy-endpoint-associations-list-response.json api-ref/source/v3-ext/samples/OS-ENDPOINT-POLICY/policy-show-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/create-endpoint-group-request.json api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-group-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-groups-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/endpoint-project-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-endpoint-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/list-associations-by-project-response.json api-ref/source/v3-ext/samples/OS-EP-FILTER/list-service-endpoints.json api-ref/source/v3-ext/samples/OS-EP-FILTER/update-endpoint-group-request.json api-ref/source/v3-ext/samples/OS-EP-FILTER/update-endpoint-group-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-create-response.txt api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-role-show-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-roles-list-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/access-token-show-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/access-tokens-list-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/authorize-request-token-request.json api-ref/source/v3-ext/samples/OS-OAUTH1/authorize-request-token-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-create-request.json api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-create-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-show-response.json 
api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-update-request.json api-ref/source/v3-ext/samples/OS-OAUTH1/consumer-update-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/consumers-list-response.json api-ref/source/v3-ext/samples/OS-OAUTH1/request-token-create-response.txt api-ref/source/v3-ext/samples/OS-OAUTH2/token-create-request.txt api-ref/source/v3-ext/samples/OS-OAUTH2/token-create-response.json api-ref/source/v3-ext/samples/OS-REVOKE/list-revoke-response.json api-ref/source/v3-ext/samples/OS-SIMPLE-CERT/show-ca-certificate-response.txt api-ref/source/v3-ext/samples/OS-SIMPLE-CERT/show-signing-certificate-response.txt api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-redelegated-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-request.json api-ref/source/v3-ext/samples/OS-TRUST/trust-auth-trust-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-create-request.json api-ref/source/v3-ext/samples/OS-TRUST/trust-create-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-get-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-get-role-delegated-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-list-response.json api-ref/source/v3-ext/samples/OS-TRUST/trust-list-roles-delegated-response.json api-ref/source/v3/samples/admin/access-rule-get-response.json api-ref/source/v3/samples/admin/access-rules-list-response.json api-ref/source/v3/samples/admin/application-credential-create-request.json api-ref/source/v3/samples/admin/application-credential-create-response.json api-ref/source/v3/samples/admin/application-credential-get-response.json api-ref/source/v3/samples/admin/application-credential-list-response.json api-ref/source/v3/samples/admin/auth-application-credential-id-request.json api-ref/source/v3/samples/admin/auth-application-credential-name-request.json api-ref/source/v3/samples/admin/auth-application-credential-response.json api-ref/source/v3/samples/admin/auth-password-explicit-unscoped-request.json 
api-ref/source/v3/samples/admin/auth-password-explicit-unscoped-response.json api-ref/source/v3/samples/admin/auth-password-project-scoped-response.json api-ref/source/v3/samples/admin/auth-password-system-scoped-request-with-domain.json api-ref/source/v3/samples/admin/auth-password-unscoped-request-with-domain.json api-ref/source/v3/samples/admin/auth-password-unscoped-request.json api-ref/source/v3/samples/admin/auth-password-unscoped-response.json api-ref/source/v3/samples/admin/auth-password-user-name-unscoped-response-HTTP.txt api-ref/source/v3/samples/admin/auth-token-explicit-unscoped-request.json api-ref/source/v3/samples/admin/auth-token-scoped-request.json api-ref/source/v3/samples/admin/auth-token-scoped-response.json api-ref/source/v3/samples/admin/auth-token-unscoped-request.json api-ref/source/v3/samples/admin/auth-token-unscoped-response.json api-ref/source/v3/samples/admin/create-role-inferences-response.json api-ref/source/v3/samples/admin/credential-create-request.json api-ref/source/v3/samples/admin/credential-create-response.json api-ref/source/v3/samples/admin/credential-show-response.json api-ref/source/v3/samples/admin/credential-update-request.json api-ref/source/v3/samples/admin/credential-update-response.json api-ref/source/v3/samples/admin/credentials-list-response.json api-ref/source/v3/samples/admin/domain-config-create-request.json api-ref/source/v3/samples/admin/domain-config-create-response.json api-ref/source/v3/samples/admin/domain-config-default-response.json api-ref/source/v3/samples/admin/domain-config-group-default-response.json api-ref/source/v3/samples/admin/domain-config-group-option-default-response.json api-ref/source/v3/samples/admin/domain-config-group-option-show-response.json api-ref/source/v3/samples/admin/domain-config-group-option-update-request.json api-ref/source/v3/samples/admin/domain-config-group-option-update-response.json api-ref/source/v3/samples/admin/domain-config-group-show-response.json 
api-ref/source/v3/samples/admin/domain-config-group-update-request.json api-ref/source/v3/samples/admin/domain-config-group-update-response.json api-ref/source/v3/samples/admin/domain-config-show-response.json api-ref/source/v3/samples/admin/domain-config-update-request.json api-ref/source/v3/samples/admin/domain-config-update-response.json api-ref/source/v3/samples/admin/domain-create-request.json api-ref/source/v3/samples/admin/domain-create-response.json api-ref/source/v3/samples/admin/domain-group-roles-list-response.json api-ref/source/v3/samples/admin/domain-group-update-request.json api-ref/source/v3/samples/admin/domain-show-response.json api-ref/source/v3/samples/admin/domain-specific-role-create-request.json api-ref/source/v3/samples/admin/domain-update-request.json api-ref/source/v3/samples/admin/domain-update-response.json api-ref/source/v3/samples/admin/domain-user-roles-list-response.json api-ref/source/v3/samples/admin/domains-list-response.json api-ref/source/v3/samples/admin/endpoint-create-request.json api-ref/source/v3/samples/admin/endpoint-create-response.json api-ref/source/v3/samples/admin/endpoint-show-response.json api-ref/source/v3/samples/admin/endpoint-update-request.json api-ref/source/v3/samples/admin/endpoint-update-response.json api-ref/source/v3/samples/admin/endpoints-list-response.json api-ref/source/v3/samples/admin/get-available-domain-scopes-response.json api-ref/source/v3/samples/admin/get-available-project-scopes-response.json api-ref/source/v3/samples/admin/get-available-system-scopes-response.json api-ref/source/v3/samples/admin/get-role-inferences-response.json api-ref/source/v3/samples/admin/get-service-catalog-response.json api-ref/source/v3/samples/admin/group-create-request.json api-ref/source/v3/samples/admin/group-create-response.json api-ref/source/v3/samples/admin/group-roles-domain-list-response.json api-ref/source/v3/samples/admin/group-show-response.json api-ref/source/v3/samples/admin/group-update-request.json 
api-ref/source/v3/samples/admin/group-update-response.json api-ref/source/v3/samples/admin/group-users-list-response.json api-ref/source/v3/samples/admin/groups-list-response.json api-ref/source/v3/samples/admin/identity-version-response.json api-ref/source/v3/samples/admin/identity-versions-response.json api-ref/source/v3/samples/admin/limit-flat-model-response.json api-ref/source/v3/samples/admin/limit-show-response.json api-ref/source/v3/samples/admin/limits-create-request.json api-ref/source/v3/samples/admin/limits-create-response.json api-ref/source/v3/samples/admin/limits-list-response.json api-ref/source/v3/samples/admin/limits-update-request.json api-ref/source/v3/samples/admin/limits-update-response.json api-ref/source/v3/samples/admin/list-implied-roles-for-role-response.json api-ref/source/v3/samples/admin/list-system-roles-for-group-response.json api-ref/source/v3/samples/admin/list-system-roles-for-user-response.json api-ref/source/v3/samples/admin/policies-list-response.json api-ref/source/v3/samples/admin/policy-create-request.json api-ref/source/v3/samples/admin/policy-create-response.json api-ref/source/v3/samples/admin/policy-show-response.json api-ref/source/v3/samples/admin/policy-update-request.json api-ref/source/v3/samples/admin/policy-update-response.json api-ref/source/v3/samples/admin/project-create-domain-request.json api-ref/source/v3/samples/admin/project-create-request.json api-ref/source/v3/samples/admin/project-create-response.json api-ref/source/v3/samples/admin/project-enable-request.json api-ref/source/v3/samples/admin/project-group-roles-list-response.json api-ref/source/v3/samples/admin/project-show-parents-response.json api-ref/source/v3/samples/admin/project-show-response.json api-ref/source/v3/samples/admin/project-show-subtree-response.json api-ref/source/v3/samples/admin/project-tags-list-response.json api-ref/source/v3/samples/admin/project-tags-update-request.json 
api-ref/source/v3/samples/admin/project-tags-update-response.json api-ref/source/v3/samples/admin/project-update-request.json api-ref/source/v3/samples/admin/project-update-response.json api-ref/source/v3/samples/admin/project-user-roles-list-response.json api-ref/source/v3/samples/admin/projects-list-response.json api-ref/source/v3/samples/admin/region-create-request.json api-ref/source/v3/samples/admin/region-create-response.json api-ref/source/v3/samples/admin/region-show-response.json api-ref/source/v3/samples/admin/region-update-request.json api-ref/source/v3/samples/admin/region-update-response.json api-ref/source/v3/samples/admin/regions-list-response.json api-ref/source/v3/samples/admin/registered-limit-show-response.json api-ref/source/v3/samples/admin/registered-limits-create-request.json api-ref/source/v3/samples/admin/registered-limits-create-response.json api-ref/source/v3/samples/admin/registered-limits-list-response.json api-ref/source/v3/samples/admin/registered-limits-update-request.json api-ref/source/v3/samples/admin/registered-limits-update-response.json api-ref/source/v3/samples/admin/role-assignments-effective-list-include-names-response.json api-ref/source/v3/samples/admin/role-assignments-effective-list-response.json api-ref/source/v3/samples/admin/role-assignments-effective-list-response.txt api-ref/source/v3/samples/admin/role-assignments-list-include-subtree-response.json api-ref/source/v3/samples/admin/role-assignments-list-response.json api-ref/source/v3/samples/admin/role-assignments-list-response.txt api-ref/source/v3/samples/admin/role-create-request.json api-ref/source/v3/samples/admin/role-create-response.json api-ref/source/v3/samples/admin/role-inferences-response.json api-ref/source/v3/samples/admin/role-show-response.json api-ref/source/v3/samples/admin/role-update-request.json api-ref/source/v3/samples/admin/role-update-response.json api-ref/source/v3/samples/admin/roles-list-response.json 
api-ref/source/v3/samples/admin/service-create-request.json api-ref/source/v3/samples/admin/service-create-response.json api-ref/source/v3/samples/admin/service-show-response.json api-ref/source/v3/samples/admin/service-update-request.json api-ref/source/v3/samples/admin/service-update-response.json api-ref/source/v3/samples/admin/services-list-response.json api-ref/source/v3/samples/admin/token-validate-request.txt api-ref/source/v3/samples/admin/user-create-request.json api-ref/source/v3/samples/admin/user-create-response.json api-ref/source/v3/samples/admin/user-groups-list-response.json api-ref/source/v3/samples/admin/user-password-update-request.json api-ref/source/v3/samples/admin/user-projects-list-response.json api-ref/source/v3/samples/admin/user-roles-domain-list-response.json api-ref/source/v3/samples/admin/user-show-response.json api-ref/source/v3/samples/admin/user-update-request.json api-ref/source/v3/samples/admin/user-update-response.json api-ref/source/v3/samples/admin/users-list-response.json api-ref/source/v3/samples/auth/requests/domain-id-password.json api-ref/source/v3/samples/auth/requests/domain-id-token.json api-ref/source/v3/samples/auth/requests/domain-name-password.json api-ref/source/v3/samples/auth/requests/domain-name-token.json api-ref/source/v3/samples/auth/requests/project-id-password.json api-ref/source/v3/samples/auth/requests/project-id-token.json api-ref/source/v3/samples/auth/requests/project-id-totp.json api-ref/source/v3/samples/auth/requests/project-name-password.json api-ref/source/v3/samples/auth/requests/project-name-token.json api-ref/source/v3/samples/auth/requests/system-password.json api-ref/source/v3/samples/auth/requests/system-token.json api-ref/source/v3/samples/auth/responses/auth-receipt-password.json api-ref/source/v3/samples/auth/responses/domain-scoped-password.json api-ref/source/v3/samples/auth/responses/domain-scoped-token.json api-ref/source/v3/samples/auth/responses/project-scoped-password-totp.json 
api-ref/source/v3/samples/auth/responses/project-scoped-password.json api-ref/source/v3/samples/auth/responses/project-scoped-token.json api-ref/source/v3/samples/auth/responses/system-scoped-password.json api-ref/source/v3/samples/auth/responses/system-scoped-token.json api-ref/source/v3/samples/auth/responses/unscoped-password.json config-generator/keystone-policy-generator.conf config-generator/keystone.conf devstack/plugin.sh devstack/files/federation/attribute-map.xml devstack/files/federation/shib_apache_alias.txt devstack/files/federation/shib_apache_handler.txt devstack/files/federation/shibboleth2.xml devstack/files/oidc/apache_oidc.conf devstack/lib/federation.sh devstack/lib/oidc.sh devstack/lib/scope.sh devstack/tools/oidc/__init__.py devstack/tools/oidc/docker-compose.yaml devstack/tools/oidc/setup_keycloak_client.py doc/Makefile doc/README.rst doc/requirements.txt doc/ext/__init__.py doc/source/api_curl_examples.rst doc/source/code_documentation.rst doc/source/conf.py doc/source/index.rst doc/source/indices-tables.rst doc/source/_static/horizon-login-idp.png doc/source/_static/horizon-login-sp.png doc/source/admin/auth-totp.rst doc/source/admin/authentication-mechanisms.rst doc/source/admin/bootstrap.rst doc/source/admin/caching-layer.inc doc/source/admin/case-insensitive.rst doc/source/admin/cli-manage-projects-users-and-roles.rst doc/source/admin/configuration.rst doc/source/admin/configure-https.rst doc/source/admin/configure_tokenless_x509.rst doc/source/admin/credential-encryption.rst doc/source/admin/domain-specific-config.inc doc/source/admin/endpoint-filtering.inc doc/source/admin/endpoint-policy.inc doc/source/admin/event_notifications.rst doc/source/admin/external-authentication.rst doc/source/admin/fernet-token-faq.rst doc/source/admin/getting-started.rst doc/source/admin/health-check-middleware.rst doc/source/admin/identity-concepts.rst doc/source/admin/identity-sources.rst doc/source/admin/identity-support-matrix.ini 
doc/source/admin/index.rst doc/source/admin/integrate-with-ldap.inc doc/source/admin/jws-key-rotation.rst doc/source/admin/keystone-features.rst doc/source/admin/limit-list-size.inc doc/source/admin/logging.inc doc/source/admin/manage-services.rst doc/source/admin/manage-trusts.rst doc/source/admin/multi-factor-authentication.rst doc/source/admin/oauth1.rst doc/source/admin/oauth2-usage-guide.rst doc/source/admin/operations.rst doc/source/admin/performance.inc doc/source/admin/resource-options.rst doc/source/admin/security-compliance.inc doc/source/admin/service-api-protection.rst doc/source/admin/token-provider.rst doc/source/admin/token-support-matrix.ini doc/source/admin/tokens-overview.rst doc/source/admin/tokens.rst doc/source/admin/troubleshoot.inc doc/source/admin/unified-limits.rst doc/source/admin/upgrading.rst doc/source/admin/url-safe-naming.inc doc/source/admin/federation/configure_federation.rst doc/source/admin/federation/federated_identity.rst doc/source/admin/federation/introduction.rst doc/source/admin/federation/mapping_combinations.rst doc/source/admin/federation/mellon.inc doc/source/admin/federation/openidc.inc doc/source/admin/federation/shibboleth.inc doc/source/admin/figures/keystone-federation.png doc/source/admin/figures/keystone-federation.svg doc/source/cli/commands.rst doc/source/cli/index.rst doc/source/cli/keystone-manage.rst doc/source/cli/keystone-status.rst doc/source/configuration/config-options.rst doc/source/configuration/index.rst doc/source/configuration/policy.rst doc/source/configuration/samples/index.rst doc/source/configuration/samples/keystone-conf.rst doc/source/configuration/samples/logging-conf.rst doc/source/configuration/samples/policy-yaml.rst doc/source/contributor/api_change_tutorial.rst doc/source/contributor/architecture.rst doc/source/contributor/auth-plugins.rst doc/source/contributor/caching-layer.rst doc/source/contributor/contributing.rst doc/source/contributor/database-migrations.rst 
doc/source/contributor/developing-drivers.rst doc/source/contributor/doctor-checks.rst doc/source/contributor/filtering-responsibilities.rst doc/source/contributor/how-can-i-help.rst doc/source/contributor/http-api.rst doc/source/contributor/id-manage.rst doc/source/contributor/index.rst doc/source/contributor/list-truncation.rst doc/source/contributor/programming-exercises.rst doc/source/contributor/proposing-features.rst doc/source/contributor/release-notes.rst doc/source/contributor/service-catalog.rst doc/source/contributor/services.rst doc/source/contributor/set-up-keystone.rst doc/source/contributor/testing-keystone.rst doc/source/contributor/vision-reflection.rst doc/source/getting-started/architecture.rst doc/source/getting-started/community.rst doc/source/getting-started/index.rst doc/source/getting-started/policy_mapping.rst doc/source/install/get-started-obs.rst doc/source/install/get-started-rdo.rst doc/source/install/get-started-ubuntu.rst doc/source/install/index-obs.rst doc/source/install/index-rdo.rst doc/source/install/index-ubuntu.rst doc/source/install/index.rst doc/source/install/keystone-install-obs.rst doc/source/install/keystone-install-rdo.rst doc/source/install/keystone-install-ubuntu.rst doc/source/install/keystone-openrc-obs.rst doc/source/install/keystone-openrc-rdo.rst doc/source/install/keystone-openrc-ubuntu.rst doc/source/install/keystone-users-obs.rst doc/source/install/keystone-users-rdo.rst doc/source/install/keystone-users-ubuntu.rst doc/source/install/keystone-verify-obs.rst doc/source/install/keystone-verify-rdo.rst doc/source/install/keystone-verify-ubuntu.rst doc/source/install/common/get-started-identity.inc doc/source/install/common/keystone-users.inc doc/source/install/common/openrc.inc doc/source/install/shared/note_configuration_vary_by_distribution.rst doc/source/user/application_credentials.rst doc/source/user/index.rst doc/source/user/json_home.rst doc/source/user/multi-factor-authentication.rst 
doc/source/user/supported_clients.rst doc/source/user/trusts.rst etc/README.txt etc/default_catalog.templates etc/logging.conf.sample etc/sso_callback_template.html examples/pki/certs/cacert.pem examples/pki/certs/middleware.pem examples/pki/certs/signing_cert.pem examples/pki/certs/ssl_cert.pem examples/pki/private/cakey.pem examples/pki/private/signing_key.pem examples/pki/private/ssl_key.pem httpd/README httpd/keystone-uwsgi-admin.ini httpd/keystone-uwsgi-public.ini httpd/uwsgi-keystone.conf httpd/wsgi-keystone.conf keystone/__init__.py keystone/exception.py keystone/i18n.py keystone/notifications.py keystone/version.py keystone.egg-info/PKG-INFO keystone.egg-info/SOURCES.txt keystone.egg-info/dependency_links.txt keystone.egg-info/entry_points.txt keystone.egg-info/not-zip-safe keystone.egg-info/pbr.json keystone.egg-info/requires.txt keystone.egg-info/top_level.txt keystone/api/__init__.py keystone/api/auth.py keystone/api/credentials.py keystone/api/discovery.py keystone/api/domains.py keystone/api/ec2tokens.py keystone/api/endpoints.py keystone/api/groups.py keystone/api/limits.py keystone/api/os_ep_filter.py keystone/api/os_federation.py keystone/api/os_inherit.py keystone/api/os_oauth1.py keystone/api/os_oauth2.py keystone/api/os_revoke.py keystone/api/os_simple_cert.py keystone/api/policy.py keystone/api/projects.py keystone/api/regions.py keystone/api/registered_limits.py keystone/api/role_assignments.py keystone/api/role_inferences.py keystone/api/roles.py keystone/api/s3tokens.py keystone/api/services.py keystone/api/system.py keystone/api/trusts.py keystone/api/users.py keystone/api/_shared/EC2_S3_Resource.py keystone/api/_shared/__init__.py keystone/api/_shared/authentication.py keystone/api/_shared/implied_roles.py keystone/api/_shared/json_home_relations.py keystone/api/_shared/saml.py keystone/application_credential/__init__.py keystone/application_credential/core.py keystone/application_credential/schema.py 
keystone/application_credential/backends/__init__.py keystone/application_credential/backends/base.py keystone/application_credential/backends/sql.py keystone/assignment/__init__.py keystone/assignment/core.py keystone/assignment/schema.py keystone/assignment/backends/__init__.py keystone/assignment/backends/base.py keystone/assignment/backends/sql.py keystone/assignment/role_backends/__init__.py keystone/assignment/role_backends/base.py keystone/assignment/role_backends/resource_options.py keystone/assignment/role_backends/sql.py keystone/assignment/role_backends/sql_model.py keystone/auth/__init__.py keystone/auth/core.py keystone/auth/schema.py keystone/auth/plugins/__init__.py keystone/auth/plugins/application_credential.py keystone/auth/plugins/base.py keystone/auth/plugins/core.py keystone/auth/plugins/external.py keystone/auth/plugins/mapped.py keystone/auth/plugins/oauth1.py keystone/auth/plugins/password.py keystone/auth/plugins/token.py keystone/auth/plugins/totp.py keystone/catalog/__init__.py keystone/catalog/core.py keystone/catalog/schema.py keystone/catalog/backends/__init__.py keystone/catalog/backends/base.py keystone/catalog/backends/sql.py keystone/catalog/backends/templated.py keystone/cmd/__init__.py keystone/cmd/bootstrap.py keystone/cmd/cli.py keystone/cmd/idutils.py keystone/cmd/manage.py keystone/cmd/status.py keystone/cmd/doctor/__init__.py keystone/cmd/doctor/caching.py keystone/cmd/doctor/credential.py keystone/cmd/doctor/database.py keystone/cmd/doctor/debug.py keystone/cmd/doctor/federation.py keystone/cmd/doctor/ldap.py keystone/cmd/doctor/security_compliance.py keystone/cmd/doctor/tokens.py keystone/cmd/doctor/tokens_fernet.py keystone/common/__init__.py keystone/common/authorization.py keystone/common/context.py keystone/common/driver_hints.py keystone/common/fernet_utils.py keystone/common/json_home.py keystone/common/jwt_utils.py keystone/common/manager.py keystone/common/password_hashing.py keystone/common/profiler.py 
keystone/common/provider_api.py keystone/common/render_token.py keystone/common/tokenless_auth.py keystone/common/utils.py keystone/common/cache/__init__.py keystone/common/cache/_context_cache.py keystone/common/cache/core.py keystone/common/policies/__init__.py keystone/common/policies/access_rule.py keystone/common/policies/access_token.py keystone/common/policies/application_credential.py keystone/common/policies/auth.py keystone/common/policies/base.py keystone/common/policies/consumer.py keystone/common/policies/credential.py keystone/common/policies/domain.py keystone/common/policies/domain_config.py keystone/common/policies/ec2_credential.py keystone/common/policies/endpoint.py keystone/common/policies/endpoint_group.py keystone/common/policies/grant.py keystone/common/policies/group.py keystone/common/policies/identity_provider.py keystone/common/policies/implied_role.py keystone/common/policies/limit.py keystone/common/policies/mapping.py keystone/common/policies/policy.py keystone/common/policies/policy_association.py keystone/common/policies/project.py keystone/common/policies/project_endpoint.py keystone/common/policies/protocol.py keystone/common/policies/region.py keystone/common/policies/registered_limit.py keystone/common/policies/revoke_event.py keystone/common/policies/role.py keystone/common/policies/role_assignment.py keystone/common/policies/service.py keystone/common/policies/service_provider.py keystone/common/policies/token.py keystone/common/policies/token_revocation.py keystone/common/policies/trust.py keystone/common/policies/user.py keystone/common/rbac_enforcer/__init__.py keystone/common/rbac_enforcer/enforcer.py keystone/common/rbac_enforcer/policy.py keystone/common/resource_options/__init__.py keystone/common/resource_options/core.py keystone/common/resource_options/options/__init__.py keystone/common/resource_options/options/immutable.py keystone/common/sql/__init__.py keystone/common/sql/alembic.ini keystone/common/sql/core.py 
keystone/common/sql/upgrades.py keystone/common/sql/migrations/README.rst keystone/common/sql/migrations/__init__.py keystone/common/sql/migrations/autogen.py keystone/common/sql/migrations/env.py keystone/common/sql/migrations/manage.py keystone/common/sql/migrations/script.py.mako keystone/common/sql/migrations/versions/27e647c0fad4_initial_version.py keystone/common/sql/migrations/versions/CONTRACT_HEAD keystone/common/sql/migrations/versions/EXPAND_HEAD keystone/common/sql/migrations/versions/2024.01/expand/47147121_add_identity_federation_attribute_mapping_schema_version.py keystone/common/sql/migrations/versions/bobcat/contract/99de3849d860_fix_incorrect_constraints.py keystone/common/sql/migrations/versions/bobcat/contract/c88cdce8f248_remove_duplicate_constraints.py keystone/common/sql/migrations/versions/bobcat/expand/11c3b243b4cb_remove_service_provider_relay_state_server_default.py keystone/common/sql/migrations/versions/bobcat/expand/b4f8b3f584e0_fix_incorrect_constraints.py keystone/common/sql/migrations/versions/yoga/contract/e25ffa003242_initial.py keystone/common/sql/migrations/versions/yoga/expand/29e87d24a316_initial.py keystone/common/validation/__init__.py keystone/common/validation/parameter_types.py keystone/common/validation/validators.py keystone/conf/__init__.py keystone/conf/application_credential.py keystone/conf/assignment.py keystone/conf/auth.py keystone/conf/catalog.py keystone/conf/constants.py keystone/conf/credential.py keystone/conf/default.py keystone/conf/domain_config.py keystone/conf/endpoint_filter.py keystone/conf/endpoint_policy.py keystone/conf/federation.py keystone/conf/fernet_receipts.py keystone/conf/fernet_tokens.py keystone/conf/identity.py keystone/conf/identity_mapping.py keystone/conf/jwt_tokens.py keystone/conf/ldap.py keystone/conf/oauth1.py keystone/conf/oauth2.py keystone/conf/opts.py keystone/conf/policy.py keystone/conf/receipt.py keystone/conf/resource.py keystone/conf/revoke.py keystone/conf/role.py 
keystone/conf/saml.py keystone/conf/security_compliance.py keystone/conf/shadow_users.py keystone/conf/token.py keystone/conf/tokenless_auth.py keystone/conf/totp.py keystone/conf/trust.py keystone/conf/unified_limit.py keystone/conf/utils.py keystone/conf/wsgi.py keystone/credential/__init__.py keystone/credential/core.py keystone/credential/provider.py keystone/credential/schema.py keystone/credential/backends/__init__.py keystone/credential/backends/base.py keystone/credential/backends/sql.py keystone/credential/providers/__init__.py keystone/credential/providers/core.py keystone/credential/providers/fernet/__init__.py keystone/credential/providers/fernet/core.py keystone/endpoint_policy/__init__.py keystone/endpoint_policy/core.py keystone/endpoint_policy/backends/__init__.py keystone/endpoint_policy/backends/base.py keystone/endpoint_policy/backends/sql.py keystone/federation/__init__.py keystone/federation/constants.py keystone/federation/core.py keystone/federation/idp.py keystone/federation/schema.py keystone/federation/utils.py keystone/federation/backends/__init__.py keystone/federation/backends/base.py keystone/federation/backends/sql.py keystone/identity/__init__.py keystone/identity/core.py keystone/identity/generator.py keystone/identity/schema.py keystone/identity/backends/__init__.py keystone/identity/backends/base.py keystone/identity/backends/resource_options.py keystone/identity/backends/sql.py keystone/identity/backends/sql_model.py keystone/identity/backends/ldap/__init__.py keystone/identity/backends/ldap/common.py keystone/identity/backends/ldap/core.py keystone/identity/backends/ldap/models.py keystone/identity/id_generators/__init__.py keystone/identity/id_generators/sha256.py keystone/identity/mapping_backends/__init__.py keystone/identity/mapping_backends/base.py keystone/identity/mapping_backends/mapping.py keystone/identity/mapping_backends/sql.py keystone/identity/shadow_backends/__init__.py keystone/identity/shadow_backends/base.py 
keystone/identity/shadow_backends/sql.py keystone/limit/__init__.py keystone/limit/core.py keystone/limit/schema.py keystone/limit/backends/__init__.py keystone/limit/backends/base.py keystone/limit/backends/sql.py keystone/limit/models/__init__.py keystone/limit/models/base.py keystone/limit/models/flat.py keystone/limit/models/strict_two_level.py keystone/locale/de/LC_MESSAGES/keystone.po keystone/locale/en_GB/LC_MESSAGES/keystone.po keystone/locale/es/LC_MESSAGES/keystone.po keystone/locale/fr/LC_MESSAGES/keystone.po keystone/locale/it/LC_MESSAGES/keystone.po keystone/locale/ja/LC_MESSAGES/keystone.po keystone/locale/ko_KR/LC_MESSAGES/keystone.po keystone/locale/pt_BR/LC_MESSAGES/keystone.po keystone/locale/ru/LC_MESSAGES/keystone.po keystone/locale/zh_CN/LC_MESSAGES/keystone.po keystone/locale/zh_TW/LC_MESSAGES/keystone.po keystone/models/__init__.py keystone/models/receipt_model.py keystone/models/revoke_model.py keystone/models/token_model.py keystone/oauth1/__init__.py keystone/oauth1/core.py keystone/oauth1/schema.py keystone/oauth1/validator.py keystone/oauth1/backends/__init__.py keystone/oauth1/backends/base.py keystone/oauth1/backends/sql.py keystone/oauth2/__init__.py keystone/oauth2/handlers.py keystone/policy/__init__.py keystone/policy/core.py keystone/policy/schema.py keystone/policy/backends/__init__.py keystone/policy/backends/base.py keystone/policy/backends/rules.py keystone/policy/backends/sql.py keystone/receipt/__init__.py keystone/receipt/handlers.py keystone/receipt/provider.py keystone/receipt/receipt_formatters.py keystone/receipt/providers/__init__.py keystone/receipt/providers/base.py keystone/receipt/providers/fernet/__init__.py keystone/receipt/providers/fernet/core.py keystone/resource/__init__.py keystone/resource/core.py keystone/resource/schema.py keystone/resource/backends/__init__.py keystone/resource/backends/base.py keystone/resource/backends/resource_options.py keystone/resource/backends/sql.py 
keystone/resource/backends/sql_model.py keystone/resource/config_backends/__init__.py keystone/resource/config_backends/base.py keystone/resource/config_backends/sql.py keystone/revoke/__init__.py keystone/revoke/core.py keystone/revoke/model.py keystone/revoke/backends/__init__.py keystone/revoke/backends/base.py keystone/revoke/backends/sql.py keystone/server/__init__.py keystone/server/backends.py keystone/server/wsgi.py keystone/server/flask/__init__.py keystone/server/flask/application.py keystone/server/flask/common.py keystone/server/flask/core.py keystone/server/flask/request_processing/__init__.py keystone/server/flask/request_processing/json_body.py keystone/server/flask/request_processing/req_logging.py keystone/server/flask/request_processing/middleware/__init__.py keystone/server/flask/request_processing/middleware/auth_context.py keystone/server/flask/request_processing/middleware/url_normalize.py keystone/tests/__init__.py keystone/tests/common/__init__.py keystone/tests/common/auth.py keystone/tests/functional/__init__.py keystone/tests/functional/core.py keystone/tests/functional/shared/__init__.py keystone/tests/functional/shared/test_running.py keystone/tests/hacking/__init__.py keystone/tests/hacking/checks.py keystone/tests/protection/__init__.py keystone/tests/protection/v3/__init__.py keystone/tests/protection/v3/test_access_rules.py keystone/tests/protection/v3/test_application_credential.py keystone/tests/protection/v3/test_assignment.py keystone/tests/protection/v3/test_consumer.py keystone/tests/protection/v3/test_credentials.py keystone/tests/protection/v3/test_domain_config.py keystone/tests/protection/v3/test_domain_roles.py keystone/tests/protection/v3/test_domains.py keystone/tests/protection/v3/test_ec2_credential.py keystone/tests/protection/v3/test_endpoint_group.py keystone/tests/protection/v3/test_endpoints.py keystone/tests/protection/v3/test_grants.py keystone/tests/protection/v3/test_groups.py 
keystone/tests/protection/v3/test_identity_providers.py keystone/tests/protection/v3/test_implied_roles.py keystone/tests/protection/v3/test_limits.py keystone/tests/protection/v3/test_mappings.py keystone/tests/protection/v3/test_policy.py keystone/tests/protection/v3/test_policy_association.py keystone/tests/protection/v3/test_project_endpoint.py keystone/tests/protection/v3/test_project_tags.py keystone/tests/protection/v3/test_projects.py keystone/tests/protection/v3/test_protocols.py keystone/tests/protection/v3/test_regions.py keystone/tests/protection/v3/test_registered_limits.py keystone/tests/protection/v3/test_roles.py keystone/tests/protection/v3/test_service_providers.py keystone/tests/protection/v3/test_services.py keystone/tests/protection/v3/test_system_assignments.py keystone/tests/protection/v3/test_tokens.py keystone/tests/protection/v3/test_trusts.py keystone/tests/protection/v3/test_users.py keystone/tests/unit/__init__.py keystone/tests/unit/base_classes.py keystone/tests/unit/core.py keystone/tests/unit/default_catalog.templates keystone/tests/unit/default_catalog_multi_region.templates keystone/tests/unit/default_fixtures.py keystone/tests/unit/fakeldap.py keystone/tests/unit/federation_fixtures.py keystone/tests/unit/filtering.py keystone/tests/unit/identity_mapping.py keystone/tests/unit/mapping_fixtures.py keystone/tests/unit/rest.py keystone/tests/unit/test_app_config.py keystone/tests/unit/test_associate_project_endpoint_extension.py keystone/tests/unit/test_auth_plugin.py keystone/tests/unit/test_backend_endpoint_policy.py keystone/tests/unit/test_backend_endpoint_policy_sql.py keystone/tests/unit/test_backend_federation_sql.py keystone/tests/unit/test_backend_id_mapping_sql.py keystone/tests/unit/test_backend_ldap.py keystone/tests/unit/test_backend_ldap_pool.py keystone/tests/unit/test_backend_rules.py keystone/tests/unit/test_backend_sql.py keystone/tests/unit/test_backend_templated.py keystone/tests/unit/test_cli.py 
keystone/tests/unit/test_config.py keystone/tests/unit/test_contrib_ec2_core.py keystone/tests/unit/test_contrib_s3_core.py keystone/tests/unit/test_contrib_simple_cert.py keystone/tests/unit/test_driver_hints.py keystone/tests/unit/test_entry_points.py keystone/tests/unit/test_exception.py keystone/tests/unit/test_hacking_checks.py keystone/tests/unit/test_healthcheck.py keystone/tests/unit/test_ldap_livetest.py keystone/tests/unit/test_ldap_pool_livetest.py keystone/tests/unit/test_ldap_tls_livetest.py keystone/tests/unit/test_limits.py keystone/tests/unit/test_middleware.py keystone/tests/unit/test_policy.py keystone/tests/unit/test_receipt_provider.py keystone/tests/unit/test_revoke.py keystone/tests/unit/test_shadow_users.py keystone/tests/unit/test_sql_banned_operations.py keystone/tests/unit/test_sql_upgrade.py keystone/tests/unit/test_token_provider.py keystone/tests/unit/test_url_middleware.py keystone/tests/unit/test_v3.py keystone/tests/unit/test_v3_application_credential.py keystone/tests/unit/test_v3_assignment.py keystone/tests/unit/test_v3_auth.py keystone/tests/unit/test_v3_catalog.py keystone/tests/unit/test_v3_credential.py keystone/tests/unit/test_v3_domain_config.py keystone/tests/unit/test_v3_endpoint_policy.py keystone/tests/unit/test_v3_federation.py keystone/tests/unit/test_v3_filters.py keystone/tests/unit/test_v3_identity.py keystone/tests/unit/test_v3_oauth1.py keystone/tests/unit/test_v3_oauth2.py keystone/tests/unit/test_v3_os_revoke.py keystone/tests/unit/test_v3_policy.py keystone/tests/unit/test_v3_resource.py keystone/tests/unit/test_v3_trust.py keystone/tests/unit/test_validation.py keystone/tests/unit/test_versions.py keystone/tests/unit/utils.py keystone/tests/unit/application_credential/__init__.py keystone/tests/unit/application_credential/test_backends.py keystone/tests/unit/application_credential/backends/__init__.py keystone/tests/unit/application_credential/backends/test_sql.py keystone/tests/unit/assignment/__init__.py 
keystone/tests/unit/assignment/test_backends.py keystone/tests/unit/assignment/test_core.py keystone/tests/unit/assignment/role_backends/__init__.py keystone/tests/unit/assignment/role_backends/test_sql.py keystone/tests/unit/auth/__init__.py keystone/tests/unit/auth/test_controllers.py keystone/tests/unit/auth/test_schema.py keystone/tests/unit/auth/plugins/__init__.py keystone/tests/unit/auth/plugins/test_core.py keystone/tests/unit/auth/plugins/test_mapped.py keystone/tests/unit/backend/__init__.py keystone/tests/unit/backend/core_ldap.py keystone/tests/unit/backend/core_sql.py keystone/tests/unit/catalog/__init__.py keystone/tests/unit/catalog/test_backends.py keystone/tests/unit/catalog/test_core.py keystone/tests/unit/common/__init__.py keystone/tests/unit/common/test_cache.py keystone/tests/unit/common/test_database_conflicts.py keystone/tests/unit/common/test_json_home.py keystone/tests/unit/common/test_notifications.py keystone/tests/unit/common/test_provider_api.py keystone/tests/unit/common/test_rbac_enforcer.py keystone/tests/unit/common/test_resource_options_common.py keystone/tests/unit/common/test_sql_core.py keystone/tests/unit/common/test_utils.py keystone/tests/unit/common/sql/__init__.py keystone/tests/unit/common/sql/test_upgrades.py keystone/tests/unit/config_files/backend_ldap.conf keystone/tests/unit/config_files/backend_ldap_pool.conf keystone/tests/unit/config_files/backend_ldap_sql.conf keystone/tests/unit/config_files/backend_liveldap.conf keystone/tests/unit/config_files/backend_multi_ldap_sql.conf keystone/tests/unit/config_files/backend_pool_liveldap.conf keystone/tests/unit/config_files/backend_sql.conf keystone/tests/unit/config_files/backend_tls_liveldap.conf keystone/tests/unit/config_files/test_auth_plugin.conf keystone/tests/unit/config_files/domain_configs_default_ldap_one_sql/keystone.domain1.conf keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.Default.conf 
keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain1.conf keystone/tests/unit/config_files/domain_configs_multi_ldap/keystone.domain2.conf keystone/tests/unit/config_files/domain_configs_one_extra_sql/keystone.domain2.conf keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.Default.conf keystone/tests/unit/config_files/domain_configs_one_sql_one_ldap/keystone.domain1.conf keystone/tests/unit/contrib/__init__.py keystone/tests/unit/contrib/federation/__init__.py keystone/tests/unit/contrib/federation/test_utils.py keystone/tests/unit/credential/__init__.py keystone/tests/unit/credential/test_backend_sql.py keystone/tests/unit/credential/test_fernet_provider.py keystone/tests/unit/endpoint_policy/__init__.py keystone/tests/unit/endpoint_policy/backends/__init__.py keystone/tests/unit/endpoint_policy/backends/test_base.py keystone/tests/unit/endpoint_policy/backends/test_sql.py keystone/tests/unit/external/README.rst keystone/tests/unit/external/__init__.py keystone/tests/unit/external/test_timeutils.py keystone/tests/unit/federation/__init__.py keystone/tests/unit/federation/test_core.py keystone/tests/unit/federation/test_utils.py keystone/tests/unit/identity/__init__.py keystone/tests/unit/identity/test_backend_sql.py keystone/tests/unit/identity/test_backends.py keystone/tests/unit/identity/test_core.py keystone/tests/unit/identity/backends/__init__.py keystone/tests/unit/identity/backends/fake_driver.py keystone/tests/unit/identity/backends/test_base.py keystone/tests/unit/identity/backends/test_ldap.py keystone/tests/unit/identity/backends/test_ldap_common.py keystone/tests/unit/identity/backends/test_sql.py keystone/tests/unit/identity/shadow_users/__init__.py keystone/tests/unit/identity/shadow_users/test_backend.py keystone/tests/unit/identity/shadow_users/test_core.py keystone/tests/unit/ksfixtures/__init__.py keystone/tests/unit/ksfixtures/auth_plugins.py keystone/tests/unit/ksfixtures/backendloader.py 
keystone/tests/unit/ksfixtures/cache.py keystone/tests/unit/ksfixtures/database.py keystone/tests/unit/ksfixtures/hacking.py keystone/tests/unit/ksfixtures/jws_key_repository.py keystone/tests/unit/ksfixtures/key_repository.py keystone/tests/unit/ksfixtures/ldapdb.py keystone/tests/unit/ksfixtures/logging.py keystone/tests/unit/ksfixtures/policy.py keystone/tests/unit/ksfixtures/temporaryfile.py keystone/tests/unit/ksfixtures/warnings.py keystone/tests/unit/limit/__init__.py keystone/tests/unit/limit/test_backends.py keystone/tests/unit/policy/__init__.py keystone/tests/unit/policy/test_backends.py keystone/tests/unit/policy/backends/__init__.py keystone/tests/unit/policy/backends/test_base.py keystone/tests/unit/policy/backends/test_sql.py keystone/tests/unit/receipt/__init__.py keystone/tests/unit/receipt/test_fernet_provider.py keystone/tests/unit/receipt/test_receipt_serialization.py keystone/tests/unit/resource/__init__.py keystone/tests/unit/resource/test_backends.py keystone/tests/unit/resource/test_core.py keystone/tests/unit/resource/backends/__init__.py keystone/tests/unit/resource/backends/test_sql.py keystone/tests/unit/resource/config_backends/__init__.py keystone/tests/unit/resource/config_backends/test_sql.py keystone/tests/unit/saml2/idp_saml2_metadata.xml keystone/tests/unit/saml2/signed_saml2_assertion.xml keystone/tests/unit/server/__init__.py keystone/tests/unit/server/test_keystone_flask.py keystone/tests/unit/tests/__init__.py keystone/tests/unit/tests/test_core.py keystone/tests/unit/tests/test_utils.py keystone/tests/unit/token/__init__.py keystone/tests/unit/token/test_fernet_provider.py keystone/tests/unit/token/test_jws_provider.py keystone/tests/unit/token/test_token_serialization.py keystone/tests/unit/trust/__init__.py keystone/tests/unit/trust/test_backends.py keystone/token/__init__.py keystone/token/provider.py keystone/token/token_formatters.py keystone/token/providers/__init__.py keystone/token/providers/base.py 
keystone/token/providers/fernet/__init__.py keystone/token/providers/fernet/core.py keystone/token/providers/jws/__init__.py keystone/token/providers/jws/core.py keystone/trust/__init__.py keystone/trust/core.py keystone/trust/schema.py keystone/trust/backends/__init__.py keystone/trust/backends/base.py keystone/trust/backends/sql.py keystone_tempest_plugin/README.rst playbooks/enable-fips.yaml rally-jobs/README.rst rally-jobs/keystone.yaml releasenotes/notes/.placeholder releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml releasenotes/notes/add-description-to-role-88ab5bb8a96cc002.yaml releasenotes/notes/add-expires-at-int-to-trusts-60ae3c5d0c00808a.yaml releasenotes/notes/add-limit-description-c1f42641d9c6c33d.yaml releasenotes/notes/add-unified-limit-apis-c9ebc5116bc2cf93.yaml releasenotes/notes/add_bcrypt_sha256_algo-d6b146a59df9373c.yaml releasenotes/notes/add_password_expires_at_to_user_response-22f14ab629c48bc2.yaml releasenotes/notes/admin_token-a5678d712783c145.yaml releasenotes/notes/admin_token-c634ec12fc714255.yaml releasenotes/notes/bcrypt_truncation_fix-674dc5d7f1e776f2.yaml releasenotes/notes/bootstrap-update-endpoint-7a63a2329822b6e7.yaml releasenotes/notes/bp-allow-expired-f5d845b9601bc1ef.yaml releasenotes/notes/bp-application-credentials-c699f1f17c7d4e2f.yaml releasenotes/notes/bp-basic-default-roles-4ff6502b6ac57d48.yaml releasenotes/notes/bp-domain-config-as-stable-716ca5ab33c0cc42.yaml releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml releasenotes/notes/bp-json-web-tokens-37ce3bcd1356cf1b.yaml releasenotes/notes/bp-manage-migration-c398963a943a89fe.yaml releasenotes/notes/bp-mfa-auth-receipt-8b459431c1f360ce.yaml releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml 
releasenotes/notes/bp-password-expires-validation-4b32fe7032595932.yaml releasenotes/notes/bp-pci-dss-notifications-808a205a637bac25.yaml releasenotes/notes/bp-pci-dss-password-requirements-api-87bc724b2aa554f7.yaml releasenotes/notes/bp-pci-dss-query-password-expired-users-a7c96a3843bb9abc.yaml releasenotes/notes/bp-per-user-auth-plugin-reqs-feb95fd907be4b40.yaml releasenotes/notes/bp-policy-in-code-722372a27291b9cd.yaml releasenotes/notes/bp-shadow-mapping-06fc7c71a401d707.yaml releasenotes/notes/bp-strict-two-level-model.yaml releasenotes/notes/bp-support-federated-attr-94084d4073f50280.yaml releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml releasenotes/notes/bp-system-scope-7d236ee5992d4e20.yaml releasenotes/notes/bp-upgrade-checks-0dc692a392a96879.yaml releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml releasenotes/notes/bp-whitelist-extension-for-app-creds-90e5bcd7b2b78b02.yaml releasenotes/notes/bug-1017606-98313bb4c1edf250.yaml releasenotes/notes/bug-1291157-00b5c714a097e84c.yaml releasenotes/notes/bug-1332058-f25e2de40411b711.yaml releasenotes/notes/bug-1473292-c21481e6aec29ec2.yaml releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml releasenotes/notes/bug-1519210-de76097c974f9c93.yaml releasenotes/notes/bug-1523369-4d42c841b6e7e54e.yaml releasenotes/notes/bug-1524030-0814724d5c2b7c8d.yaml releasenotes/notes/bug-1524030-ccff6b0ec9d1cbf2.yaml releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml releasenotes/notes/bug-1547684-911aed68a0d3df17.yaml releasenotes/notes/bug-1561054-dbe88b552a936a05.yaml releasenotes/notes/bug-1563101-134df5b99ea48f00.yaml releasenotes/notes/bug-1571878-1bcaea5337905af0.yaml releasenotes/notes/bug-1582585-a368ac5a252ec84f.yaml releasenotes/notes/bug-1590587-domain-specific-role-assignment-8f120604a6625852.yaml releasenotes/notes/bug-1594482-52a5dd1d8477b694.yaml releasenotes/notes/bug-1611102-e1348cbec9b1110a.yaml 
releasenotes/notes/bug-1613466-credential-update-ec2-type-8fb51ff3ad3a449c.yaml releasenotes/notes/bug-1615014-b30f606a2d202428.yaml releasenotes/notes/bug-1616424-c46ba773f7ac40ae.yaml releasenotes/notes/bug-1622310-c501cf77437fdfa6.yaml releasenotes/notes/bug-1636950-8fa1a47fce440977.yaml releasenotes/notes/bug-1638603-354ee4167e6e.yaml releasenotes/notes/bug-1641625-fe463874dc5edb10.yaml releasenotes/notes/bug-1641639-b9accc163e61ca15.yaml releasenotes/notes/bug-1641645-516709f9da3de26f.yaml releasenotes/notes/bug-1641654-8630ce7bcde43a7e.yaml releasenotes/notes/bug-1641660-f938267e1ec54071.yaml releasenotes/notes/bug-1641816-8b39f3f73359c778.yaml releasenotes/notes/bug-1642212-9964dfd3af0184bd.yaml releasenotes/notes/bug-1642348-83d4c86ad3984d75.yaml releasenotes/notes/bug-1642457-4533f9810a8cd927.yaml releasenotes/notes/bug-1642687-5497fb56fe86806d.yaml releasenotes/notes/bug-1642687-c7ab1c9be152db20.yaml releasenotes/notes/bug-1642692-d669c8fcf9e171d9.yaml releasenotes/notes/bug-1645487-ca22c216ec26cc9b.yaml releasenotes/notes/bug-1649138-c53974f6bb0eab14.yaml releasenotes/notes/bug-1649446-efff94143823755d.yaml releasenotes/notes/bug-1649616-b835d1dac3401e8c.yaml releasenotes/notes/bug-1656076-c4422270f73b43b.yaml releasenotes/notes/bug-1659730-17834ba2dde668ae.yaml releasenotes/notes/bug-1659995-f3e716de743b7291.yaml releasenotes/notes/bug-1670382-ee851ba4f364d608.yaml releasenotes/notes/bug-1676497-92271e25f642e2de.yaml releasenotes/notes/bug-1684994-264fb8f182ced180.yaml releasenotes/notes/bug-1687593-95e1568291ecd70b.yaml releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml releasenotes/notes/bug-1696574-15a728396350a95a.yaml releasenotes/notes/bug-1700852-de775d0eb2ddfdd1.yaml releasenotes/notes/bug-1701324-739a31f38037f77b.yaml releasenotes/notes/bug-1702211-abb59adda73fd78e.yaml releasenotes/notes/bug-1703369-9a901d627a1e0316.yaml releasenotes/notes/bug-1703666-b8a990f2bf5b62f0.yaml releasenotes/notes/bug-1704205-bc0570feeb3ec5c4.yaml 
releasenotes/notes/bug-1705485-7a1ad17b9cc99b9d.yaml releasenotes/notes/bug-1718747-50d39fa87bdbb12b.yaml releasenotes/notes/bug-1724645-a94659dfd0f45b9a.yaml releasenotes/notes/bug-1727099-1af277b35db34372.yaml releasenotes/notes/bug-1727726-0b47608811a2cd16.yaml releasenotes/notes/bug-1728907-bab6769ab46bd8aa.yaml releasenotes/notes/bug-1729933-4a09201e9dface2a.yaml releasenotes/notes/bug-1733754-4d9d3042b8501ec6.yaml releasenotes/notes/bug-1734244-1b4ea83baa72566d.yaml releasenotes/notes/bug-1735250-b60332a7f288cf94.yaml releasenotes/notes/bug-1736875-c790f568c5f4d671.yaml releasenotes/notes/bug-1738895-342864cd0285bc42.yaml releasenotes/notes/bug-1740951-82b7e4bd608742ab.yaml releasenotes/notes/bug-1744195-a7154ac2e8556efc.yaml releasenotes/notes/bug-1746599-848a1163e52ac0a6.yaml releasenotes/notes/bug-1747694-48c8caa4871300e3.yaml releasenotes/notes/bug-1748027-decc2e11154b97cf.yaml releasenotes/notes/bug-1748970-eb63ad2030e296f3.yaml releasenotes/notes/bug-1749264-676ca02902bcd169.yaml releasenotes/notes/bug-1749267-96153d2fa6868f67.yaml releasenotes/notes/bug-1750415-95ede3a9685b6e0c.yaml releasenotes/notes/bug-1750660-e2a360ddd6790fc4.yaml releasenotes/notes/bug-1750669-dfce859550126f03.yaml releasenotes/notes/bug-1750673-b53f74944d767ae9.yaml releasenotes/notes/bug-1750676-cf70c1a27b2c8de3.yaml releasenotes/notes/bug-1750678-88a38851ca80fc64.yaml releasenotes/notes/bug-1751045-f950e3fb85e2b573.yaml releasenotes/notes/bug-1753584-e052bc7805f001b4.yaml releasenotes/notes/bug-1753585-7e11213743754999.yaml releasenotes/notes/bug-1754048-correct-federated-domain-47cb889d88d7770a.yaml releasenotes/notes/bug-1754677-13ee75ed1b473f26.yaml releasenotes/notes/bug-1755874-9951f77c6d18431c.yaml releasenotes/notes/bug-1756190-0e5d86d334555931.yaml releasenotes/notes/bug-1757022-664d0b0db1242bf8.yaml releasenotes/notes/bug-1757151-43eb3baaa175f904.yaml releasenotes/notes/bug-1759289-466cdf4514de3498.yaml releasenotes/notes/bug-1760205-87dedd6d8812db3f.yaml 
releasenotes/notes/bug-1760521-fec5c88af214401f.yaml releasenotes/notes/bug-1760809-711df870a9d67c0d.yaml releasenotes/notes/bug-1763824-3d2f5169af9d42f.yaml releasenotes/notes/bug-1765193-b40318b9fb5d1c7b.yaml releasenotes/notes/bug-1773967-b59517a09e0e6141.yaml releasenotes/notes/bug-1774229-cb968e95c9d81c4d.yaml releasenotes/notes/bug-1776504-keystone-conversion-to-flask-372a5654a55675c6.yaml releasenotes/notes/bug-1778109-ea15ce6a8207f857.yaml releasenotes/notes/bug-1778945-b7f2db3052525ca8.yaml releasenotes/notes/bug-1779889-12eb5edf4cc93a1d.yaml releasenotes/notes/bug-1779903-f2b22cf23a9e01f9.yaml releasenotes/notes/bug-1780159-095ffa0e53be2464.yaml releasenotes/notes/bug-1780503-70ca1ba3f428dd41.yaml releasenotes/notes/bug-1782704-0b053eaf5d801dee.yaml releasenotes/notes/bug-1782922-db822fda486ac773.yaml releasenotes/notes/bug-1784536-9d1d1e149c605a1d.yaml releasenotes/notes/bug-1785164-2b7ed29266eb4792.yaml releasenotes/notes/bug-1787874-13499ec227b8e26c.yaml releasenotes/notes/bug-1788415-3190279e9c900f76.yaml releasenotes/notes/bug-1788694-4dc8b3ec47fc6084.yaml releasenotes/notes/bug-1789450-9dec1383ffd3de01.yaml releasenotes/notes/bug-1792026-2de8345a89e2256b.yaml releasenotes/notes/bug-1794376-53ce14528f00f01d.yaml releasenotes/notes/bug-1794527-866b1caff67977f3.yaml releasenotes/notes/bug-1794864-3116bf165a146be6.yaml releasenotes/notes/bug-1796887-eaea84e3f9a8ff9f.yaml releasenotes/notes/bug-1801095-6e28d7a86719da74.yaml releasenotes/notes/bug-1801873-0eb9a5ec3e801190.yaml releasenotes/notes/bug-1804292-0107869c7029f79e.yaml releasenotes/notes/bug-1804446-1a281eadbb044070.yaml releasenotes/notes/bug-1804462-59ad43f98242dea0.yaml releasenotes/notes/bug-1804463-74537652166cf656.yaml releasenotes/notes/bug-1804482-aa95619320d098fa.yaml releasenotes/notes/bug-1804483-1d9ccfcb24f25f51.yaml releasenotes/notes/bug-1804516-24b0b10ed6fe0589.yaml releasenotes/notes/bug-1804517-a351aec088fee066.yaml releasenotes/notes/bug-1804519-8384a9ead261d4c2.yaml 
releasenotes/notes/bug-1804520-d124599967923052.yaml releasenotes/notes/bug-1804521-3c0d9f567e8f532f.yaml releasenotes/notes/bug-1804522-00df902cd2d74ee3.yaml releasenotes/notes/bug-1804523-d1768909b13b167e.yaml releasenotes/notes/bug-1805363-0b85d71917ad09d1.yaml releasenotes/notes/bug-1805366-670867516c6fc4bc.yaml releasenotes/notes/bug-1805368-ea32c2db2ae57225.yaml releasenotes/notes/bug-1805369-ed98d3fcfafb5c43.yaml releasenotes/notes/bug-1805371-249c8c9b562ab371.yaml releasenotes/notes/bug-1805372-af4ebf4b19500b72.yaml releasenotes/notes/bug-1805400-c192be936d277ade.yaml releasenotes/notes/bug-1805402-75d0d93f31af620f.yaml releasenotes/notes/bug-1805403-c003627a64768716.yaml releasenotes/notes/bug-1805406-252b45d443af20b3.yaml releasenotes/notes/bug-1805409-8bc6cc9f1c5bc672.yaml releasenotes/notes/bug-1805880-0032024ea6b83563.yaml releasenotes/notes/bug-1805880-3fc6b30309a4370f.yaml releasenotes/notes/bug-1806713-cf5feab23fc78a23.yaml releasenotes/notes/bug-1806762-08ff9eecdc03c554.yaml releasenotes/notes/bug-1806762-09f414995924db23.yaml releasenotes/notes/bug-1806762-0b7356ace200a5d3.yaml releasenotes/notes/bug-1806762-2092fee9f6c87dc3.yaml releasenotes/notes/bug-1806762-c3bfc71cb9bb94f3.yaml releasenotes/notes/bug-1806762-daed3e27f58f0f6d.yaml releasenotes/notes/bug-1809116-b65502f3b606b060.yaml releasenotes/notes/bug-1810393-5a7d379842c51d9b.yaml releasenotes/notes/bug-1811605-9d23080d7e949c25.yaml releasenotes/notes/bug-1813085-cf24b204e95fd7f5.yaml releasenotes/notes/bug-1814589-f3e7f554bee1c317.yaml releasenotes/notes/bug-1815771-ae0e4118c552f01e.yaml releasenotes/notes/bug-1816076-ba39508e6ade529e.yaml releasenotes/notes/bug-1816927-e17f4e596e611380.yaml releasenotes/notes/bug-1817313-c11481e6eed29ec2.yaml releasenotes/notes/bug-1818725-96d698e22e648764.yaml releasenotes/notes/bug-1818734-d753bfae60ffd030.yaml releasenotes/notes/bug-1818736-98ea186a074056f4.yaml releasenotes/notes/bug-1818845-05f8c3af5ea9abc7.yaml 
releasenotes/notes/bug-1818846-d1a8c77d20659ad6.yaml releasenotes/notes/bug-1819036-e2d24655c70d0aad.yaml releasenotes/notes/bug-1820333-356dcc8bf9f73fed.yaml releasenotes/notes/bug-1823258-9649b56a440b5ae1.yaml releasenotes/notes/bug-1823258-9f93dbdc0fa8441d.yaml releasenotes/notes/bug-1827431-2f078c13dfc9a02a.yaml releasenotes/notes/bug-1831918-c70cf87ef086d871.yaml releasenotes/notes/bug-1832265-cb76ccf505c2d9d1.yaml releasenotes/notes/bug-1833739-f962e8caf3e22068.yaml releasenotes/notes/bug-1836568-66d853a1f22c5530.yaml releasenotes/notes/bug-1839133-24570c9fbacb530d.yaml releasenotes/notes/bug-1839577-1226d86ea0744055.yaml releasenotes/notes/bug-1840291-35af1ac7ba06e166.yaml releasenotes/notes/bug-1841486-425f367925f5e03f.yaml releasenotes/notes/bug-1843609-8498b132222596b7.yaml releasenotes/notes/bug-1844157-7808af9bcea0429d.yaml releasenotes/notes/bug-1844194-48ae60db49f91bd4.yaml releasenotes/notes/bug-1844207-x27a31f3403xfd7y.yaml releasenotes/notes/bug-1844461-08a8bdc5f613b88d.yaml releasenotes/notes/bug-1844664-905cf6cad2e032a7.yaml releasenotes/notes/bug-1848238-f6533644f7907358.yaml releasenotes/notes/bug-1848342-317c9e4afa65a3ff.yaml releasenotes/notes/bug-1855080-08b28181b7cb2470.yaml releasenotes/notes/bug-1856881-277103af343187f1.yaml releasenotes/notes/bug-1856904-101af15bb48eb3ca.yaml releasenotes/notes/bug-1856962-2c87d541da61c727.yaml releasenotes/notes/bug-1858012-584267ada7e33f2c.yaml releasenotes/notes/bug-1872732-7261816d0b170008.yaml releasenotes/notes/bug-1872733-2377f456a57ad32c.yaml releasenotes/notes/bug-1872735-0989e51d2248ce1e.yaml releasenotes/notes/bug-1872737-f8e1ad3b6705b766.yaml releasenotes/notes/bug-1872753-e2a934eac919ccde.yaml releasenotes/notes/bug-1872755-2c81d3267b89f124.yaml releasenotes/notes/bug-1873290-ff7f8e4cee15b75a.yaml releasenotes/notes/bug-1878938-70ee2af6fdf66004.yaml releasenotes/notes/bug-1880252-51036d5353125e15.yaml releasenotes/notes/bug-1885753-51df25f3ff1d9ae8.yaml 
releasenotes/notes/bug-1886017-bc2ad648d57101a2.yaml releasenotes/notes/bug-1889936-78d6853b5212b8f1.yaml releasenotes/notes/bug-1896125-b17a4d12730fe493.yaml releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml releasenotes/notes/bug-1901207-13762f85b8a04481.yaml releasenotes/notes/bug-1901654-69b9f35d11cd0c75.yaml releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml releasenotes/notes/bug-1929066-6e741c9182620a37.yaml releasenotes/notes/bug-1941020-cleanup-541a2d372a1cf4cd.yaml releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml releasenotes/notes/bug-1951632-11272e49e2fa439d.yaml releasenotes/notes/bug-2074018-28f7bbe8f28f5efe.yaml releasenotes/notes/bug1828565-0790c4c60ba34100.yaml releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml releasenotes/notes/bug_1543048_and_1668503-7ead4e15faaab778.yaml releasenotes/notes/bug_1674415-e8a7345aa2b05ab7.yaml releasenotes/notes/bug_1688188-256e3572295231a1.yaml releasenotes/notes/bug_1698900-f195125bf341d887.yaml releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml releasenotes/notes/convert-keystone-to-flask-80d980e239b662b0.yaml releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml releasenotes/notes/deprecate-json-formatted-policy-file-95f6307f88358f58.yaml releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml releasenotes/notes/deprecate-policies-api-b104fbd1d2367b1b.yaml releasenotes/notes/deprecate-templated-catalog-driver-f811a6040abdc4a8.yaml releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml releasenotes/notes/deprecated-as-of-newton-be1d8dbcc6bdc68f.yaml releasenotes/notes/deprecated-as-of-ocata-a5b2f1e3e39f818e.yaml releasenotes/notes/deprecated-as-of-pike-506f9aca91674550.yaml releasenotes/notes/deprecated-as-of-queens-8ad7f826e4f08f57.yaml 
releasenotes/notes/deprecated-as-of-rocky-60b2fa05d07d3a28.yaml releasenotes/notes/deprecated-as-of-stein-0166965502cb3be2.yaml releasenotes/notes/deprecated-as-of-train-de3fe41ff2251385.yaml releasenotes/notes/deprecated-socket_timeout-option-d3358b4f2310706c.yaml releasenotes/notes/domain-level-limit-support-60e1e330d06227ed.yaml releasenotes/notes/domain-manager-persona-7921587ce2fab4fd.yaml releasenotes/notes/dont-enforce-get-s3tokens-ec2tokens-62b90b199e8075d8.yaml releasenotes/notes/drop-project-id-fk-b683b414e1585be8.yaml releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml releasenotes/notes/eventlet-cleanup-f35fc5f83c16ea1c.yaml releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml releasenotes/notes/filter-mappings-by-entity-77162a146d375385.yaml releasenotes/notes/fix_application_credentials_implied_roles-b445fa56cb335a4d.yaml releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml releasenotes/notes/identity_driver_new_change_password_method-e8c0e06795bca2d8.yaml releasenotes/notes/immutable-resource-options-bug-1807751-acc1e3c689484337.yaml releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml releasenotes/notes/implied-roles-stable-8b293e187c5620ad.yaml releasenotes/notes/improve-driver-donfiguration-ecedaf6ad0c3f9d2.yaml releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml releasenotes/notes/integrate-osprofiler-ad0e16a542b12899.yaml releasenotes/notes/is-admin-24b34238c83b3a82.yaml releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml releasenotes/notes/limits-api-refactor-05abf9e6c2e75852.yaml 
releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml releasenotes/notes/mapping_populate-521d92445505b8a3.yaml releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml releasenotes/notes/oauth1-headers-content-type-9a9245d9bbec8f8e.yaml releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml releasenotes/notes/password-created_at-nullable-b3c284be50d93ef5.yaml releasenotes/notes/policy_new_federated_projects_for_user-dcd7bd148efef049.yaml releasenotes/notes/pre-cache-tokens-73450934918af26b.yaml releasenotes/notes/project-tags-1e72a6779d9d02c5.yaml releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml releasenotes/notes/python3-support-e4189e0a1a6e2e4f.yaml releasenotes/notes/randomize_urls-c0c19f48b2bfa299.yaml releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml releasenotes/notes/remove-sqlalchemy-migrate-a4fa47685c7e28c6.yaml releasenotes/notes/remove-token-auth-middleware-5ea3b3734ce1d9e6.yaml releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml releasenotes/notes/removed-as-of-newton-721c06b5dcb1b34a.yaml releasenotes/notes/removed-as-of-ocata-436bb4b839e74494.yaml releasenotes/notes/removed-as-of-pike-deadbeefdeadbeef.yaml releasenotes/notes/removed-as-of-queens-94c04e88c08f89aa.yaml releasenotes/notes/removed-as-of-rocky-f44c3ba7c3e73d01.yaml releasenotes/notes/removed-as-of-stein-5eb23253b72ab54e.yaml releasenotes/notes/removed-as-of-train-92b2942a680eb859.yaml releasenotes/notes/removed-as-of-ussuri-d2f6ef8901ef54ed.yaml releasenotes/notes/request_context-e143ba9c446a5952.yaml 
releasenotes/notes/resource-backend-sql-only-03154d8712b36bd0.yaml releasenotes/notes/resource-driver-33793dd5080ee4d2.yaml releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml releasenotes/notes/scope-and-default-roles-a733c235731bb558.yaml releasenotes/notes/support_encrypted_credentials_at_rest-93dcb67b3508e91a.yaml releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml releasenotes/notes/tenant_id_to_project_id-42d95d93011785cb.yaml releasenotes/notes/token-formatter-ec58aba00fa83706.yaml releasenotes/notes/token-provider-refactor-a3a64146807daf36.yaml releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml releasenotes/notes/totp-40d93231714c6a20.yaml releasenotes/notes/unified-limit-api-improvment-b34d18769d18a0a7.yaml releasenotes/notes/use-correct-inspect-8142e317c1e39c2a.yaml releasenotes/notes/use-pyldap-6e811c28bf350d6d.yaml releasenotes/notes/use-python-ldap-0318ff7798bdd98d.yaml releasenotes/notes/v2-dep-d6e7ab2d08119549.yaml releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml releasenotes/source/2023.1.rst releasenotes/source/2023.2.rst releasenotes/source/2024.1.rst releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/yoga.rst releasenotes/source/zed.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder 
releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po tools/cover.sh tools/fast8.sh tools/sample_data.sh tools/test-setup.sh././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/dependency_links.txt0000664000175000017500000000000100000000000023064 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/entry_points.txt0000664000175000017500000000732300000000000022321 0ustar00zuulzuul00000000000000[console_scripts] keystone-manage = keystone.cmd.manage:main keystone-status = keystone.cmd.status:main [keystone.application_credential] sql = keystone.application_credential.backends.sql:ApplicationCredential [keystone.assignment] sql = keystone.assignment.backends.sql:Assignment [keystone.auth.application_credential] default = keystone.auth.plugins.application_credential:ApplicationCredential [keystone.auth.external] DefaultDomain = keystone.auth.plugins.external:DefaultDomain Domain = keystone.auth.plugins.external:Domain default = keystone.auth.plugins.external:DefaultDomain [keystone.auth.kerberos] default = keystone.auth.plugins.external:KerberosDomain [keystone.auth.mapped] default = keystone.auth.plugins.mapped:Mapped [keystone.auth.oauth1] default = keystone.auth.plugins.oauth1:OAuth [keystone.auth.openid] default = keystone.auth.plugins.mapped:Mapped [keystone.auth.password] default = keystone.auth.plugins.password:Password [keystone.auth.saml2] default = keystone.auth.plugins.mapped:Mapped [keystone.auth.token] default = keystone.auth.plugins.token:Token [keystone.auth.totp] default = keystone.auth.plugins.totp:TOTP [keystone.auth.x509] default = 
keystone.auth.plugins.mapped:Mapped [keystone.catalog] sql = keystone.catalog.backends.sql:Catalog templated = keystone.catalog.backends.templated:Catalog [keystone.credential] sql = keystone.credential.backends.sql:Credential [keystone.credential.provider] fernet = keystone.credential.providers.fernet:Provider [keystone.endpoint_filter] sql = keystone.catalog.backends.sql:Catalog [keystone.endpoint_policy] sql = keystone.endpoint_policy.backends.sql:EndpointPolicy [keystone.federation] sql = keystone.federation.backends.sql:Federation [keystone.identity] ldap = keystone.identity.backends.ldap:Identity sql = keystone.identity.backends.sql:Identity [keystone.identity.id_generator] sha256 = keystone.identity.id_generators.sha256:Generator [keystone.identity.id_mapping] sql = keystone.identity.mapping_backends.sql:Mapping [keystone.identity.shadow_users] sql = keystone.identity.shadow_backends.sql:ShadowUsers [keystone.oauth1] sql = keystone.oauth1.backends.sql:OAuth1 [keystone.policy] rules = keystone.policy.backends.rules:Policy sql = keystone.policy.backends.sql:Policy [keystone.receipt.provider] fernet = keystone.receipt.providers.fernet:Provider [keystone.resource] sql = keystone.resource.backends.sql:Resource [keystone.resource.domain_config] sql = keystone.resource.config_backends.sql:DomainConfig [keystone.revoke] sql = keystone.revoke.backends.sql:Revoke [keystone.role] sql = keystone.assignment.role_backends.sql:Role [keystone.server_middleware] cors = oslo_middleware:CORS debug = oslo_middleware:Debug http_proxy_to_wsgi = oslo_middleware:HTTPProxyToWSGI osprofiler = osprofiler.web:WsgiMiddleware request_id = oslo_middleware:RequestId sizelimit = oslo_middleware:RequestBodySizeLimiter [keystone.token.provider] fernet = keystone.token.providers.fernet:Provider jws = keystone.token.providers.jws:Provider [keystone.trust] sql = keystone.trust.backends.sql:Trust [keystone.unified_limit] sql = keystone.limit.backends.sql:UnifiedLimit 
[keystone.unified_limit.model] flat = keystone.limit.models.flat:FlatModel strict_two_level = keystone.limit.models.strict_two_level:StrictTwoLevelModel [oslo.config.opts] keystone = keystone.conf.opts:list_opts [oslo.config.opts.defaults] keystone = keystone.conf:set_external_opts_defaults [oslo.policy.enforcer] keystone = keystone.common.rbac_enforcer.policy:get_enforcer [oslo.policy.policies] keystone = keystone.common.policies:list_rules [wsgi_scripts] keystone-wsgi-admin = keystone.server.wsgi:initialize_admin_application keystone-wsgi-public = keystone.server.wsgi:initialize_public_application ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/not-zip-safe0000664000175000017500000000000100000000000021244 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/pbr.json0000664000175000017500000000006000000000000020470 0ustar00zuulzuul00000000000000{"git_version": "97431ec99", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/requires.txt0000664000175000017500000000170300000000000021417 0ustar00zuulzuul00000000000000Flask!=0.11,>=1.0.2 Flask-RESTful>=0.3.5 PyJWT>=1.6.1 SQLAlchemy>=1.4.0 WebOb>=1.7.1 bcrypt>=3.1.3 cryptography>=2.7 dogpile.cache>=1.0.2 jsonschema>=3.2.0 keystonemiddleware>=7.0.0 msgpack>=0.5.0 oauthlib>=0.6.2 oslo.cache>=1.26.0 oslo.config>=6.8.0 oslo.context>=2.22.0 oslo.db>=6.0.0 oslo.i18n>=3.15.3 oslo.log>=3.44.0 oslo.messaging>=5.29.0 oslo.middleware>=3.31.0 oslo.policy>=3.10.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.upgradecheck>=1.3.0 oslo.utils>=3.33.0 osprofiler>=1.4.0 passlib>=1.7.0 pbr!=2.1.0,>=2.0.0 pycadf!=2.0.0,>=1.1.0 pysaml2>=5.0.0 python-keystoneclient>=3.8.0 scrypt>=0.8.0 stevedore>=1.20.0 [ldap] ldappool>=2.3.1 
python-ldap>=3.0.0 [memcache] python-memcached>=1.56 [test] WebTest>=2.0.27 bandit>=1.1.0 bashate~=2.1.0 coverage!=4.4,>=4.0 fixtures>=3.0.0 flake8-docstrings freezegun>=0.3.6 hacking lxml>=4.5.0 oslo.db[fixtures,mysql,postgresql]>=6.0.0 oslotest>=3.2.0 requests>=2.14.2 stestr>=1.0.0 tempest>=17.1.0 testtools>=2.2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867786.0 keystone-26.0.0/keystone.egg-info/top_level.txt0000664000175000017500000000001100000000000021540 0ustar00zuulzuul00000000000000keystone ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/keystone_tempest_plugin/0000775000175000017500000000000000000000000020443 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/keystone_tempest_plugin/README.rst0000664000175000017500000000016100000000000022130 0ustar00zuulzuul00000000000000===== MOVED ===== The keystone tempest plugin has moved to http://opendev.org/openstack/keystone-tempest-plugin ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/playbooks/0000775000175000017500000000000000000000000015466 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/playbooks/enable-fips.yaml0000664000175000017500000000010400000000000020532 0ustar00zuulzuul00000000000000- hosts: all tasks: - include_role: name: enable-fips ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1727867786.574113 keystone-26.0.0/rally-jobs/0000775000175000017500000000000000000000000015541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 
keystone-26.0.0/rally-jobs/README.rst0000664000175000017500000000034300000000000017230 0ustar00zuulzuul00000000000000This directory contains rally benchmark scenarios to be run by OpenStack CI. * more about rally: https://rally.readthedocs.io/en/latest/ * how to add rally-gates: https://rally.readthedocs.io/en/latest/quick_start/gates.html ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/rally-jobs/keystone.yaml0000664000175000017500000000527100000000000020273 0ustar00zuulzuul00000000000000--- KeystoneBasic.create_user: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_delete_user: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_users: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_user_update_password: - args: password_length: 10 runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_tenants: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.get_entities: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.add_and_remove_user_role: - runner: type: "constant" times: 100 concurrency: 10 context: users: tenants: 5 users_per_tenant: 4 sla: failure_rate: max: 0 KeystoneBasic.create_and_delete_role: - runner: type: "constant" times: 100 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_add_and_list_user_roles: - runner: type: "constant" times: 100 concurrency: 10 context: users: tenants: 5 users_per_tenant: 4 sla: failure_rate: max: 0 KeystoneBasic.create_tenant: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_tenant_with_users: - args: users_per_tenant: 10 runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 
KeystoneBasic.create_update_and_delete_tenant: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_delete_service: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 KeystoneBasic.create_and_list_services: - runner: type: "constant" times: 50 concurrency: 10 sla: failure_rate: max: 0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/0000775000175000017500000000000000000000000016154 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6141124 keystone-26.0.0/releasenotes/notes/0000775000175000017500000000000000000000000017304 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000021555 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/Assignment_V9_driver-c22be069f7baccb0.yaml0000664000175000017500000000117500000000000026657 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The V8 Assignment driver interface is deprecated. Support for the V8 Assignment driver interface is planned to be removed in the 'O' release of OpenStack. other: - The list_project_ids_for_user(), list_domain_ids_for_user(), list_user_ids_for_project(), list_project_ids_for_groups(), list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and list_role_ids_for_groups_on_domain() methods have been removed from the V9 version of the Assignment driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/DomainSpecificRoles-fc5dd2ef74a1442c.yaml0000664000175000017500000000120200000000000026417 0ustar00zuulzuul00000000000000--- features: - > [`blueprint domain-specific-roles `_] Roles can now be optionally defined as domain specific. Domain specific roles are not referenced in policy files, rather they can be used to allow a domain to build their own private inference rules with implied roles. A domain specific role can be assigned to a domain or project within its domain, and any subset of global roles it implies will appear in a token scoped to the respective domain or project. The domain specific role itself, however, will not appear in the token. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/Role_V9_driver-971c3aae14d9963d.yaml0000664000175000017500000000044400000000000025247 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The V8 Role driver interface is deprecated. Support for the V8 Role driver interface is planned to be removed in the 'O' release of OpenStack. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/V9ResourceDriver-26716f97c0cc1a80.yaml0000664000175000017500000000026500000000000025511 0ustar00zuulzuul00000000000000--- deprecations: - The V8 Resource driver interface is deprecated. Support for the V8 Resource driver interface is planned to be removed in the 'O' release of OpenStack. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add-bootstrap-cli-192500228cc6e574.yaml0000664000175000017500000000141000000000000025467 0ustar00zuulzuul00000000000000--- features: - > [`blueprint bootstrap `_] keystone-manage now supports the bootstrap command on the CLI so that a keystone install can be initialized without the need of the admin_token filter in the paste-ini. security: - The use of admin_token filter is insecure compared to the use of a proper username/password. Historically the admin_token filter has been left enabled in Keystone after initialization due to the way CMS systems work. Moving to an out-of-band initialization using ``keystone-manage bootstrap`` will eliminate the security concerns around a static shared string that conveys admin access to keystone and therefore to the entire installation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add-description-to-role-88ab5bb8a96cc002.yaml0000664000175000017500000000030300000000000027105 0ustar00zuulzuul00000000000000--- features: - | [`bug 1669080 `_] Added support for a ``description`` attribute for V3 Identity Roles, see API docs for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add-expires-at-int-to-trusts-60ae3c5d0c00808a.yaml0000664000175000017500000000051600000000000027747 0ustar00zuulzuul00000000000000--- upgrade: - | The trusts table now has an expires_at_int column that represents the expiration time as an integer instead of a datetime object. This will prevent rounding errors related to the way date objects are stored in some versions of MySQL. The expires_at column remains, but will be dropped in Rocky. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add-limit-description-c1f42641d9c6c33d.yaml0000664000175000017500000000044200000000000026570 0ustar00zuulzuul00000000000000--- features: - > [`bug 1754185 `_] Registered limits and project limits now support an optional, nullable property called `description`. Users can create/update a registered limit or project limit with `description` now. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add-unified-limit-apis-c9ebc5116bc2cf93.yaml0000664000175000017500000000076700000000000026775 0ustar00zuulzuul00000000000000--- features: - > [`blueprint unified-limit `_] Keystone now supports unified limits. Two resouces called ``registered limit`` and ``limit`` are added and a batch of related APIs are supported as well. These APIs are experimental now. It means that they are not stable enough and may be changed without backward compatibility. Once unified limit feature are ready for consuming, the APIs will be marked as stable. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add_bcrypt_sha256_algo-d6b146a59df9373c.yaml0000664000175000017500000000033300000000000026622 0ustar00zuulzuul00000000000000--- features: - | Added support for the ``bcrypt_sha256`` password hashing algorythm, which does workaround limitation on a password length BCrypt have by running the password through HMAC-SHA2-256 first. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/add_password_expires_at_to_user_response-22f14ab629c48bc2.yaml0000664000175000017500000000014600000000000032752 0ustar00zuulzuul00000000000000--- upgrade: - We have added the ``password_expires_at`` attribute to the user response object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/admin_token-a5678d712783c145.yaml0000664000175000017500000000124400000000000024466 0ustar00zuulzuul00000000000000--- upgrade: - > [`bug 1473553 `_] The `keystone-paste.ini` must be updated to put the ``admin_token_auth`` middleware before ``build_auth_context``. See the sample `keystone-paste.ini` for the correct `pipeline` value. Having ``admin_token_auth`` after ``build_auth_context`` is deprecated and will not be supported in a future release. deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The ``admin_token_auth`` filter must now be placed before the ``build_auth_context`` filter in `keystone-paste.ini`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/admin_token-c634ec12fc714255.yaml0000664000175000017500000000116200000000000024611 0ustar00zuulzuul00000000000000--- security: - The admin_token method of authentication was never intended to be used for any purpose other than bootstrapping an install. However many deployments had to leave the admin_token method enabled due to restrictions on editing the paste file used to configure the web pipelines. To minimize the risk from this mechanism, the `admin_token` configuration value now defaults to a python `None` value. In addition, if the value is set to `None`, either explicitly or implicitly, the `admin_token` will not be enabled, and an attempt to use it will lead to a failed authentication. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bcrypt_truncation_fix-674dc5d7f1e776f2.yaml0000664000175000017500000000040500000000000027043 0ustar00zuulzuul00000000000000--- fixes: - | Passwords that are hashed using bcrypt are now truncated properly to the maximum allowed length by the algorythm. This solves regression, when passwords longer then 54 symbols are getting invalidated after the Keystone upgrade. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bootstrap-update-endpoint-7a63a2329822b6e7.yaml0000664000175000017500000000122100000000000027356 0ustar00zuulzuul00000000000000--- features: - | The ``keystone-manage bootstrap`` command can now be used to update existing endpoints idempotently, which is useful in conjunction with configuration management tools that use this command for both initialization and lifecycle management of keystone. upgrade: - | The ``keystone-manage bootstrap`` command will now update existing endpoints rather than skipping them if they already exist but are different from the values provided to the command. This is useful in conjunction with configuration management tools that use this command for both initialization and lifecycle management of keystone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-allow-expired-f5d845b9601bc1ef.yaml0000664000175000017500000000200700000000000025642 0ustar00zuulzuul00000000000000--- features: - > [`blueprint allow-expired `_] An `allow_expired` flag is added to the token validation call (``GET/HEAD /v3/auth/tokens``) that allows fetching a token that has expired. This allows for validating tokens in long running operations. 
upgrade: - > [`blueprint allow-expired `_] To allow long running operations to complete services must be able to fetch expired tokens via the ``allow_expired`` flag. The length of time a token is retrievable for beyond its traditional expiry is managed by the ``[token] allow_expired_window`` option and so the data must be retrievable for this amount of time. When using fernet tokens this means the key rotation period must exceed this time so older tokens are still decryptable. Ensure you do not rotate fernet keys faster than ``[token] expiration`` + ``[token] allow_expired_window`` seconds. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-application-credentials-c699f1f17c7d4e2f.yaml0000664000175000017500000000277200000000000027711 0ustar00zuulzuul00000000000000--- prelude: > This release adds support for Application Credentials, a new way to allow applications and automated tooling to authenticate with keystone. Rather than storing a username and password in an application's config file, which can pose security risks, you can now create an application credential to allow an application to authenticate and acquire a preset scope and role assignments. This is especially useful for LDAP and federated users, who can now delegate their cloud management tasks to a keystone-specific resource, rather than share their externally managed credentials with keystone and risk a compromise of those external systems. Users can delegate a subset of their role assignments to an application credential, allowing them to strategically limit their application's access to the minimum needed. Unlike passwords, a user can have more than one active application credential, which means they can be rotated without causing downtime for the applications using them. 
features: - | [`blueprint application-credentials `_] Users can now create Application Credentials, a new keystone resource that can provide an application with the means to get a token from keystone with a preset scope and role assignments. To authenticate with an application credential, an application can use the normal token API with the 'application_credential' auth method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-basic-default-roles-4ff6502b6ac57d48.yaml0000664000175000017500000000140000000000000026631 0ustar00zuulzuul00000000000000--- features: - | [`blueprint basic-default-roles `_] Support has been added for deploying two new roles during the bootstrap process, `reader` and `member`, in addition to the `admin` role. upgrades: - | If the bootstrap process is re-run, and a `reader`, `member`, or `admin` role already exists, a role implication chain will be created: `admin` implies `member` implies `reader`. If you do not want these role implications either skip running bootstrap or delete them after it has completed execution. See [`blueprint basic-default-roles `_] for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-domain-config-as-stable-716ca5ab33c0cc42.yaml0000664000175000017500000000106200000000000027425 0ustar00zuulzuul00000000000000--- features: - > [`blueprint domain-config-as-stable `_] The domain config via API is now marked as stable. deprecations: - > [`blueprint domain-config-as-stable `_] Deprecated ``keystone-manage domain_config_upload``. The keystone team recommends setting domain config options via the API instead. The ``domain_config_upload`` command line option may be removed in the 'P' release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-domain-config-default-82e42d946ee7cb43.yaml0000664000175000017500000000045500000000000027155 0ustar00zuulzuul00000000000000--- features: - > [`blueprint domain-config-default `_] The Identity API now supports retrieving the default values for the configuration options that can be overridden via the domain specific configuration API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-json-web-tokens-37ce3bcd1356cf1b.yaml0000664000175000017500000000065200000000000026165 0ustar00zuulzuul00000000000000--- features: - | [`blueprint json-web-tokens `_] Keystone now supports a JSON Web Signature (JWS) token provider in addition to fernet tokens. Fernet token remain the default token provider. Full details can be found in the `specification `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-manage-migration-c398963a943a89fe.yaml0000664000175000017500000000047300000000000026176 0ustar00zuulzuul00000000000000--- features: - > [`blueprint manage-migration `_] Upgrading keystone to a new version can now be undertaken as a rolling upgrade using the `--expand`, `--migrate` and `--contract` options of the `keystone-manage db_sync` command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-mfa-auth-receipt-8b459431c1f360ce.yaml0000664000175000017500000000200300000000000026051 0ustar00zuulzuul00000000000000--- features: - | [`blueprint mfa-auth-receipt `_] Added support for auth receipts. Allows multi-step authentication for users with configured MFA Rules. 
Partial authentication with successful auth methods will return an auth receipt that can be consumed in subsequent auth attempts along with the missing auth methods to complete auth and be provided with a valid token. upgrade: - | [`blueprint mfa-auth-receipt `_] Auth receipts share the same fernet mechanism as tokens and by default will share keys with tokens and work out of the box. If your fernet key directory is not the default, you will need to also configure the receipt key directory, but they can both point to the same location allowing key rotations to affect both safely. It is possible to split receipt and token keys and run rotatations separately for both if needed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-oauth2-client-credentials-ext-c8933f00a7b45be8.yaml0000664000175000017500000000076200000000000030562 0ustar00zuulzuul00000000000000--- features: - | [`blueprint oauth2-client-credentials-ext `_] Users can now use the OAuth2.0 Access Token API to get an access token from the keystone identity server with application credentials. Then the users can use the access token to access the OpenStack APIs that use the keystone middleware to support OAuth2.0 client credentials authentication through the keystone identity server. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-password-expires-validation-4b32fe7032595932.yaml0000664000175000017500000000111500000000000030234 0ustar00zuulzuul00000000000000--- features: - > [`blueprint password-expires-validation `_] Token responses will now have a ``password_expires_at`` field in the ``user`` object, this can be expressed briefly as:: {"token": {"user": {"password_expires_at": null}}} If PCI support is enabled, via the ``[security_compliance]`` configuration options, then the ``password_expires_at`` field will be populated with a timestamp. Otherwise, it will default to ``null``, indicating the password does not expire. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-pci-dss-notifications-808a205a637bac25.yaml0000664000175000017500000000223300000000000027123 0ustar00zuulzuul00000000000000--- features: - > [`blueprint pci-dss-notifications `_] CADF notifications now extend to PCI-DSS events. A ``reason`` object is added to the notification. A ``reason`` object has both a ``reasonType`` (a short description of the reason) and ``reasonCode`` (the HTTP return code). The following events will be impacted: * If a user does not change their passwords at least once every X days. See ``[security_compliance] password_expires_days``. * If a user is locked out after many failed authentication attempts. See ``[security_compliance] lockout_failure_attempts``. * If a user submits a new password that was recently used. See ``[security_compliance] unique_last_password_count``. * If a password does not meet the specified criteria. See ``[security_compliance] password_regex``. * If a user attempts to change their password too often. See ``[security_compliance] minimum_password_age``. 
For additional details see: `event notifications `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-pci-dss-password-requirements-api-87bc724b2aa554f7.yaml0000664000175000017500000000107300000000000031501 0ustar00zuulzuul00000000000000--- features: - > [`blueprint pci-dss-password-requirements-api `_] Added a new API (``/v3/domains/{domain_id}/config/security_compliance``) to retrieve regular expression requirements for passwords. Specifically, ``[security_compliance] password_regex`` and ``[security_compliance] password_regex_description`` will be returned. Note that these options are only meaningful if PCI support is enabled, via various ``[security_compliance]`` configuration options. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-pci-dss-query-password-expired-users-a7c96a3843bb9abc.yaml0000664000175000017500000000150600000000000032310 0ustar00zuulzuul00000000000000--- features: - > [`blueprint pci-dss-query-password-expired-users `_] Added a ``password_expires_at`` query to ``/v3/users`` and ``/v3/groups/{group_id}/users``. The ``password_expires_at`` query is comprised of two parts, an ``operator`` (valid choices listed below) and a ``timestamp`` (of form ``YYYY-MM-DDTHH:mm:ssZ``). The APIs will filter the list of users based on the ``operator`` and ``timestamp`` given. 
* lt - password expires before the timestamp * lte - password expires at or before timestamp * gt - password expires after the timestamp * gte - password expires at or after the timestamp * eq - password expires at the timestamp * neq - password expires not at the timestamp ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-per-user-auth-plugin-reqs-feb95fd907be4b40.yaml0000664000175000017500000000704600000000000030126 0ustar00zuulzuul00000000000000--- features: - | [`blueprint per-user-auth-plugin-reqs `_] Per-user Multi-Factor-Auth rules (MFA Rules) have been implemented. These rules define which auth methods can be used (e.g. Password, TOTP) and provides the ability to require multiple auth forms to successfully get a token. The MFA rules are set via the user create and update API (``POST/PATCH /v3/users``) call; the options allow an admin to force a user to use specific forms of authentication or combinations of forms of authentication to get a token. The rules are specified as follows:: user["options"]["multi_factor_auth_rules"] = [["password", "totp"], ["password", "custom-auth-method"]] The rules are specified as a list of lists. The elements of the sub-lists must be strings and are intended to mirror the required authentication method names (e.g. ``password``, ``totp``, etc) as defined in the ``keystone.conf`` file in the ``[auth] methods`` option. Each list of methods specifies a rule. If the auth methods provided by a user match (or exceed) the auth methods in the list, that rule is used. The first rule found (rules will not be processed in a specific order) that matches will be used. If a user has the ruleset defined as ``[["password", "totp"]]`` the user must provide both password and totp auth methods (and both methods must succeed) to receive a token. 
However, if a user has a ruleset defined as ``[["password"], ["password", "totp"]]`` the user may use the ``password`` method on it's own but would be required to use both ``password`` and ``totp`` if ``totp`` is specified at all. Any auth methods that are not defined in ``keystone.conf`` in the ``[auth] methods`` option are ignored when the rules are processed. Empty rules are not allowed. If a rule is empty due to no-valid auth methods existing within it, the rule is discarded at authentication time. If there are no rules or no valid rules for the user, authentication occurs in the default manner: any single configured auth method is sufficient to receive a token. In the case a user should be exempt from MFA Rules, regardless if they are set, the User-Option ``multi_factor_auth_enabled`` may be set to ``False`` for that user via the user create and update API (``POST/PATCH /v3/users``) call. If this option is set to ``False`` the MFA rules will be ignored for the user. Any other value except ``False`` will result in the MFA Rules being processed; the option can only be a boolean (``True`` or ``False``) or "None" (which will result in the default behavior (same as ``True``) but the option will no longer be shown in the ``user["options"]`` dictionary. To mark a user exempt from the MFA Rules:: user["options"]["multi_factor_auth_enabled"] = False The ``token`` auth method typically should not be specified in any MFA Rules. The ``token`` auth method will include all previous auth methods for the original auth request and will match the appropriate ruleset. This is intentional, as the ``token`` method is used for rescoping/changing active projects. SECURITY INFO: The MFA rules are only processed when authentication happens through the V3 authentication APIs. If V2 Auth is enabled it is possible to circumvent the MFA rules if the user can authenticate via V2 Auth API. It is recommended to disable V2 authentication for full enforcement of the MFA rules. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-policy-in-code-722372a27291b9cd.yaml0000664000175000017500000000366400000000000025467 0ustar00zuulzuul00000000000000--- features: - > `[`blueprint policy-in-code `_] Keystone now supports the ability to register default policies in code. This makes policy file maintenance easier by allowing duplicated default policies to be removed from the policy file. The only policies that should exist within a deployment's policy file after Pike should be policy overrides. Note that there is no longer a default value for the default rule. That rule is only checked when the more specific rule cannot be found, and with policy in code all rules should be found in code even if they are not in the policy file. To generate sample policy files from default values, prune default policies from existing policy files, or familiarize yourself with general policy usage, please see the `usage documentation `_ provided in oslo.policy. upgrade: - > `[`blueprint policy-in-code `_] Keystone now supports the ability to register default policies in code. This makes policy file maintenance easier by allowing duplicated default policies to be removed from the policy file. The only policies that should exist within a deployment's policy file after Pike should be policy overrides. Note that there is no longer a default value for the default rule. That rule is only checked when the more specific rule cannot be found, and with policy in code all rules should be found in code even if they are not in the policy file. To generate sample policy files from default values, prune default policies from existing policy files, or familiarize yourself with general policy usage, please see the `usage documentation `_ provided in oslo.policy. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-shadow-mapping-06fc7c71a401d707.yaml0000664000175000017500000000141200000000000025631 0ustar00zuulzuul00000000000000--- features: - > [`blueprint shadow-mapping `_] The federated identity mapping engine now supports the ability to automatically provision ``projects`` for ``federated users``. A role assignment will automatically be created for the user on the specified project. If the project specified within the mapping does not exist, it will be automatically created in the ``domain`` associated with the ``identity provider``. This behavior can be triggered using a specific syntax within the ``local`` rules section of a mapping. For more information see: `mapping combinations `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-strict-two-level-model.yaml0000664000175000017500000000326300000000000025115 0ustar00zuulzuul00000000000000--- features: - > [`blueprint strict-two-level-model `_] A new limit enforcement model called `strict_two_level` is added. Change the value of the option `[unified_limit]/enforcement_model` to `strict_two_level` to enable it. In this [`model `_]: 1. The project depth is force limited to 2 level. 2. Any child project's limit can not exceed the parent's. Please ensure that the previous project and limit structure deployment in your Keystone won't break this model before starting to use it. If a newly created project results in a project tree depth greater than 2, a `403 Forbidden` error will be raised. When try to use this model but the project depth exceed 2 already, Keystone process will fail to start. Operators should choose another available model to fix the issue first. - > [`blueprint strict-two-level-model `_] The `project_id` filter is added for listing limits. 
This filter is used for system-scoped request only to fetch the specified project limits. Non system-scoped request will get empty response body instead. - > [`blueprint strict-two-level-model `_] The `include_limits` filter is added to `GET /v3/projects/{project_id}` API. This filter should be used together with `parents_as_list` or `subtree_as_list` filter to add parent/sub project's limit information the response body. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-support-federated-attr-94084d4073f50280.yaml0000664000175000017500000000045600000000000027116 0ustar00zuulzuul00000000000000--- features: - > [`blueprint support-federated-attr `_] Added new filters to the `list user` API (``GET /v3/users``) to support querying federated identity attributes: ``idp_id``, ``protocol_id``, and ``unique_id``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-support-oauth2-mtls-8552892a8e0c72d2.yaml0000664000175000017500000000135100000000000026544 0ustar00zuulzuul00000000000000--- features: - | [`blueprint support-oauth2-mtls `_] Provide the option for users to proof-of-possession of OAuth 2.0 access token based on `RFC8705 OAuth 2.0 Mutual-TLS Client Authentication and Certificate-Bound Access Tokens`. Users can now use the OAuth 2.0 Access Token API to get an OAuth 2.0 certificate-bound access token from the keystone identity server with OAuth 2.0 credentials and Mutual-TLS certificates. Then users can use the OAuth 2.0 certificate-bound access token and the Mutual-TLS certificates to access the OpenStack APIs that use the keystone middleware to support OAuth 2.0 Mutual-TLS client authentication. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-system-scope-7d236ee5992d4e20.yaml0000664000175000017500000000230200000000000025364 0ustar00zuulzuul00000000000000--- features: - | [`blueprint system-scope `_] Keystone now supports the ability to assign roles to users and groups on the system. As a result, users and groups with system role assignment will be able to request system-scoped tokens. Additional logic has been added to ``keystone-manage bootstrap`` to ensure the administrator has a role on the project and system. fixes: - | [`bug 968696 `_] The work to introduce `system-scope `_ in addition to associating `scope types `_ to operations with ``oslo.policy`` will give project developers the ability to fix `bug 968696 `_. - | [`bug 1749268 `_] The ``keystone-manage bootstrap`` command now ensures that an administrator has a system role assignment. This prevents the ability for operators to lock themselves out of system-level APIs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-upgrade-checks-0dc692a392a96879.yaml0000664000175000017500000000070400000000000025551 0ustar00zuulzuul00000000000000--- features: - | [`Community Goal `_] Support has been added for developers to write pre-upgrade checks. Operators can run these checks using ``keystone-status upgrade check``. This allows operators to be more confident when upgrading their deployments by having a tool that automates programmable checks against the deployment configuration or dataset. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-url-safe-naming-ad90d6a659f5bf3c.yaml0000664000175000017500000000043600000000000026142 0ustar00zuulzuul00000000000000--- features: - > [`blueprint url-safe-naming `_] The names of projects and domains can optionally be ensured to be url safe, to support the future ability to specify projects using hierarchical naming. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bp-whitelist-extension-for-app-creds-90e5bcd7b2b78b02.yaml0000664000175000017500000000207100000000000031546 0ustar00zuulzuul00000000000000--- prelude: > [`blueprint whitelist-extension-for-app-creds `_] This release adds support for delegating fine-grained privileges to application credentials via access rules. Access rules act as a whitelist of APIs that an application credential is allowed to use. Regular RBAC is still enforced by oslo.policy. See the `API reference `_ for details. features: - | [`blueprint whitelist-extension-for-app-creds `_] This release adds support for delegating fine-grained privileges to application credentials via access rules. Access rules act as a whitelist of APIs that an application credential is allowed to use. Regular RBAC is still enforced by oslo.policy. See the `API reference `_ for details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1017606-98313bb4c1edf250.yaml0000664000175000017500000000116200000000000023776 0ustar00zuulzuul00000000000000--- other: - > [`bug 1017606 `_] The signature on the ``get_catalog`` and ``get_v3_catalog`` methods of ``keystone.catalog.backends.base.CatalogDriverBase`` have been updated. 
Third-party extensions that extend the abstract class (``CatalogDriverBase``) should be updated according to the new parameter names. The method signatures have changed from:: get_catalog(self, user_id, tenant_id) get_v3_catalog(self, user_id, tenant_id) to:: get_catalog(self, user_id, project_id) get_v3_catalog(self, user_id, project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1291157-00b5c714a097e84c.yaml0000664000175000017500000000047500000000000023732 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1291157 `_] Identity provider information is now validated in during token validation. If an identity provider is removed from a keystone service provider, tokens associated to that identity provider will be considered invalid. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1332058-f25e2de40411b711.yaml0000664000175000017500000000031700000000000023710 0ustar00zuulzuul00000000000000--- features: - | [`bug 1332058 `_] ``keystone-manage doctor`` now checks that keystone can establish connections to Memcached, if configured. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1473292-c21481e6aec29ec2.yaml0000664000175000017500000000202200000000000024063 0ustar00zuulzuul00000000000000--- feature: - | [`bug 1473292 `_] As trusts created by user are stored in database resulting it to grow larger as trusts that are expired and soft deleted non-expired are not automatically purged by keystone.Thus this implements TrustFlush via keystone-manage to delete expired and soft deleted non-expired trusts. Command: $ keystone-manage trust-flush [Options] Options (optional): --project-id : To purge trusts of given project-id. --trustor-user-id : To purge trusts of given trustor-id. 
--trustee-user-id : To purge trusts of given trustee-id. --date : To purge trusts older than date. It will purge trusts older than current if date not given. other: - > [`bug 1473292 `_] If you're relying on a custom implementation of the trust backend, please be sure to implement the new method prior to upgrading. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1490804-de58a9606edb31eb.yaml0000664000175000017500000000105700000000000024156 0ustar00zuulzuul00000000000000--- features: - > [`bug 1490804 `_] Audit IDs are included in the token revocation list. security: - > [`bug 1490804 `_] [`CVE-2015-7546 `_] A bug is fixed where an attacker could avoid token revocation when the PKI or PKIZ token provider is used. The complete remediation for this vulnerability requires the corresponding fix in the keystonemiddleware project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1519210-de76097c974f9c93.yaml0000664000175000017500000000044400000000000023757 0ustar00zuulzuul00000000000000--- features: - > [`bug 1519210 `_] A user may now opt-out of notifications by specifying a list of event types using the `notification_opt_out` option in `keystone.conf`. These events are never sent to a messaging service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1523369-4d42c841b6e7e54e.yaml0000664000175000017500000000103500000000000024020 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1523369 `_] Deleting a project will now cause it to be removed as a default project for users. If caching is enabled the changes may not be visible until the user's cache entry expires. upgrade: - > The identity backend driver interface has changed. 
A new method, `unset_default_project_id(project_id)`, was added to unset a user's default project ID for a given project ID. Custom backend implementations must implement this method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1524030-0814724d5c2b7c8d.yaml0000664000175000017500000000075400000000000023724 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1524030 `_] Revocation records are no longer written to the ``revocation_event`` table when a domain or project is disabled. These records were only ever used during the token validation process. In favor of revocation events, the project or domain will be validated online when the token is validated. This results in less database bloat while maintaining security during token validation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1524030-ccff6b0ec9d1cbf2.yaml0000664000175000017500000000144300000000000024355 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1524030 `_] During token validation we have reduced the number of revocation events returned, only returning a subset of events relevant to the token. Thus, improving overall token validation performance. other: - > [`bug 1524030 `_] The signature on the ``list_events`` method of ``keystone.revoke.backends.base.RevokeDriverBase`` has been updated. Third-party extensions that extend the abstract class (``RevokeDriverBase``) should update their code according to the new parameter names. 
The method signature has changed from:: list_events(self, last_fetch=None) to:: list_events(self, last_fetch=None, token=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1535878-change-get_project-permission-e460af1256a2c056.yaml0000664000175000017500000000053000000000000031643 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1535878 `_] Originally, to perform GET /projects/{project_id}, the provided policy files required a user to have at least project admin level of permission. They have been updated to allow it to be performed by any user who has a role on the project. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1542417-d630b7886bb0b369.yaml0000664000175000017500000000156600000000000023743 0ustar00zuulzuul00000000000000--- features: - > [`bug 1542417 `_] Added support for a `user_description_attribute` mapping to the LDAP driver configuration. upgrade: - > The LDAP driver now also maps the user description attribute after user retrieval from LDAP. If this is undesired behavior for your setup, please add `description` to the `user_attribute_ignore` LDAP driver config setting. The default mapping of the description attribute is set to `description`. Please adjust the LDAP driver config setting `user_description_attribute` if your LDAP uses a different attribute name (for instance to `displayName` in case of an AD backed LDAP). If your `user_additional_attribute_mapping` setting contains `description:description` you can remove this mapping, since this is now the default behavior. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1547684-911aed68a0d3df17.yaml0000664000175000017500000000231000000000000024073 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1651989 `_] Due to ``bug 1547684``, when using the ``policy.v3cloudsample.json`` sample file, a domain admin token was being treated as a cloud admin. Since the ``is_admin_project`` functionality only supports project-scoped tokens, we automatically set any domain scoped token to have the property ``is_admin_project`` to ``False``. [`bug 1547684 `_] A typo in the ``policy.v3cloudsample.json`` sample file was causing `oslo.policy` to not load the file. See the ``upgrades`` section for more details. upgrade: - | [`bug 1547684 `_] A minor change to the ``policy.v3cloudsample.json`` sample file was performed so the sample file loads correctly. The ``cloud_admin`` rule has changed from:: "role:admin and (token.is_admin_project:True or domain_id:admin_domain_id)" To the properly written:: "role:admin and (is_admin_project:True or domain_id:admin_domain_id)" Adjust configuration tools as necessary, see the ``fixes`` section for more details on this change. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1561054-dbe88b552a936a05.yaml0000664000175000017500000000250300000000000024007 0ustar00zuulzuul00000000000000--- prelude: > - The default token provider is now Fernet. upgrade: - > [`bug 1561054 `_] The default token provider has switched from UUID to Fernet. Please note Fernet requires a key repository to be in place prior to running Ocata, this can be done by running ``keystone-manage fernet_setup``. Additionally, for multi-node deployments, it is imperative a key distribution process be in use before upgrading. 
Once a key repository has been created it should be distributed to all keystone nodes in the deployment. This ensures each keystone node will be able to validate tokens issued across the deployment. If you do not wish to switch token formats, you will need to explicitly set the token provider for each node in the deployment by setting ``[token] provider`` to ``uuid`` in ``keystone.conf``. Documentation can be found at `fernet-tokens `_. critical: - > [`bug 1561054 `_] If upgrading to Fernet tokens, you must have a key repository and key distribution mechanism in place, otherwise token validation may not work. Please see the upgrade section for more details. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1563101-134df5b99ea48f00.yaml0000664000175000017500000000046500000000000024010 0ustar00zuulzuul00000000000000--- other: - > [`bug 1563101 `_] The token provider driver interface has moved from ``keystone.token.provider.Provider`` to ``keystone.token.providers.base.Provider``. If implementing a custom token provider, subclass from the new location. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1571878-1bcaea5337905af0.yaml0000664000175000017500000000041500000000000024070 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1571878 `_] A valid ``mapping_id`` is now required when creating or updating a federation protocol. If the ``mapping_id`` does not exist, a ``400 - Bad Request`` will be returned. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1582585-a368ac5a252ec84f.yaml0000664000175000017500000000075400000000000024106 0ustar00zuulzuul00000000000000--- other: - > [`bug 1582585 `_] A new method ``get_domain_mapping_list`` was added to ``keystone.identity.mapping_backends.base.MappingDriverBase``. Third-party extensions that extend the abstract class (``MappingDriverBase``) should implement this new method. The method has the following signature:: get_domain_mapping_list(self, domain_id) and will return a list of mappings for a given domain ID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1590587-domain-specific-role-assignment-8f120604a6625852.yaml0000664000175000017500000000042100000000000031652 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1590587 `_] When assigning Domain Specific Roles, the domain of the role and the domain of the project must match. This is now validated and the REST call will return a 403 Forbidden. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1594482-52a5dd1d8477b694.yaml0000664000175000017500000000054500000000000023755 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1594482 `_] When using list_limit config option, the GET /services?name={service_name} API was first truncating the list and afterwards filtering by name. The API was fixed to first filter by name and only afterwards truncate the result list to the desired limit. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1611102-e1348cbec9b1110a.yaml0000664000175000017500000000046400000000000024040 0ustar00zuulzuul00000000000000 --- other: - > [`bug 1611102 `_] The methods ``list_endpoints_for_policy()`` and ``get_policy_for_endpoint()`` have been removed from the ``keystone.endpoint_policy.backends.base.EndpointPolicyDriverBase`` abstract class, they were unused.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1613466-credential-update-ec2-type-8fb51ff3ad3a449c.yaml0000664000175000017500000000066400000000000031200 0ustar00zuulzuul00000000000000fixes: - > [`bug 1613466 `_] Credentials update to ec2 type originally accepted credentials with no project ID set, this would lead to an error when trying to use such credential. This behavior has been blocked, so creating a non-ec2 credential with no project ID and updating it to ec2 without providing a project ID will fail with a `400 Bad Request` error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1615014-b30f606a2d202428.yaml0000664000175000017500000000045700000000000023627 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1615014 `_] Migration order is now strictly enforced. The ensure upgrade process is done in the order it is officially documented and support, starting with `expand`, then `migrate`, and finishing with `contract`. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1616424-c46ba773f7ac40ae.yaml0000664000175000017500000000044000000000000024141 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1616424 `_] Provide better exception messages when creating OAuth request tokens and OAuth access tokens via the ``/v3/OS-OAUTH1/request_token`` and ``/v3/OS-OAUTH1/access_token`` APIs, respectively.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1622310-c501cf77437fdfa6.yaml0000664000175000017500000000127600000000000024071 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1622310 `_] Trusts will now be invalidated if: the project to which the trust is scoped, or the user (trustor or trustee) for which the delegation is assigned, has been deleted. other: - > [`bug 1622310 `_] A new method ``delete_trusts_for_project`` has been added to ``keystone.trust.backends.base.TrustDriverBase``. Third-party extensions that extend the abstract class (``TrustDriverBase``) should be updated according to the new parameter names. The signature for the new method is:: delete_trusts_for_project(self, project_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1636950-8fa1a47fce440977.yaml0000664000175000017500000000100400000000000024021 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1636950 `_] New option ``[ldap] connection_timeout`` allows a deployer to set a ``OPT_NETWORK_TIMEOUT`` value to use with the LDAP server. This allows the LDAP server to return a ``SERVER_DOWN`` exception, if the LDAP URL is incorrect or if there is a connection failure. By default, the value for ``[ldap] connection_timeout`` is -1, meaning it is disabled. Set a positive value (in seconds) to enable the option. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1638603-354ee4167e6e.yaml0000664000175000017500000000066400000000000023350 0ustar00zuulzuul00000000000000--- features: - > [`bug 1638603 `_] Add support for nested groups in Active Directory. A new boolean option ``[ldap] group_ad_nesting`` has been added, it defaults to ``False``. Enable the option is using Active Directory with nested groups. This option will impact the ``list_users_in_group``, ``list_groups_for_user``, and ``check_user_in_group`` operations.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641625-fe463874dc5edb10.yaml0000664000175000017500000000041100000000000024067 0ustar00zuulzuul00000000000000--- features: - | [`bug 1641625 `_] The keystone configured as an identity provider now includes an additional attribute called `openstack_groups` in the assertion when generating SAML assertions. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641639-b9accc163e61ca15.yaml0000664000175000017500000000105300000000000024141 0ustar00zuulzuul00000000000000--- fixes: - | A Federated user gets an entry in the shadow-users table. This entry has a unique ID. It was generated using a UUID. This fix changes to reuse the mechanism for LDAP, where the ID is generated from the domain ID + the local id of the user (an attribute that uniquely ids the user from the IdP). This generator is specified by the configuration file. Now Both LDAP and Federated Ids are generated the same way. It also means that Federated IDs can be kept in sync between two independtent Keystone servers. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641645-516709f9da3de26f.yaml0000664000175000017500000000067700000000000024036 0ustar00zuulzuul00000000000000--- features: - | [`bug 1641645 `_] RBAC protection was removed from the `Self-service change user password` API (``/v3/user/$user_id/password``), meaning, a user can now change their password without a token specified in the ``X-Auth-Token`` header. This change will allow a user, with an expired password, to update their password without the need of an administrator. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641654-8630ce7bcde43a7e.yaml0000664000175000017500000000165100000000000024157 0ustar00zuulzuul00000000000000--- features: - > [`bug 1641654 `_] The ``healthcheck`` middleware from `oslo.middleware` has been added to the keystone application pipelines by default. This middleware provides a common method to check the health of keystone. Refer to the example paste provided in ``keystone-paste.ini`` to see how to include the ``healthcheck`` middleware. upgrade: - | [`bug 1641654 `_] The ``healthcheck`` middleware from `oslo.middleware` has been added to the keystone application pipelines by default. The following section has been added to ``keystone-paste.ini``:: [filter:healthcheck] use = egg:oslo.middleware#healthcheck It is recommended to have the ``healthcheck`` middleware first in the pipeline:: pipeline = healthcheck cors sizelimit http_proxy_to_wsgi osprofiler ... 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641660-f938267e1ec54071.yaml0000664000175000017500000000122500000000000023657 0ustar00zuulzuul00000000000000--- upgrade: - > [`bug 1641660 `_] The default value for ``[DEFAULT] notification_format`` has been changed from ``basic`` to ``cadf``. The CADF notifications have more information about the user that initiated the request. - > [`bug 1641660 `_] The default value for ``[DEFAULT] notification_opt_out`` has been changed to include: ``identity.authenticate.success``, ``identity.authenticate.pending`` and ``identity.authenticate.failed``. If a deployment relies on these notifications, then override the default setting. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1641816-8b39f3f73359c778.yaml0000664000175000017500000000036400000000000023706 0ustar00zuulzuul00000000000000--- features: - > [`bug 1641816 `_] The ``[token] cache_on_issue`` option is now enabled by default. This option has no effect unless global caching and token caching are enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642212-9964dfd3af0184bd.yaml0000664000175000017500000000025000000000000024065 0ustar00zuulzuul00000000000000--- features: - Added an option ``--check`` to ``keystone-manage db_sync``, the option will allow a user to check the status of rolling upgrades in the database. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642348-83d4c86ad3984d75.yaml0000664000175000017500000000036400000000000023757 0ustar00zuulzuul00000000000000--- features: - > [`bug 1642348 `_] Added new option ``[security_compliance] lockout_ignored_user_ids`` to allow deployers to specify users that are exempt from PCI lockout rules. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642457-4533f9810a8cd927.yaml0000664000175000017500000000041200000000000023662 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1642457 `_] Handle disk write and IO failures when rotating keys for Fernet tokens. Rather than creating empty keys, properly catch and log errors when unable to write to disk. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642687-5497fb56fe86806d.yaml0000664000175000017500000000171000000000000023767 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1642687 `_] When registering an ``identity provider`` via the OS-FEDERATION API, it is now recommended to include a ``domain_id`` to associate with the ``identity provider`` in the request. Federated users that authenticate with the ``identity provider`` will now be associated with the ``domain_id`` specified. If no ``domain_id`` is specified, then a domain will be automatically created. upgrade: - > [`bug 1642687 `_] Upon a successful upgrade, all existing ``identity providers`` will now be associated with an automatically created domain. Each ``identity provider`` that existed prior to the `Ocata` release will now have a ``domain_id`` field. The new domain will have an ``id`` (random UUID), a ``name`` (that will match the ``identity provider`` ID , and be ``enabled`` by default. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642687-c7ab1c9be152db20.yaml0000664000175000017500000000143300000000000024143 0ustar00zuulzuul00000000000000--- other: - | [`bug 1642687 `_] The signature on the ``create_federated_user`` method of ``keystone.identity.shadow_backends.base.ShadowUsersDriverBase`` has been updated. Third-party extensions that extend the abstract class (``ShadowUsersDriverBase``) should be updated according to the new parameter names. The method signature has changed from:: create_federated_user(self, federated_dict) to:: create_federated_user(self, domain_id, federated_dict) fixes: - | [`bug 1642687 `_] Users that authenticate with an ``identity provider`` will now have a ``domain_id`` attribute, that is associated with the ``identity provider``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1642692-d669c8fcf9e171d9.yaml0000664000175000017500000000033400000000000024124 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1642692 `_] When a `federation protocol` is deleted, all users that authenticated with the `federation protocol` will also be deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1645487-ca22c216ec26cc9b.yaml0000664000175000017500000000057600000000000024157 0ustar00zuulzuul00000000000000--- features: - > [`Bug 1645487 `_] Added a new PCI-DSS feature that will require users to immediately change their password upon first use for new users and after an administrative password reset. The new feature can be enabled by setting [security_compliance] ``change_password_upon_first_use`` to ``True``. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1649138-c53974f6bb0eab14.yaml0000664000175000017500000000060300000000000024072 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1649138 `_] When using LDAP as an identity backend, the initial bind will now occur upon creation of a connection object, i.e. early on when performing LDAP queries, no matter whether the bind is authenticated or anonymous, so that any connection errors can be handled correctly and early. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1649446-efff94143823755d.yaml0000664000175000017500000000133400000000000023760 0ustar00zuulzuul00000000000000--- fixes: - | [`Bug 1649446 `_] The default policy for listing revocation events has changed. Previously, any authenticated user could list revocation events; it is now, by default, an admin or service user only function. This can be changed by modifying the policy file being used by keystone. upgrade: - | [`Related to Bug 1649446 `_] The ``identity:list_revoke_events`` rule has been changed in both sample policy files, ``policy.json`` and ``policy.v3cloudsample.json``. From:: "identity:list_revoke_events": "" To:: "identity:list_revoke_events": "rule:service_or_admin" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1649616-b835d1dac3401e8c.yaml0000664000175000017500000000027500000000000024073 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1689616 `_] Significant improvements have been made when performing a token flush on massive data sets. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1656076-c4422270f73b43b.yaml0000664000175000017500000000127000000000000023561 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1656076 `_] The various plugins under ``keystone.controllers.Auth.authenticate`` now require ``AuthContext`` objects to be returned. security: - > [`bug 1650676 `_] Authentication plugins now required ``AuthContext`` objects to be used. This has added security features to ensure information such as the ``user_id`` does not change between authentication methods being processed by the server. The ``keystone.controllers.Auth.authenticate`` method now requires the argument ``auth_context`` to be an actual ``AuthContext`` object. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1659730-17834ba2dde668ae.yaml0000664000175000017500000000104300000000000024077 0ustar00zuulzuul00000000000000--- other: - > [`bug 1659730 `_] The signature on the ``authenticate`` method of ``keystone.auth.plugins.base.AuthMethodHandler`` has been updated. Third-party extensions that extend the abstract class (``AuthMethodHandler``) should update their code according to the new parameter names. 
The method signature has changed from:: authenticate(self, context, auth_payload, auth_context) to:: authenticate(self, request, auth_payload, auth_context) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1659995-f3e716de743b7291.yaml0000664000175000017500000000202200000000000023757 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1659995 `_] New options have been made available via the user create and update API (``POST/PATCH /v3/users``) call, the options will allow an admin to mark users as exempt from certain PCI requirements via an API. Set the following user attributes to ``True`` or ``False`` in an API request. To mark a user as exempt from the PCI password lockout policy:: user['options']['ignore_lockout_failure_attempts'] To mark a user as exempt from the PCI password expiry policy:: user['options']['ignore_password_expiry'] To mark a user as exempt from the PCI reset policy:: user['options']['ignore_change_password_upon_first_use'] deprecations: - | [`bug 1659995 `_] The config option ``[security_compliance] password_expires_ignore_user_ids`` has been deprecated in favor of using the option value set, available via the user create and update API call././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1670382-ee851ba4f364d608.yaml0000664000175000017500000000042400000000000024016 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1670382 `_] The ldap config group_members_are_ids has been added to the whitelisted options allowing it to now be used in the domain config API and `keystone-manage domain_config_upload` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1676497-92271e25f642e2de.yaml0000664000175000017500000000031700000000000023753 
0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1676497 `_] `bindep` now correctly reports the `openssl-devel` binary dependency for rpm distros instead of `libssl-dev`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1684994-264fb8f182ced180.yaml0000664000175000017500000000050300000000000024030 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1684994 `_] This catches the ldap.INVALID_CREDENTIALS exception thrown when trying to connect to an LDAP backend with an invalid username or password, and emits a message back to the user instead of the default 500 error message. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1687593-95e1568291ecd70b.yaml0000664000175000017500000000034300000000000023756 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1687593 `_] Ensure that the URL used to make the request when creating OAUTH1 request tokens is also the URL that verifies the request token. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1688137-e4203c9a728690a7.yaml0000664000175000017500000000054200000000000023665 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1688137 `_] Fixed the AccountLocked exception being shown to the end user since it provides some information that could be exploited by a malicious user. The end user will now see Unauthorized instead of AccountLocked, preventing user info oracle exploitation. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1696574-15a728396350a95a.yaml0000664000175000017500000000051400000000000023607 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1696574 `_] All GET APIs within keystone now have support for HEAD, if not already implemented. All new HEAD APIs have the same response codes and headers as their GET counterparts. This aids in client-side processing, especially caching. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1700852-de775d0eb2ddfdd1.yaml0000664000175000017500000000032700000000000024310 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1700852 `_] Keystone now supports caching of the `GET|HEAD /v3/users/{user_id}/projects` API in an effort to improve performance. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1701324-739a31f38037f77b.yaml0000664000175000017500000000025000000000000023647 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1701324 `_] Token bodies now contain only unique roles in the authentication response. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1702211-abb59adda73fd78e.yaml0000664000175000017500000000071200000000000024302 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1702211 `_] Password `created_at` field under some versions/deployments of MySQL would lose sub-second precision. This means that it was possible for passwords to be returned out-of-order when changed within one second (especially common in testing). This change stores password `created_at` and `expires_at` as an integer instead of as a DATETIME data-type. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1703369-9a901d627a1e0316.yaml0000664000175000017500000000112000000000000023636 0ustar00zuulzuul00000000000000--- security: - | [`bug 1703369 `_] There was a typo for the identity:get_identity_provider rule in the default ``policy.json`` file in previous releases. The default value for that rule was the same as the default value for the default rule (restricted to admin) so this typo was not readily apparent. Anyone customizing this rule should review their settings and confirm that they did not copy that typo. Particularly given that the default rule is being removed in Pike with the move of policy into code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1703666-b8a990f2bf5b62f0.yaml0000664000175000017500000000055000000000000024076 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1703666 `_] Fixing multi-region support for the templated v3 catalog by making sure that the catalog contains only one definition per endpoint, and that each region is listed under that endpoint. Previously each region and endpoint would have had its own definition. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1704205-bc0570feeb3ec5c4.yaml0000664000175000017500000000062200000000000024214 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1704205 `_] All users and groups are required to have a name. Prior to this fix, Keystone was not properly enforcing this for LDAP users and groups. Keystone will now ignore users and groups that do not have a value for the LDAP attribute which Keystone has been configured to use for that entity's name. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1705485-7a1ad17b9cc99b9d.yaml0000664000175000017500000000172700000000000024174 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1705485 `_] The `change_password` protection policy can be removed from file-based policies. This policy is no longer used to protect the self-service password change API since the logic was moved into code. Note that the administrative password reset functionality is still protected via policy on the `update_user` API. fixes: - | [`bug 1705485 `_] A `previous change `_ removed policy from the self-service password API. Since a user is required to authenticate to change their password, protection via policy didn't necessarily make sense. This change removes the default policy from code, since it is no longer required or used by the service. Note that administrative password resets for users are still protected via policy through a separate endpoint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1718747-50d39fa87bdbb12b.yaml0000664000175000017500000000154100000000000024155 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1718747 `_] Fixes a regression where deleting a domain with users in it caues a server error. This bugfix restores the previous behavior of deleting the users namespaced in the domain. This only applies when using the SQL identity backend. other: - | [`bug 1718747 `_] As part of solving a regression in the identity SQL backend that prevented domains containing users from being deleted, a notification callback was altered so that users would only be deleted if the identity backend is SQL. 
If you have a custom identity backend that is not read-only, deleting a domain in keystone will not delete the users in your backend unless your driver has an is_sql property that evaluates to true. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1724645-a94659dfd0f45b9a.yaml0000664000175000017500000000144600000000000024114 0ustar00zuulzuul00000000000000--- features: - | [`bug 1724645 `_] Adds a new attribute, ``remote_id_attribute``, to the federation protocol object, which allows WebSSO authentication to forward authentication requests through the right implementation for a federated protocol based on the remote ID attribute in the authentication headers. fixes: - | [`bug 1724645 `_] Fixes an issue where multiple implementations of a federation protocol, such as Shibboleth and Mellon for the SAML2.0 protocol, could not be differentiated from one another because they had to share the same globally configured remote ID attribute. Now the remote ID attribute can be set on the protocol object itself. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1727099-1af277b35db34372.yaml0000664000175000017500000000063000000000000023734 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1727099 `_] When users try to changes their password, the total number which includes the new password should not be greater or equal to the ``unique_last_password_count`` config options. But the help and error messages for this scenario are not described clearly. Now the messges are updated to be more clear. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1727726-0b47608811a2cd16.yaml0000664000175000017500000000066100000000000023650 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1727726 `_] All users and groups are required to have a name. Prior to this fix, Keystone was allowing LDAP users and groups whose name has only empty white spaces. Keystone will now ignore users and groups that do have only white spaces as value for the LDAP attribute which Keystone has been configured to use for that entity's name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1728907-bab6769ab46bd8aa.yaml0000664000175000017500000000077500000000000024250 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1728907 `_] In some rare cases, an empty key file can get created within the fernet key repository. When keystone tries to load the keys from disk, it will fail with an invalid fernet key ValueError. Keystone now handles empty key files when loading and rotating keys. If an empty file exists, it will be ignored when loaded, reported as a warning in the log, and overwritten with a valid key upon rotation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1729933-4a09201e9dface2a.yaml0000664000175000017500000000074100000000000024150 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1729933 `_] The Region Update API now correctly updates extra values. Previously adding any extra values to a region via the update API would discard any added values besides the default ones. Any extra values are now correctly added and returned. This fix was for consistency with other APIs in keystone that use 'extra' and the use of 'extra' in keystone is highly discouraged. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1733754-4d9d3042b8501ec6.yaml0000664000175000017500000000044500000000000023734 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1733754 `_] Keystone didn't validate the OS-TRUST:trust key of the authentication request is actually a dictionary. This results in a 500 Internal Server Error when it should really be a 400 Bad Request. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1734244-1b4ea83baa72566d.yaml0000664000175000017500000000031700000000000024063 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1734244 `_] Return a 400 status code instead of a 500 when creating a trust with extra attributes in the roles parameter. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1735250-b60332a7f288cf94.yaml0000664000175000017500000000055500000000000023741 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1734244 `_] Users can't set password longer than 128 if Keystone using `Sqlalchemy` < 1.1.0. Update `Sqlalchemy` to a higher version can solve this problem. [`Related Sqlalchemy Changelog `_]. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1736875-c790f568c5f4d671.yaml0000664000175000017500000000065700000000000023774 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1736875 `_] Add schema check to return a 400 status code instead of a 500 when authorize a request token with non-id attributes in the `roles` parameter. other: - > Keystone now supports authorizing a request token by providing a role name. A `role` in the `roles` parameter can include either a role name or role id, but not both. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1738895-342864cd0285bc42.yaml0000664000175000017500000000041600000000000023665 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1738895 `_] Fixed the bug that federated users can't be listed by `name` filter. Now when list users by `name`, Keystone will query both local user backend and shadow user backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1740951-82b7e4bd608742ab.yaml0000664000175000017500000000052200000000000024011 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1740951 `_] A new method was added that made it so oslo.policy sample generation scripts can be used with keystone. The ``oslopolicy-policy-generator`` script will now generate a policy file containing overrides and defaults registered in code. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1744195-a7154ac2e8556efc.yaml0000664000175000017500000000044000000000000024077 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1744195 `_] The SQL Foreign Key is enabled for Keystone unit tests now. This is not an end user impact fixed. But for the downstream teams, please take care of it for your private test code changes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1746599-848a1163e52ac0a6.yaml0000664000175000017500000000027600000000000023744 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1746599 `_] Fixes user email being set for federated shadow users, when the rule contains email in user. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1747694-48c8caa4871300e3.yaml0000664000175000017500000000043400000000000023741 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1747694 `_] The trust API reference declared support for ``page`` and ``per_page`` query parameters, when the actual trust API didn't support them. The API reference has been updated accordingly.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1748027-decc2e11154b97cf.yaml0000664000175000017500000000353000000000000024152 0ustar00zuulzuul00000000000000--- features: - | [`bug 1748027 `_] The user API now supports the ``admin``, ``member``, and ``reader`` default roles across system-scope, domain-scope, and project-scope. upgrade: - | [`bug 1748027 `_] The user API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides user policies. deprecations: - | [`bug 1748027 `_] The user policies have been deprecated. The ``identity:get_user`` policy now uses ``(role:reader and system_scope:all) or (role:reader and token.domain.id:%(target.user.domain_id)s) or user_id:%(target.user.id)s`` instead of ``rule:admin_or_owner``. The ``identity:list_users`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)`` instead of ``rule:admin_required``. The ``identity:create_user``, ``identity:update_user``, and ``identity:delete_user`` policies now use ``(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)`` instead of ``rule:admin_required``. 
These new defaults automatically include support for a read-only role and allow for more granular access to user APIs, making it easier for system and domain administrators to delegate authorization, safely. Please consider these new defaults if your deployment overrides user policies. security: - | [`bug 1748027 `_] The user API now uses system-scope, domain-scope, project-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1748970-eb63ad2030e296f3.yaml0000664000175000017500000000061000000000000024011 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1748970 `_] A bug was introduced in Queens that resulted in system role assignments being returned when querying the role assignments API for a specific role. The issue is fixed and the list of roles returned from ``GET /v3/role_assignments?role.id={role_id}`` respects system role assignments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1749264-676ca02902bcd169.yaml0000664000175000017500000000025000000000000023732 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1749264 `_] A user's system role assignment will be removed when the user is deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1749267-96153d2fa6868f67.yaml0000664000175000017500000000024600000000000023711 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1749267 `_] A group's system role assignments are removed when the group is deleted. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750415-95ede3a9685b6e0c.yaml0000664000175000017500000000046400000000000024106 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1750415 `_] Fixes an implementation fault in application credentials where the application credential reference was not populated in the token data, causing problems with the token validation when caching was disabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750660-e2a360ddd6790fc4.yaml0000664000175000017500000000406400000000000024074 0ustar00zuulzuul00000000000000--- features: - | [`bug 1750660 `_] The project API now supports the ``admin``, ``member``, and ``reader`` default roles across system-scope, domain-scope, and project-scope. upgrade: - | [`bug 1750660 `_] The project API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides project policies. deprecations: - | [`bug 1750660 `_] The project policies have been deprecated. The ``identity:get_project`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.project.domain_id)s) or project_id:%(target.project.id)s`` instead of ``rule:admin_required or project_id:%(target.project.id)s``. The ``identity:list_projects`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s`` instead of ``rule:admin_required``. The ``identity:list_user_projects`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or user_id:%(target.user.id)s`` instead of ``rule:admin_or_owner``. 
The ``identity:create_project`` now uses ``(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s)`` instead of ``rule:admin_required``. These new defaults automatically include support for a read-only role and allow for more granular access to project APIs, making it easier for system and domain administrators to delegate authorization, safely. Please consider these new defaults if your deployment overrides the project policies. security: - | [`bug 1750660 `_] The project API now uses system-scope, domain-scope, project-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750669-dfce859550126f03.yaml0000664000175000017500000000524400000000000023752 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805368 `_] [`bug 1750669 `_] The system assignment API now supports the ``admin``, ``member``, and ``reader`` default roles across system-scope, domain-scope, and project-scope. The grant API now supports the ``admin``, ``member``, and ``reader`` default roles for system-scope. upgrade: - | [`bug 1805368 `_] [`bug 1750669 `_] The system assignment and grant APIs uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides system assignment policies. deprecations: - | [`bug 1805368 `_] [`bug 1750669 `_] The system assignment and grant policies have been deprecated. The ``identity:list_system_grants_for_user``, ``identity:check_system_grant_for_user``, ``identity:list_system_grants_for_group``, and ``identity:check_system_grant_for_group`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:create_system_grant_for_user``, ``identity:revoke_system_grant_for_user``, ``identity:create_system_grant_for_group``, and ``identity:revoke_system_grant_for_group`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:check_grant`` and ``identity:list_grants`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_grant`` and ``identity:revoke_grant`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically include support for a read-only role and allow for more granular access to the system assignment and grant APIs, making it easier for administrators to delegate authorization, safely. Please consider these new defaults if your deployment overrides the system assignment APIs. security: - | [`bug 1805368 `_] [`bug 1750669 `_] The system assignment API now uses system-scope, domain-scope, project-scope, and default roles to provide better accessibility to users in a secure way. The grant API now uses system-scope and default to provide better accessbility to operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750673-b53f74944d767ae9.yaml0000664000175000017500000000252600000000000023764 0ustar00zuulzuul00000000000000--- features: - | [`bug 1750673 `_] The role assignment API now supports the ``admin``, ``member``, and ``reader`` default roles across system-scope, domain-scope, and project-scope. upgrade: - | [`bug 1750673 `_] The role assignment API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new policies if your deployment overrides role assignment policies. 
deprecations: - | [`bug 1750673 `_] The role assignment ``identity:list_role_assignments`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain.id)s)`` instead of ``rule:admin_required``. This new default automatically includes support for a read-only role and allows for more granular access to the role assignment API. Please consider this new default if your deployment overrides the role assignment policies. security: - | [`bug 1750673 `_] The role assignment API now uses system-scope, domain-scope, project-scope, and default roles to provide better accessbility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750676-cf70c1a27b2c8de3.yaml0000664000175000017500000000343600000000000024156 0ustar00zuulzuul00000000000000--- features: - | [`bug 1750676 `_] [`bug 1818844 `_] The token API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1750676 `_] [`bug 1818844 `_] The token API uses new default policies that make it easier for system users to delegate functionality in a secure way. Please consider the new policies if your deployment overrides the token policies. deprecations: - | [`bug 1750676 `_] [`bug 1818844 `_] The ``identity:check_token`` policy now uses ``(role:reader and system_scope:all) or rule:token_subject`` instead of ``rule:admin_required or rule:token_subject``. The ``identity:validate_token`` policy now uses ``(role:reader and system_scope:all) or rule:service_role or rule:token_subject`` instead or ``rule:service_or_admin or rule:token_subject``. The ``identity:revoke_token`` policy now uses ``(role:admin and system_scope:all) or rule:token_subject`` instead of ``rule:admin_or_token_subject``. These new defaults automatically account for a read-only role by default and allow more granular access to the API. 
Please consider these new defaults if your deployment overrides the token policies. security: - | [`bug 1750676 `_] [`bug 1818844 `_] The token API now uses system-scope and default roles properly to provide more granular access to the token API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1750678-88a38851ca80fc64.yaml0000664000175000017500000000346300000000000023760 0ustar00zuulzuul00000000000000--- features: - | [`bug 1750678 `_] The EC2 credentials API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1750678 `_] The EC2 credentials API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides EC2 credentials consumer policies. deprecations: - | [`bug 1750678 `_] The EC2 credentials policies have been deprecated. The ``identity:ec2_get_credentials`` now use ``(role:reader and system_scope:all) or user_id:%(target.credential.user_id)s`` instead of ``rule:admin_required``and ``identity:ec2_list_credentials`` policies now use ``role:reader and system_scope:all or rule:owner`` instead of ``rule:admin_required``. The ``identity:ec2_delete_credentials`` now use ``(role:admin and system_scope:all) or user_id:%(target.credential.user_id)s`` instead of ``rule:admin_required``and ``identity:ec2_create_credentials`` policies now use ``role:admin and system_scope:all or rule:owner`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the EC2 credentials policies. 
security: - | [`bug 1750678 `_] The EC2 credentials API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1751045-f950e3fb85e2b573.yaml0000664000175000017500000000050400000000000024016 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1751045 `_] It is now possible to clean up role assignments for groups that don't exist in the identity backend. This is relevant to deployments that are backed by LDAP and groups are removed directly by LDAP and not through keystone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1753584-e052bc7805f001b4.yaml0000664000175000017500000000027500000000000023726 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1753584 `_] Fix formatting of ImportError when using a driver not found in the list of token providers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1753585-7e11213743754999.yaml0000664000175000017500000000027500000000000023462 0ustar00zuulzuul00000000000000--- fixes: - | ['bug 1753585 '_] LDAP attribute names are now matched case insensitively to comply with LDAP implementations.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1754048-correct-federated-domain-47cb889d88d7770a.yaml0000664000175000017500000000035400000000000030612 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1754048 `_] The correct user domain is now reported when validating a federated token. 
Previously, the domain would always be validated as "Federated."././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1754677-13ee75ed1b473f26.yaml0000664000175000017500000000056300000000000024032 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1754677 `_] When you setup a user with a role assignment on a domain and then a role assignment on a project "acting as a domain", you can't actually remove them. This fixes it by filtering the query by "type" i.e either a USER_DOMAIN or a USER_PROJECT in role assignment table.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1755874-9951f77c6d18431c.yaml0000664000175000017500000000051000000000000023675 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1755874 `_] Users now can have the resource option ``lock_password`` set which prevents the user from utilizing the self-service password change API. Valid values are ``True``, ``False``, or "None" (where ``None`` clears the option). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1756190-0e5d86d334555931.yaml0000664000175000017500000000070400000000000023603 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1756190 `_] When filtering projects based on tags, the filtering will now be performed by matching a subset containing the given tags against projects, rather than exact matching. Providing more tags when performing a search will yield more exact results while less will return any projects that match the given tags but could contain other tags as well. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1757022-664d0b0db1242bf8.yaml0000664000175000017500000000060200000000000023772 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1757022 `_] In previous releases, ``keystone-manage mapping_purge --type {user,group}`` command would purge all mapping incorrectly instead of only purging the specified type mappings. ``keystone-manage mapping_purge --type {user,group}`` now purges only specified type mappings as expected. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1757151-43eb3baaa175f904.yaml0000664000175000017500000000050000000000000024053 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1757151 `_] More thorough documentation has been added for authorization and token scopes, which helps users and developers understand the purpose of scope and why it can be a useful tool for resource isolation and API protection. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1759289-466cdf4514de3498.yaml0000664000175000017500000000074300000000000023770 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1759289 `_] The ``keystone-manage token_flush`` command no longer establishes a connection to a database, or persistence backend. It's usage should be removed if you're using a supported non-persistent token format. If you're relying on external token providers that write tokens to disk and would like to maintain this functionality, please consider porting it to a separate tool. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1760205-87dedd6d8812db3f.yaml0000664000175000017500000000107700000000000024165 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1760205 `_] When deleting a shadow user, the related cache info is not invalidated so that Keystone will raise 404 UserNotFound error when authenticating with the previous federation info. This bug has been fixed now. other: - | A new interface called `list_federated_users_info` is added to shadow backend. It's used to get the shadow user information internally. If you are maintaining any out-tree shadow backends, please implement this function for them as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1760521-fec5c88af214401f.yaml0000664000175000017500000000034000000000000024061 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1760521 `_] Fixed the bug that the result count for ``domain list`` may lack one if the config option ``list_limit`` in [resource] is set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1760809-711df870a9d67c0d.yaml0000664000175000017500000000027500000000000024026 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1760809 `_] Identity providers registered to domains will now be cleaned up when the domain is deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1763824-3d2f5169af9d42f.yaml0000664000175000017500000000034200000000000023741 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1763824 `_] JSON Schema implementation ``nullable`` in keystone.common.validation now properly adds ``None`` to the enum if the enum exists. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1765193-b40318b9fb5d1c7b.yaml0000664000175000017500000000032700000000000024074 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1765193 `_] The unified limit API now exposes a deployment's configured enforcement model via the ``GET /limits/model`` endpoint. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1773967-b59517a09e0e6141.yaml0000664000175000017500000000064400000000000023670 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1773967 `_] Fixes an issue where users who had role assignments only via a group membership and not via direct assignment could create but not use application credentials. It is important to note that federated users who only have role assignments via a mapped group membership still cannot create application credentials. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1774229-cb968e95c9d81c4d.yaml0000664000175000017500000000031000000000000024114 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1774229 `_] The API reference for token management now includes more specific examples for different token scopes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1776504-keystone-conversion-to-flask-372a5654a55675c6.yaml0000664000175000017500000000223700000000000031350 0ustar00zuulzuul00000000000000--- other: - | Keystone has been fully converted to run under flask. All of the APIs are now natively dispatched under flask. Included in this change is a removal of a legacy WSGI environment data holder calld `openstack.params`. 
The data holder was used exclusively for communicating data down the chain under paste-deploy. The data in `openstack.params` was generally "normalized" in an odd way and unreferenced in the rest of the openstack code-base. Some minor changes to the JSON Home document occured to make it consistent with the rest of our convensions (Technically an API contract break) but required for the more strict view the Keystone flask code takes on setting up the values for JSON Home. Notably "application_credentials" now has an appropriate entry for listing and creating new app creds. JSON Body and URL Normalizing middleware were move to a flask-native model. Any middleware defined in Keystone's tree is no longer loaded via stevedore, and likewise the entry points were removed. Original WSGI Framework (custom, home-rolled, based on WEBOB) has been removed from the codebase. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1778109-ea15ce6a8207f857.yaml0000664000175000017500000000054400000000000024031 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1778109 `_] Previously the token data for a trust-scoped token may have contained duplicate roles, when implied roles were present. This is no longer the case, for the sake of accuracy and to prevent the breaking of applications which may consume this role list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1778945-b7f2db3052525ca8.yaml0000664000175000017500000000174300000000000024026 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1778945 `_] The pluggable interface for token providers has changed. If you're maintaining a custom token provider, you're going to be affected by these interface changes. Implementing the new interface will be required before using your custom token provider with the Rocky release of keystone. 
The new interface is more clear about the relationship and responsibilities between the token API and pluggable token providers. fixes: - | [`bug 1778945 `_] There were several improvements made to the token provider API and interface that simplify what external developers need to do and understand in order to provide their own token provider implementation. Please see the linked bug report for more details as to why these changes were made and the benefits they provide for both upstream and downstream developers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1779889-12eb5edf4cc93a1d.yaml0000664000175000017500000000032500000000000024252 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1779889 `_] Adds documentation about service tokens and configuring services to use service tokens for long running operations. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1779903-f2b22cf23a9e01f9.yaml0000664000175000017500000000026600000000000024102 0ustar00zuulzuul00000000000000--- features: - > [`bug 1779903 `_] When a project is deleted, the limits which belong to it will be deleted as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1780159-095ffa0e53be2464.yaml0000664000175000017500000000036300000000000024017 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1780159 `_] Revoke the `role` cache when creating a project. This removes the delay before making it appear in the list when a user has inherited role on it. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1780503-70ca1ba3f428dd41.yaml0000664000175000017500000000057000000000000024054 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1780503 `_] The notification wrapper now sets the initiator's id to the given user id. This fixes an issue where identity.authentication event would result in the initiator id being a random default UUID, rather than the user's id when said user would authenticate against keystone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1782704-0b053eaf5d801dee.yaml0000664000175000017500000000040300000000000024137 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1782704 `_] Checking for non-existant configuration files is more robust to ensure proper logging to users when passing configuration information to ``keystone-manage``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1782922-db822fda486ac773.yaml0000664000175000017500000000076100000000000024106 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1782922 `_] Fixed the problem where Keystone indiscriminately return the first RDN as the user ID, regardless whether it matches the configured 'user_id_attribute' or not. This will break deployments where 'group_members_are_ids' are set to False and 'user_id_attribute' is not in the DN. This patch will perform a lookup by DN if the first RND does not match the configured 'user_id_attribute'. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1784536-9d1d1e149c605a1d.yaml0000664000175000017500000000033700000000000024016 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1784536 `_] Keystone now return `401 Unauthorized` correctly when issuing a project-scoped token but the input project id is a domain id. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1785164-2b7ed29266eb4792.yaml0000664000175000017500000000073300000000000023754 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1785164 `_] Setting resource limits on domains is explicitly unsupported. Previously, it was possible to set a limit on a domain and the response would include the domain ID as the project ID of the limit. This issue has been corrected by explicitly opting domains out of limit support. A later release may include functionality for domains to be associated to limit resources. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1787874-13499ec227b8e26c.yaml0000664000175000017500000000122200000000000023755 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1787874 `_] The default value of the config option `unique_last_password_count` is changed from 1 to 0. Now `unique_last_password_count = 0` means password history check is disabled. `unique_last_password_count = 1` means when changing password, the new one should be different than the current one. upgrade: - | [`bug 1787874 `_] Please note that the deployment which sets `unique_last_password_count = 1` in the config file should update the value to 0 to keep the same behavior as before. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1788415-3190279e9c900f76.yaml0000664000175000017500000000244500000000000023622 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1788415 `_] [`bug 968696 `_] Policies protecting the ``/v3/credentials`` API have changed defaults in order to make the credentials API more accessible for all users and not just operators or system administrator. Please consider these updates when using this version of keystone since it could affect API behavior in your deployment, especially if you're using a customized policy file. security: - | [`bug 1788415 `_] [`bug 968696 `_] More granular policy checks have been applied to the credential API in order to make it more self-service for users. By default, end users will now have the ability to manage their credentials. fixes: - | [`bug 1788415 `_] [`bug 968696 `_] Improved self-service support has been implemented in the credential API. This means that end users have the ability to manage their own credentials as opposed to filing tickets to have deployment administrators manage credentials for users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1788694-4dc8b3ec47fc6084.yaml0000664000175000017500000000034200000000000024117 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1788694 `_] System-scoped tokens now support expanding role assignments to include implied roles in token creation and validation responses. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1789450-9dec1383ffd3de01.yaml0000664000175000017500000000053700000000000024167 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1789450 `_] When a mapped group that does not exist in keystone is found, instead of throwing a 500 error, keystone will now log the instance and continue. This is expected behavior as an external IdP may specify a group that does not exist within keystone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1792026-2de8345a89e2256b.yaml0000664000175000017500000000063300000000000023743 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1792026 `_] Formal documentation for user resource options has been added to the administrator guide and the API reference. This documentation helps describe how user options can improve user experience, namely for deployments looking to offer flexibility around PCI-DSS security requirements, among other things. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1794376-53ce14528f00f01d.yaml0000664000175000017500000000220500000000000023730 0ustar00zuulzuul00000000000000--- features: - | [`bug 1794376 `_] The domain API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1794376 `_] The domain API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides domain policies. 
deprecations: - | [`bug 1794376 `_] The following domain policy check strings have been deprecated in favor of more clear and concise defaults: * ``identity:get_domain`` * ``identity:list_domains`` * ``identity:create_domain`` * ``identity:update_domain`` * ``identtity:delete_domain`` Please consider these new default if your deployment overrides domain policies. security: - | [`bug 1794376 `_] The domain API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1794527-866b1caff67977f3.yaml0000664000175000017500000000202100000000000024041 0ustar00zuulzuul00000000000000--- features: - | Allow the creating of a domain with the additional, optional parameter of `explicit_domain_id` instead of auto-creating a domain_id from a uuid. When keeping two Keystone servers in sync, but avoiding Database replication, it was often necessary to hack the database to update the Domain ID so that entries match. Domain ID is then used for LDAP mapped IDs, and if they don't match, the user IDs are different. It should be possible to add a domain with an explicit ID, so that the two servers can match User IDs. The reason that the variable name is not simple `domain_id` is twofold: First to keep people from thinking that this is a required, or at least suggested field. Second, to prevent copy errors when creating a new domain, where the domain_id would be copied in from the old one, and having spurious failures, or undesirecd domain_id matching. 
https://specs.openstack.org/openstack/keystone-specs/specs/keystone/stein/explicit-domains-ids.html././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1794864-3116bf165a146be6.yaml0000664000175000017500000000443000000000000023737 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1794864 `_] [`bug 1794376 `_] The default policies that protect the domains API have been deprecated in favor of ones that are more secure and self-serviceable. If you're maintaining custom policies, please make sure you resolve your domain policies to work with the new default by adding the proper role assignments, or continue maintaining custom overrides. The new defaults allow for better protection of the domains API when giving the `admin` role to users on domains and projects. deprecations: - | [`bug 1794864 `_] [`bug 1794376 `_] The default policies that protect the domains API have been deprecated in favor of ones that are more secure and self-serviceable. If you're maintaining custom policies, please make sure you resolve your domain policies to work with the new default by adding the proper role assignments, or continue maintaining custom overrides. The new defaults allow for better protection of the domains API when giving the `admin` role to users on domains and projects. security: - | [`bug 1794864 `_] [`bug 1794376 `_] The default policies that protect the domains API have been deprecated in favor of ones that are more secure and self-serviceable. fixes: - | [`bug 1794864 `_] [`bug 1794376 `_] The default policies that protect the domains API have been deprecated in favor of ones that are more secure and self-serviceable. Users with roles on domains and projects are now able to call the ``GET /v3/domains/{domain_id}`` API if they use a token scoped to that domain or a token scoped to a project within that domain. 
System users are allowed to access the domain APIs in the same way legacy `admin` users were able to. This allows for better protection of the domain API when giving the `admin` role to users on domains and projects. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1796887-eaea84e3f9a8ff9f.yaml0000664000175000017500000000050100000000000024354 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1796887 `_] Add caching on trust role validation to improve performance. Services relying heavily on trusts are impacted as the trusts are validated against the database. This adds caching on those operations to improve performance ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1801095-6e28d7a86719da74.yaml0000664000175000017500000000027300000000000023751 0ustar00zuulzuul00000000000000--- features: - > [`bug 1801095 `_] Request ID and global request ID have been added to both basic and CADF notifications. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1801873-0eb9a5ec3e801190.yaml0000664000175000017500000000034000000000000024001 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1801873 `_] This fixes an issue where an LDAP-backed domain could not be deleted due to the existence of shadow users in the SQL database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804292-0107869c7029f79e.yaml0000664000175000017500000000114000000000000023604 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804292 `_] The region policies defined in ``policy.v3cloudsample.json`` have been removed. 
These policies are now obsolete after incorporating system-scope into the region API and implementing default roles. fixes: - | [`bug 1804292 `_] The region policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804446-1a281eadbb044070.yaml0000664000175000017500000000241200000000000023763 0ustar00zuulzuul00000000000000--- features: - | [`bug 1804446 `_] The regions API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804446 `_] The regions API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides region policies. deprecations: - | [`bug 1804446 `_] The ``identity:create_region``, ``identity:update_region``, and ``identity:delete_region`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the region policies. security: - | [`bug 1804446 `_] The regions API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804462-59ad43f98242dea0.yaml0000664000175000017500000000114300000000000024012 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804462 `_] The service policies defined in ``policy.v3cloudsample.json`` have been removed. 
These policies are now obsolete after incorporating system-scope into the service API and implementing default roles. fixes: - | [`bug 1804462 `_] The service policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804463-74537652166cf656.yaml0000664000175000017500000000273200000000000023533 0ustar00zuulzuul00000000000000--- features: - | [`bug 1804463 `_] The services API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804463 `_] The services API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides service policies. deprecations: - | [`bug 1804463 `_] The service policies have been deprecated. The ``identity:get_service`` and ``identity:list_services`` policies now use ``(role:reader and system_scope:all)`` instead of ``rule:admin_required``. The ``identity:create_service``, ``identity:update_service``, and ``identity:delete_service`` policies now use ``(role:admin and system_scope:all)`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides service policies. security: - | [`bug 1804463 `_] The services API now uses system-scope and default roles to provide better accessibility to users in a secure way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804482-aa95619320d098fa.yaml0000664000175000017500000000114700000000000023736 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804482 `_] The endpoint policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the endpoint API and implementing default roles. fixes: - | [`bug 1804482 `_] The endpoint policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804483-1d9ccfcb24f25f51.yaml0000664000175000017500000000274200000000000024156 0ustar00zuulzuul00000000000000--- features: - | [`bug 1804483 `_] The endpoint API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804483 `_] The endpoint API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides endpoint policies. deprecations: - | [`bug 1804483 `_] The endpoint policies have been deprecated. The ``identity:list_endpoints`` and ``identity:get_endpoint`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_endpoint``, ``identity:update_endpoint``, and ``identity:delete_endpoint`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. 
Please consider these new defaults if your deployment overrides the endpoint policies. security: - | [`bug 1804483 `_] The endpoint API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804516-24b0b10ed6fe0589.yaml0000664000175000017500000000320100000000000023777 0ustar00zuulzuul00000000000000features: - | [`bug 1804516 `_] The federated identity provider API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804516 `_] The federated identity provider API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides federated identity provider policies. deprecations: - | [`bug 1804516 `_] The federated identity provider policies have been deprecated. The ``identity:list_identity_providers`` and ``identity:get_identity_provider`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_identity_provider``, ``identity:update_identity_provider``, ``identity:delete_identity_provider`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the federated identity provider policies. security: - | [`bug 1804516 `_] The federated identity provider API now uses system-scope and default roles to provide better accessibility to users in a secure way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804517-a351aec088fee066.yaml0000664000175000017500000000123200000000000024066 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804517 `_] The federated identity provider policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the identity provider API and implementing default roles. fixes: - | [`bug 1804517 `_] The federated identity provider policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804519-8384a9ead261d4c2.yaml0000664000175000017500000000116400000000000024016 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804519 `_] The federated mapping policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the mapping API and implementing default roles. fixes: - | [`bug 1804519 `_] The federated mapping policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804520-d124599967923052.yaml0000664000175000017500000000121600000000000023440 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804520 `_] The federated service provider policies defined in ``policy.v3cloudsample.json`` have been removed. 
These policies are now obsolete after incorporating system-scope into the service provider API and implementing default roles. fixes: - | [`bug 1804520 `_] The federated service provider policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804521-3c0d9f567e8f532f.yaml0000664000175000017500000000302300000000000024020 0ustar00zuulzuul00000000000000features: - | [`bug 1804521 `_] The federated mapping API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804521 `_] The federated mapping API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides federated mapping policies. deprecations: - | [`bug 1804521 `_] The federated mapping policies have been deprecated. The ``identity:list_mappings`` and ``identity:get_mapping`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_mapping``, ``identity:update_mapping``, and ``identity:delete_mapping`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the federated mapping policies. security: - | [`bug 1804521 `_] The federated mapping API now uses system-scope and default roles to provide better accessibility to users in a secure way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804522-00df902cd2d74ee3.yaml0000664000175000017500000000316300000000000024062 0ustar00zuulzuul00000000000000--- features: - | [`bug 1804522 `_] The federated service provider API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1804522 `_] The federated service provider API uses new default policies that make it more accessible to end users and administrators. Please consider these new defaults if your deployment overrides federated service provider policies. deprecations: - | [`bug 1804522 `_] The federated service provider policies have been deprecated. The ``identity:get_service_provider`` and ``identity:list_service_providers`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_service_provider``, ``identity:update_service_provider``, and ``identity:delete_service_provider`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically include support for a read-only role and allow for more granular access to service provider APIs, making it easier for system administrators to delegate authorization. Please consider these new defaults if your deployment overrides the federated service provider policies. security: - | [`bug 1804522 `_] The federated service provider API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1804523-d1768909b13b167e.yaml0000664000175000017500000000301100000000000023650 0ustar00zuulzuul00000000000000--- features: - | [`bug 1804523 `_] The federated protocol API now supports the ``admin``, ``member``, and ``reader`` default roles. 
upgrade: - | [`bug 1804523 `_] The federated protocol API uses new default policies that make it more accessible to end users and administrators. Please consider these new defaults if your deployment overrides federated protocol policies. deprecations: - | [`bug 1804523 `_] The federated protocol policies have been deprecated. The ``identity:get_protocol`` and ``identity:list_protocols`` now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_protocol``, ``identity:update_protocol``, and ``identity:delete_protocol`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the federated protocol policies. security: - | [`bug 1804523 `_] The federated protocol API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805363-0b85d71917ad09d1.yaml0000664000175000017500000000301600000000000023730 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805363 `_] The oauth1 consumer API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805363 `_] The oauth1 consumer API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides oauth1 consumer policies. deprecations: - | [`bug 1805363 `_] The oauth1 consumer policies have been deprecated. The ``identity:get_consumer`` and ``identity:list_consumers`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:create_consumer``, ``identity:update_consumer``, and ``identity:delete_consumer`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the oauth1 consumer policies. security: - | [`bug 1805363 `_] The oauth1 consumer API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805366-670867516c6fc4bc.yaml0000664000175000017500000000351300000000000023747 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805366 `_] The Domain Config API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805366 `_] The Domain Config API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides Domain Config policies. deprecations: - | [`bug 1805366 `_] The Domain Config API policies have been deprecated. The ``identity:get_domain_config`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:get_domain_config_default`` policy now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``.The ``identity:create_domain_config`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:update_domain_config`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:delete_domain_config`` policy now uses ``role:admin and system_scope:all`` instead of ``rule:admin_required``. 
These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the domain config policies. security: - | [`bug 1805366 `_] The domain config API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805368-ea32c2db2ae57225.yaml0000664000175000017500000000426100000000000024064 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805368 `_] [`bug 1750669 `_] The grant API now supports the ``admin``, ``member``, and ``reader`` default roles for domain users (e.g., domain-scoped tokens). upgrade: - | [`bug 1805368 `_] [`bug 1750669 `_] The grant APIs use new default policies that make it more accessible to domain users in a safe and secure way. Please consider these new defaults if your deployment overrides the grant APIs. deprecations: - | [`bug 1805368 `_] [`bug 1750669 `_] The grant policies have been deprecated and replaced with new policies that expose grant APIs to domain users. This allows deployments to delegate more functionality to domain owners by default. The ``identity:check_grant`` and ``identity:list_grants`` policies now use ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or (role:reader and domain_id:%(target.group.domain_id)s)`` instead of ``role:reader and system_scope:all``. The ``identity:create_grant`` and ``identity:revoke_grant`` policies now use ``(role:admin and system_scope:all) or (role:admin and domain_id:%(target.user.domain_id)s) or (role:admin and domain_id:%(target.group.domain_id)s)`` instead of ``role:admin and system_scope:all``. 
These new defaults automatically include support for domain reader and domain administrator roles, making it easier for system administrator to delegate functionality down to domain users to manage grants within their domains. Please consider these new defaults if your deployment overrides the grant APIs. security: - | [`bug 1805368 `_] [`bug 1750669 `_] The grant API now supports domain-scoped default roles to provide better accessbility grants for domain users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805369-ed98d3fcfafb5c43.yaml0000664000175000017500000000460400000000000024333 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805369 `_] The group API now supports the ``admin``, ``member``, and ``reader`` default roles. - | [`bug 1808859 `_] The group API now supports using the ``domain`` scope type for performing domain-specific actions on groups and group membership. upgrade: - | [`bug 1805369 `_] The group API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides group policies. deprecations: - | [`bug 1805369 `_] The group policies have been deprecated. The ``identity:get_group``, ``identity:list_groups``, ``identity:list_users_in_group``, and ``identity:check_user_in_group`` policies now use ``role:reader and system_scope:all or (role:reader and domain_id:%(target.group.domain_id)s)`` instead of ``rule:admin_required``. The ``identity:list_groups_for_user`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.user.domain_id)s) or or user_id:%(user_id)s`` instead of ``rule:admin_or_owner``. 
The ``identity:create_group``, ``identity:update_group``, ``identity:delete_group``, ``identity:remove_user_from_group``, and ``identity:add_user_to_group`` policies now use ``role:admin and system_scope:all or (role:admin and domain_id:%(target.group.domain_id)s)`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and domain-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides group policies. security: - | [`bug 1805369 `_] The group API now uses system-scope and default roles to provide better accessibility to users in a secure way. - | [`bug 1808859 `_] The group API now supports using the ``domain`` scope for the reader, member, and admin role to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805371-249c8c9b562ab371.yaml0000664000175000017500000000310000000000000023725 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805371 `_] The implied roles API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805371 `_] The implied roles API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides implied roles policies. deprecations: - | [`bug 1805371 `_] The implied roles policies have been deprecated. The ``identity:get_implied_role``, ``identity:list_implied_roles``, ``identity:list_role_inference_rules``, and ``identity:check_implied_role`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:create_implied_role`` and ``identity:delete_implied_role`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the implied roles policies. security: - | [`bug 1805371 `_] The implied role API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805372-af4ebf4b19500b72.yaml0000664000175000017500000000232200000000000024060 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805372 `_] The registered limit and limit API now support the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805372 `_] Several of the registered limit and limit policies have been deprecated. The following policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``: * ``identity:create_registered_limits`` * ``identity:update_registered_limit`` * ``identity:delete_registered_limit`` * ``identity:create_limits`` * ``identity:update_limit`` * ``identity:delete_limit`` These policies are not being formally deprecated because the unified limits API is still considered experimental. These new default automatically account for system-scope. Please consider these new defaults if your deployment overrides the registered limit or limit policies. security: - | [`bug 1805372 `_] The registered limit and limit APIs now uses system-scope and default roles to provide better accessibility to users in a secure way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805400-c192be936d277ade.yaml0000664000175000017500000000301300000000000024064 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805400 `_] The domain roles API now supports system scope using the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805400 `_] The domain role API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides role policies. deprecations: - | [`bug 1805400 `_] The domain role policies have been deprecated. The ``identity:get_domain_role`` and ``identity:list_domain_roles`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_domain_role``, ``identity:update_domain_role``, and ``identity:delete_role`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the domain role policies. security: - | [`bug 1805400 `_] The domain role API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805402-75d0d93f31af620f.yaml0000664000175000017500000000266700000000000024015 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805402 `_] The role API now supports the ``admin``, ``member``, and ``reader`` default roles. 
upgrade: - | [`bug 1805402 `_] The role API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides role policies. deprecations: - | [`bug 1805402 `_] The role policies have been deprecated. The ``identity:get_role`` and ``identity:list_roles`` policies now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_role``, ``identity:update_role``, and ``identity:delete_role`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the role policies. security: - | [`bug 1805402 `_] The role API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805403-c003627a64768716.yaml0000664000175000017500000000345500000000000023516 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805403 `_] The project API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805403 `_] The project API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides project policies. deprecations: - | [`bug 1805403 `_] The project policies have been deprecated. The ``identity:get_project`` policy now uses ``(role:reader and system_scope:all) or project_id:%(target.project.id)s`` instead of ``rule:admin_required or project_id:%(target.project.id)s``. 
The ``identity:list_projects`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_project``, ``identity:update_project``, and ``identity:delete_project`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:list_user_projects`` policy now uses ``(role:admin and system_scope:all) or user_id:%(target.user.id)s`` instead of ``rule:admin_or_owner``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the project policies. security: - | [`bug 1805403 `_] The project API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805406-252b45d443af20b3.yaml0000664000175000017500000000402200000000000023706 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805406 `_] The user API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805406 `_] The ``GET /v3/users/{user_id`` API now properly returns an ``HTTP 403 Forbidden`` as opposed to ``HTTP 404 Not Found`` if the calling user doesn't have authorization to call the API. This applies consistent authorititive policy checks to the API. The user API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides user policies. deprecations: - | [`bug 1805406 `_] The user policies have been deprecated. 
The ``identity:get_user`` now uses ``(role:reader and system_scope:all) or (role:reader and token.domain.id:%(target.user.domain_id)s) or user_id:%(target.user.id)s`` instead of ``rule:admin_or_owner``. The ``identity:list_users`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.domain_id)s)`` instead of ``rule:admin_required``. The ``identity:create_user``, ``identity:update_user``, and ``identity:delete_user`` policies now use ``(role:admin and system_scope:all) or (role:admin and token.domain.id:%(target.user.domain_id)s)`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope, domain-scope, and support a read-only role, making it easier for system and domain administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the user policies. security: - | [`bug 1805406 `_] The user API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805409-8bc6cc9f1c5bc672.yaml0000664000175000017500000000707300000000000024166 0ustar00zuulzuul00000000000000--- features: - | [`bug 1805409 `_] The policy and policy associations API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1805409 `_] The policy and policy associations API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides policy and policy associations policies. deprecations: - | [`bug 1805409 `_] The policy and policy associations policies have been deprecated. The ``identity:get_policy`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:list_policies`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:update_policy`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``.The ``identity:create_policy`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:delete_policy`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:check_policy_association_for_endpoint`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:check_policy_association_for_service`` policy now uses ``role:reader and system_scope:all`` instead of ``role:reader and system_scope:all``. The ``identity:check_policy_association_for_region_and_service`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:get_policy_for_endpoint`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:list_endpoints_for_policy`` policy now use ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_policy_association_for_endpoint`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:delete_policy_association_for_endpoint`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_policy_association_for_service`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:delete_policy_association_for_service`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:create_policy_association_for_region_and_service`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:delete_policy_association_for_region_and_service`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the policy and policy associations policies. security: - | [`bug 1805409 `_] The policy and policy associations API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805880-0032024ea6b83563.yaml0000664000175000017500000000113500000000000023557 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1805880 `_] The limit policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the limit API and implementing default roles. fixes: - | [`bug 1805880 `_] The limit policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1805880-3fc6b30309a4370f.yaml0000664000175000017500000000117600000000000023732 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1805880 `_] The registered limit policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the registered limit API and implementing default roles. 
fixes: - | [`bug 1805880 `_] The registered limit policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806713-cf5feab23fc78a23.yaml0000664000175000017500000000113400000000000024227 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806713 `_] The role policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the role API and implementing default roles. fixes: - | [`bug 1806713 `_] The role policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-08ff9eecdc03c554.yaml0000664000175000017500000000225200000000000024163 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806762 `_] [`bug 1630434 `_] The entire ``policy.v3cloudsample.json`` file has been removed. If you were using this policy file to supply overrides in your deployment, you should consider using the defaults in code and setting ``keystone.conf [oslo_policy] enforce_scope=True``. The new policy defaults are more flexible, they're tested extensively, and they solve all the problems the ``policy.v3cloudsample.json`` file was trying to solve. fixes: - | [`bug 1806762 `_] [`bug 1630434 `_] The entire ``policy.v3cloudsample.json`` file has been removed. If you were using this policy file to supply overrides in your deployment, you should consider using the defaults in code and setting ``keystone.conf [oslo_policy] enforce_scope=True``. 
The new policy defaults are more flexible, they're tested extensively, and they solve all the problems the ``policy.v3cloudsample.json`` file was trying to solve. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-09f414995924db23.yaml0000664000175000017500000000117700000000000023614 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806762 `_] The user policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope, domain-scope, and project-scope into the user API and implementing default roles. fixes: - | [`bug 1806762 `_] The user policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-0b7356ace200a5d3.yaml0000664000175000017500000000115200000000000023773 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806762 `_] The grant policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope and domain-scope into the grant API and implementing default roles. fixes: - | [`bug 1806762 `_] The grant policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-2092fee9f6c87dc3.yaml0000664000175000017500000000116300000000000024114 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1804462 `_] The group policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope and domain-scope into the groups API and implementing default roles. fixes: - | [`bug 1804462 `_] The group policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-c3bfc71cb9bb94f3.yaml0000664000175000017500000000260100000000000024235 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806762 `_] The domain policies defined in ``policy.v3cloudsample.json`` have been removed. These policies are now obsolete after incorporating system-scope into the domain API and implementing default roles. Additionally, the ``identity:get_domain`` policy in ``policy.v3cloudsample.json`` has been relaxed slightly to allow all users with role assignments on a domain to retrieve that domain, as opposed to only allowing users with the ``admin`` role to access that policy. All policies in ``policy.v3cloudsample.json`` that are redundant with the defaults in code have been removed. This improves maintainability and leaves the ``policy.v3cloudsample.json`` policy file with only overrides. These overrides will eventually be moved into code or new defaults in keystone directly. If you're using the policies removed from ``policy.v3cloudsample.json`` please check to see if you can migrate to the new defaults or continue maintaining the policy as an override. 
fixes: - | [`bug 1806762 `_] The domain policies in ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1806762-daed3e27f58f0f6d.yaml0000664000175000017500000000144200000000000024246 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1806762 `_] [`bug 1804518 `_] The protocol policies defined in the ``policy.v3cloudsample.json`` policy file have been removed. These policies are now obsolete after incorporating system-scope into the federated protocol API and implementing default roles. fixes: - | [`bug 1806762 `_] [`bug 1804518 `_] The federated protocol policies in the ``policy.v3cloudsample.json`` policy file have been removed in favor of better defaults in code. These policies weren't tested exhaustively and were misleading to users and operators. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1809116-b65502f3b606b060.yaml0000664000175000017500000000173600000000000023643 0ustar00zuulzuul00000000000000--- features: - | [`bug 1809116 `_] It is now possible to have group memberships carried over through mapping persist for a limited time after a user authenticates using federation. The "time to live" of these memberships is specified via the configuration option `[federation] default_authorization_ttl` or for each identity provider by setting `authorization_ttl` on the identity provider. Every time a user authenticates carrying over that membership, it will be renewed. 
security: - | If expiring user group memberships are enabled via the `[federation] default_authorization_ttl` configuration option, or on an idp by idp basis by setting `authorization_ttl`, there will be a lag between when a user is removed from a group in an identity provider, and when that will be reflected in keystone. That amount of time will be equal to the last time the user logged in + idp ttl. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1810393-5a7d379842c51d9b.yaml0000664000175000017500000000034000000000000023737 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1810393 `_] Now when an identity provider protocol is deleted, the cache info for the related federated users will be invalidated as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1811605-9d23080d7e949c25.yaml0000664000175000017500000000100000000000000023646 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1811605 `_] Fixes X.509 tokenless auth by properly populating the request context with the necessary credential information. Since Stein release, RBAC has been using the credential information from the Keystone request context instead of the authentication context. Therefore, we'll need to populate the request context with the necessary credential information from the X.509 tokenless authentication context. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1813085-cf24b204e95fd7f5.yaml0000664000175000017500000000044700000000000024104 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1813085 `] Validation of federated domain-scoped tokens scoped to the ``default`` domain no longer results in an ``HTTP 404 Domain Not Found`` due to byte string conversion issues with msgpack in python 3. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1814589-f3e7f554bee1c317.yaml0000664000175000017500000000035200000000000024107 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1814589 `_] Fixes incorrect parameters passed into keystone.federation.utils.transform_to_group_ids() which resulted in HTTP 500 internal error. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1815771-ae0e4118c552f01e.yaml0000664000175000017500000000043400000000000024002 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1815771 `_] Allows operators to cache credentials to avoid lookups on the database. This operation can be turned on/off through the configuration parameter of keystone.conf [credential] caching. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1816076-ba39508e6ade529e.yaml0000664000175000017500000000115100000000000024100 0ustar00zuulzuul00000000000000--- features: - | `GET /v3/users/{user_id}` now returns a federated object associated with the user if any. `POST /v3/users` allows an operator to add a list of federated objects to associate with the user. `PATCH /v3/users` allows the operator to update a users associated federated objects. upgrade: - | If you have a custom implementation for the shadow users backend, you will need to implement the new methods: ``delete_federated_object``, ``create_federated_object``, ``get_federated_objects``. These methods are needed to support federated attributes via the user API. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1816927-e17f4e596e611380.yaml0000664000175000017500000000057000000000000023673 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1816927 `_] It was discovered that the order in which fernet keys are distributed after fernet key rotation has impact on keystone service. All operators are advised to ensure that during fernet key distribution the new primary fernet key (with largest number) is distributed first. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1817313-c11481e6eed29ec2.yaml0000664000175000017500000000047600000000000024076 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1817313 `_] Raise METHOD NOT ALLOWED for OS-Federation protocols creation if the protocol_id is not in the URL. The corrective action was to split the LIST from CRUD resources so that the routing regexes can work as expected.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1818725-96d698e22e648764.yaml0000664000175000017500000000407400000000000023637 0ustar00zuulzuul00000000000000--- features: - | [`bug 1818725 `_] [`bug 1750615 `_] The application credential API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1818725 `_] [`bug 1750615 `_] The application credential API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides application credential policies. deprecations: - | [`bug 1818725 `_] [`bug 1750615 `_] The application credential policies have been deprecated. 
The ``identity:get_application_credential`` policy now uses ``(role:reader and system_scope:all) or user_id:%(user_id)s`` instead of ``rule:admin_required or user_id:%(user_id)s``. The ``identity:list_application_credentials`` policy now uses ``(role:reader and system_scope:all) or user_id:%(user_id)s`` instead of ``rule:admin_required or user_id:%(user_id)s``. The ``identity:delete_application_credential`` policy now use ``(role:admin and system_scope:all) or user_id:%(user_id)s`` instead of ``rule:admin_required or user_id:%(user_id)s``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the application credential policies. security: - | [`bug 1818725 `_] [`bug 1750615 `_] The application credential API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1818734-d753bfae60ffd030.yaml0000664000175000017500000000534500000000000024160 0ustar00zuulzuul00000000000000--- features: - | [`bug 1818734 `_] The endpoint groups API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1818734 `_] The endpoint groups API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides endpoint groups policies. deprecations: - | [`bug 1818734 `_] The endpoint groups policies have been deprecated. The ``identity:list_endpoint_groups`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. 
The ``identity:get_endpoint_group`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:update_endpoint_group`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``.The ``identity:create_endpoint_group`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:delete_endpoint_group`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:list_projects_associated_with_endpoint_group`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:get_endpoint_group_in_project`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:list_endpoints_associated_with_endpoint_group`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:list_endpoint_groups_for_project`` policy now uses ``role:reader and system_scope:all`` instead of ``rule:admin_required``. The ``identity:add_endpoint_group_to_project`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. The ``identity:remove_endpoint_group_from_project`` policy now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the endpoint group policies. 
security: - | [`bug 1818734 `_] The endpoint group API now uses system-scope and default roles to provide better accessibility to users in a secure manner.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1818736-98ea186a074056f4.yaml0000664000175000017500000000150300000000000023667 0ustar00zuulzuul00000000000000--- features: - | [`bug 1818736 `_] The ``identity:get_limit``, ``identity:list_limits`` and ``identity:get_limit_model`` policies now support domain scope, so domain users are now able to get limit information on their own domains as well as see the limit model in effect. upgrade: - | [`bug 1818736 `_] The ``identity:get_limit`` policy default check string has been changed to support domain scope. This policy are not being formally deprecated because the unified limits API is still considered experimental. These new default automatically account for domain scope in addition to system scope. Please consider these new defaults if your deployment overrides the limit policies. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1818845-05f8c3af5ea9abc7.yaml0000664000175000017500000000064300000000000024246 0ustar00zuulzuul00000000000000--- deprecations: - | [`bug 1818845 `_] The ``identity:revocation_list`` policy has been deprecated for removal. This policy didn't actually protect the revocation list API since that API is unenforced and unprotected. It only returns an ``HTTP 410`` or ``HTTP 403`` depending on how keystone is configured. This policy can be safely removed. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1818846-d1a8c77d20659ad6.yaml0000664000175000017500000000410400000000000024026 0ustar00zuulzuul00000000000000--- features: - | [`bug 1818846 `_] The trusts API now supports the ``admin``, ``member``, and ``reader`` default roles. System users can now audit and clean up trusts using the default policies. upgrade: - | [`bug 1818846 `_] [`bug 1818850 `_] The trusts API uses new default policies that make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides trust policies. deprecations: - | [`bug 1818846 `_] [`bug 1818850 `_] The trust policies have been deprecated. The ``identity:list_trusts`` policy now uses ``(role:reader and system_scope:all)`` instead of ``rule_admin_required``. The ``identity:list_roles_for_trust``, ``identity:get_role_for_trust``, and ``identity:get_trust`` policies now use ``(role:reader and system_scope:all) or user_id:%(target.trust.trustor_user_id)s or user_id:%(target.trust.trustee_user_id)s`` instead of``user_id:%(target.trust.trustor_user_id)s or user_id:%(target.trust.trustee_user_id)s``. The ``identity:delete_trust`` policy now uses ``(role:admin and system_scope:all) or user_id:%(target.trust.trustor_user_id)s`` instead of ``user_id:%(target.trust.trustor_user_id)s``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides trust policies. security: - | [`bug 1818846 `_] [`bug 1818850 `_] The trusts API now uses system-scope and default roles to provide better accessibility to users in a secure way. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1819036-e2d24655c70d0aad.yaml0000664000175000017500000000070200000000000024057 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1819036 `_] Middleware that processes requests in front of keystone now caches tokens per request, eliminating unnecessary round trips to validate tokens on every request. This change doesn't require the usage of any configuration options to take effect. The fix for this bug improved performance ~20% during testing and impacts most of keystone's API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1820333-356dcc8bf9f73fed.yaml0000664000175000017500000000151700000000000024247 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1798184 `_] [`bug 1820333 `_] In Python 3, python-ldap no longer allows bytes for some fields (DNs, RDNs, attribute names, queries). Instead, text values are represented as str, the Unicode text type. Compatibility support is provided for Python 2 by setting bytes_mode=False [1]. The keystone LDAP backend is updated to adhere to this behavior by using bytes_mode=False for Python 2 and dropping UTF-8 encoding and decoding fields that are now represented as text in python-ldap. [1] More details about byte/str usage in python-ldap can be found at: http://www.python-ldap.org/en/latest/bytes_mode.html#bytes-mode Note that at a minimum python-ldappool 2.3.1 is required. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1823258-9649b56a440b5ae1.yaml0000664000175000017500000000074700000000000023744 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1823258 `_] The ``keystone-manage bootstrap`` command now defaults to making the default roles (`admin`, `member`, and `reader`) immutable. 
This has the consequence that if the bootstrap command is re-run on an existing deployment, those roles will become immutable if they were not before. To opt out of this behavior, add the ``--no-immutable-roles`` flag to the bootstrap command. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1823258-9f93dbdc0fa8441d.yaml0000664000175000017500000000114600000000000024162 0ustar00zuulzuul00000000000000--- features: - | [`bug 1823258 `_] Adds support for an "immutable" resource option for roles, which when enabled prevents accidental harmful modification or deletion of roles. Also adds a new flag ``--immutable-roles`` to the ``keystone-manage bootstrap`` command to make the default roles (admin, member, and reader) immutable by default, as well as a check in the ``keystone-status upgrade check`` command to check that these roles have been made immutable. In a future release, these three roles will be immutable by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1827431-2f078c13dfc9a02a.yaml0000664000175000017500000000057300000000000024066 0ustar00zuulzuul00000000000000--- features: - | [`bug 1827431 `_] Added a new user option 'ignore_user_inactivity' (defaults to False). When set to True, it overrides disabling the user after being inactive for certain time as set in ``[security_compliance]disable_user_account_days_inactive`` option in Keystone configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1831918-c70cf87ef086d871.yaml0000664000175000017500000000021000000000000024026 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1831918 `_] Credentials now logs cadf audit messages. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1832265-cb76ccf505c2d9d1.yaml0000664000175000017500000000045200000000000024152 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1832265 `_] Binary msgpack payload types are now consistently and correctly decoded when running Keystone under Python 3, avoiding any TypeErrors when attempting to convert binary encoded strings into UUID's. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1833739-f962e8caf3e22068.yaml0000664000175000017500000000070700000000000024035 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1833739 `_] Fix PostgreSQL specifc issue with storing encrypted credentials. In Python 3 the psycopg2 module treats bytes strings as binary data. This causes issues when storing encrypted credentials in the Database. To fix this isseu the credentials sql backend is updated to encode the credential into a text string before handing it over to the database. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1836568-66d853a1f22c5530.yaml0000664000175000017500000000103400000000000023656 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1836568 `_ Addresses a side effect of the large series of policy migrations in which the volume of deprecation warnings that were emitted had become too massive to be helpful. Instead of emitting warnings for individual policy rules, the keystone server now emits a single warning indicating problematic rules were found. Operators can use oslopolicy-policy-generator and oslopolicy-policy-upgrade to find and resolve deprecated policies. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1839133-24570c9fbacb530d.yaml0000664000175000017500000000025000000000000024060 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1839133 `_] Makes user_enabled_emulation_use_group_config honor group_members_are_ids. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1839577-1226d86ea0744055.yaml0000664000175000017500000000042300000000000023602 0ustar00zuulzuul00000000000000--- features: - > [`bug 1839577 `_] TOTP now allows by default the code from the previous time window to be considered valid as part of auth. This can be disabled, or the extended up to ten previous windows.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1840291-35af1ac7ba06e166.yaml0000664000175000017500000000034500000000000024060 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1840291 `_] Adds retries for ``delete_credential_for_user`` method to avoid DBDeadlocks when deleting large number of credentials concurrently. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1841486-425f367925f5e03f.yaml0000664000175000017500000000041700000000000023673 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1841486 `_] The ``keystone-manage mapping_engine --engine-debug`` CLI tool now outputs useful information about the direct mappings from an assertion after processing mapping rules. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1843609-8498b132222596b7.yaml0000664000175000017500000000066600000000000023536 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1843609 `] Fixed an issue where system-scoped tokens couldn't be used to list users and groups (e.g., GET /v3/users or GET /v3/groups) if ``keystone.conf [identity] domain_specific_drivers_enabled=True`` and the API would return an ``HTTP 401 Unauthorized``. These APIs now recognize system-scoped tokens when using domain-specific drivers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1844157-7808af9bcea0429d.yaml0000664000175000017500000000115100000000000024100 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1844157 `_] When performing `keystone-manage db_sync --check` if the legacy repo started at the same version number as the expand/contract/migrate repos the check to see if the db was under version control failed indicating that the db was up-to-date. This was due to the function `get_init_version` never receiving the path for the repo queried for version information. The fix is to ensure the repo path is always passed to get_init_version from the `keystone.common.sql.upgrade.get_db_version` function. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1844194-48ae60db49f91bd4.yaml0000664000175000017500000000415100000000000024104 0ustar00zuulzuul00000000000000--- features: - | [`bug 1844194 `_] [`bug 1844193 `_] The project tags API now supports the ``admin``, ``member``, and ``reader`` default roles. upgrade: - | [`bug 1844194 `_] [`bug 1844193 `_] The project tags API now uses new default policies that make it more accessible to end users and administrators in a secure way. 
Please consider these new defaults if your deployment overrides the project tags policies. deprecations: - | [`bug 1844194 `_] [`bug 1844193 `_] The project tags API policies have been deprecated. The ``identity:get_project_tag`` and ``identity:list_project_tags`` policies now use ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.project.domain_id)s) or project_id:%(target.project.id)s`` instead of ``rule:admin_required or project_id:%(target.project.id)s``. The ``identity:update_project_tags``, ``identity:delete_project_tags``, ``identity:delete_project_tag``, and ``identity:create_project_tag`` policies now use ``(role:admin and system_scope:all) or (role:admin and domain_id:%(target.project.domain_id)s) or (role:admin and project_id:%(target.project.id)s)`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility with compromising security. Please consider these new defaults if your deployment overrides the project tag policies. security: - | [`bug 1844194 `_] [`bug 1844193 `_] The project tags API now uses system-scope and default roles to provide better accessibility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1844207-x27a31f3403xfd7y.yaml0000664000175000017500000000043500000000000024163 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1844207 `_] Fixes an issue with WebSSO auth where a server error was raised if a remote ID can't be found for the requested federation protocol, now correctly raises an Unauthorized client error. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1844461-08a8bdc5f613b88d.yaml0000664000175000017500000000277200000000000024110 0ustar00zuulzuul00000000000000--- features: - | [`bug 1844461 `_] Listing role assignments for a project subtree is now allowed by system readers and domain readers in addition to project admins. upgrade: - | [`bug 1844461 `_] The ``identity:list_role_assignments_for_subtree`` policy now allows system and domain readers to list role assignments for a project subtree and deprecates the old ``rule:admin_required`` policy check string. Please consider the new policies if your deployment overrides role assignment policies. deprecations: - | [`bug 1844461 `_] The role assignment ``identity:list_role_assignments_for_subtree`` policy now uses ``(role:reader and system_scope:all) or (role:reader and domain_id:%(target.project.domain_id)s) or (role:admin and project_id:%(target.project.id)s)`` instead of ``rule:admin_required``. This new default automatically includes support for a read-only role and allows for more granular access to the role assignment API. Please consider this new default if your deployment overrides the role assignment policies. security: - | [`bug 1844461 `_] Listing role assignments for a project subtree now uses system-scope, domain-scope, project-scope, and default roles to provide better accessbility to users in a secure way. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1844664-905cf6cad2e032a7.yaml0000664000175000017500000000342100000000000024071 0ustar00zuulzuul00000000000000--- features: - | [`bug 1844664 `_] The Project Endpoints API now supports the ``admin``, ``member``, and ``reader`` default roles. 
upgrade: - | [`bug 1844664 `_] The Project Endpoints API uses new default policies to make it more accessible to end users and administrators in a secure way. Please consider these new defaults if your deployment overrides Project Endpoints policies. deprecations: - | [`bug 1844664 `_] The Project Endpoints policies have been deprecated. The ``identity:list_projects_for_endpoint`` now use ``(role:reader and system_scope:all)`` ``identity:check_endpoint_in_project`` policies now use ``role:reader and system_scope:all`` and ``identity:list_endpoints_for_project`` now use ``(role:reader and system_scope:all)`` instead of ``rule:admin_required``. The ``identity:add_endpoint_to_project`` now use ``(role:admin and system_scope:all)`` instead of ``rule:admin_required``and ``identity:remove_endpoint_from_project`` policies now use ``role:admin and system_scope:all`` instead of ``rule:admin_required``. These new defaults automatically account for system-scope and support a read-only role, making it easier for system administrators to delegate subsets of responsibility without compromising security. Please consider these new defaults if your deployment overrides the Project Endpoints policies. security: - | [`bug 1844664 `_] The Project Endpoints API now uses system-scope and default roles to provide better accessibility to users in a secure manner. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1848238-f6533644f7907358.yaml0000664000175000017500000000036600000000000023550 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1848238 `_] Allow deleting a domain when using the ldap driver for a domain. There was an attempt to delete the group on the ldap whereas this one is read-only. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1848342-317c9e4afa65a3ff.yaml0000664000175000017500000000203600000000000024161 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1848342 `_] There was an inconsistency in the ephemeral user update flow. Every time a federated user logged in, keystone created an entry in the local_user table instead of just updating the entries in the user and federated_user tables, which caused duplicate entries when listing users. Now, the keystone will not create the entry in the local_user table while updating an ephemeral user. If you are affected by this bug, a fix in the keystone database will be needed so we recommend to dump the users' tables before doing this process: mysql db example: - mysqldump -h -p -P -u keystone keystone federated_user local_user user > user_tables.sql - mysql -h -D keystone -p -P -u keystone -e 'delete from local_user where user_id in (select user_id from federated_user);' SQL: - delete from local_user where user_id in (select user_id from federated_user); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1855080-08b28181b7cb2470.yaml0000664000175000017500000000237000000000000023645 0ustar00zuulzuul00000000000000--- critical: - | [`bug 1855080 `_] An error in the policy target filtering inadvertently allowed any user to list any credential object with the /v3/credentials API when ``[oslo_policy]/enforce_scope`` was set to false, which is the default. This has been addressed: users with non-admin roles on a project may not list other users' credentials. However, users with the admin role on a project may still list any users credentials when ``[oslo_policy]/enforce_scope`` is false due to `bug 968696 `_. 
security: - | [`bug 1855080 `_] An error in the policy target filtering inadvertently allowed any user to list any credential object with the /v3/credentials API when ``[oslo_policy]/enforce_scope`` was set to false, which is the default. This has been addressed: users with non-admin roles on a project may not list other users' credentials. However, users with the admin role on a project may still list any users credentials when ``[oslo_policy]/enforce_scope`` is false due to `bug 968696 `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1856881-277103af343187f1.yaml0000664000175000017500000000037700000000000023607 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1856881 `_] ``keystone-manage bootstrap`` can be run in upgrade scenarios where pre-existing domain-specific roles exist named ``admin``, ``member``, and ``reader``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1856904-101af15bb48eb3ca.yaml0000664000175000017500000000071100000000000024134 0ustar00zuulzuul00000000000000--- fixes: - | [`Bug 1856904 `_] The initiator object for CADF notifications now will always contain the username for the user who initated the action. Previously, the initator object only contained the user_id, which lead to issues mapping to users when using LDAP-backed identity providers. This also helps the initiator object better conform to the OpenStack standard for CADF. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1856962-2c87d541da61c727.yaml0000664000175000017500000000031100000000000023740 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1856962 `_] Fixes an issue where federated users could not authenticate if their mapped group membership was empty. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1858012-584267ada7e33f2c.yaml0000664000175000017500000000044700000000000024020 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1858012 `_] Fixes a bug in the /v3/role_assignments filtering where the `role.id` query parameter didn't properly filter role assignments by role in cases where there were multiple system role assignments. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872732-7261816d0b170008.yaml0000664000175000017500000000035600000000000023506 0ustar00zuulzuul00000000000000--- features: - > [`bug 1872732 `_] 'user_limit' is added to config file of credentials that allows user to set maximum number of credentials a user is permitted to create. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872733-2377f456a57ad32c.yaml0000664000175000017500000000125200000000000023740 0ustar00zuulzuul00000000000000--- critical: - | [`bug 1872733 `_] Fixed a critical security issue in which an authenticated user could escalate their privileges by altering a valid EC2 credential. security: - | [`bug 1872733 `_] Fixed a critical security issue in which an authenticated user could escalate their privileges by altering a valid EC2 credential. fixes: - | [`bug 1872733 `_] Fixed a critical security issue in which an authenticated user could escalate their privileges by altering a valid EC2 credential. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872735-0989e51d2248ce1e.yaml0000664000175000017500000000327000000000000023751 0ustar00zuulzuul00000000000000--- critical: - | [`bug 1872735 `_] Fixed a security issue in which a trustee or an application credential user could create an EC2 credential or an application credential that would permit them to get a token that elevated their role assignments beyond the subset delegated to them in the trust or application credential. A new attribute ``app_cred_id`` is now automatically added to the access blob of an EC2 credential and the role list in the trust or application credential is respected. security: - | [`bug 1872735 `_] Fixed a security issue in which a trustee or an application credential user could create an EC2 credential or an application credential that would permit them to get a token that elevated their role assignments beyond the subset delegated to them in the trust or application credential. A new attribute ``app_cred_id`` is now automatically added to the access blob of an EC2 credential and the role list in the trust or application credential is respected. fixes: - | [`bug 1872735 `_] Fixed a security issue in which a trustee or an application credential user could create an EC2 credential or an application credential that would permit them to get a token that elevated their role assignments beyond the subset delegated to them in the trust or application credential. A new attribute ``app_cred_id`` is now automatically added to the access blob of an EC2 credential and the role list in the trust or application credential is respected. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872737-f8e1ad3b6705b766.yaml0000664000175000017500000000245400000000000024034 0ustar00zuulzuul00000000000000--- feature: - | [`bug 1872737 `_] Added a new config option ``auth_ttl`` in the ``[credential]`` config section to allow configuring the period for which a signed token request from AWS is valid. The default is 15 minutes in accordance with the AWS Signature V4 API reference. upgrade: - | [`bug 1872737 `_] Added a default TTL of 15 minutes for signed EC2 credential requests, where previously an EC2 signed token request was valid indefinitely. This change in behavior is needed to protect against replay attacks. security: - | [`bug 1872737 `_] Fixed an incorrect EC2 token validation implementation in which the timestamp of the signed request was ignored, which made EC2 and S3 token requests vulnerable to replay attacks. The default TTL is 15 minutes but is configurable. fixes: - | [`bug 1872737 `_] Fixed an incorrect EC2 token validation implementation in which the timestamp of the signed request was ignored, which made EC2 and S3 token requests vulnerable to replay attacks. The default TTL is 15 minutes but is configurable. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872753-e2a934eac919ccde.yaml0000664000175000017500000000055100000000000024244 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1872753 `_] Added validation to the EC2 credential API to prevent altering the ``access_id`` field in the blob attribute. This prevents accidentally orphaning an EC2 credential resource when an altered ``access_id`` no longer resolves to the credential's resource ID. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1872755-2c81d3267b89f124.yaml0000664000175000017500000000207300000000000023670 0ustar00zuulzuul00000000000000--- security: - | [`bug 1872755 `_] Added validation to the EC2 credentials update API to ensure the metadata labels 'trust_id' and 'app_cred_id' are not altered by the user. These labels are used by keystone to determine the scope allowed by the credential, and altering these automatic labels could enable an EC2 credential holder to elevate their access beyond what is permitted by the application credential or trust that was used to create the EC2 credential. fixes: - | [`bug 1872755 `_] Added validation to the EC2 credentials update API to ensure the metadata labels 'trust_id' and 'app_cred_id' are not altered by the user. These labels are used by keystone to determine the scope allowed by the credential, and altering these automatic labels could enable an EC2 credential holder to elevate their access beyond what is permitted by the application credential or trust that was used to create the EC2 credential. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1873290-ff7f8e4cee15b75a.yaml0000664000175000017500000000212100000000000024245 0ustar00zuulzuul00000000000000--- security: - | [`bug 1873290 `_] [`bug 1872735 `_] Fixed the token model to respect the roles authorized OAuth1 access tokens. Previously, the list of roles authorized for an OAuth1 access token were ignored, so when an access token was used to request a keystone token, the keystone token would contain every role assignment the creator had for the project. This also fixed EC2 credentials to respect those roles as well. fixes: - | [`bug 1873290 `_] [`bug 1872735 `_] Fixed the token model to respect the roles authorized OAuth1 access tokens. 
Previously, the list of roles authorized for an OAuth1 access token were ignored, so when an access token was used to request a keystone token, the keystone token would contain every role assignment the creator had for the project. This also fixed EC2 credentials to respect those roles as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1878938-70ee2af6fdf66004.yaml0000664000175000017500000000140000000000000024106 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1878938 `_] Previously when a user used to have system role assignment and tries to delete the same role, the system role assignments still existed in system_assignment table. This causes keystone to return `HTTP 404 Not Found` errors when listing role assignments with names (e.g., `--names` or `?include_names`). If you are affected by this bug, you must remove stale role assignments manually. The following is an example SQL statement you can use to fix the issue, but you should verify it's applicability to your deployment's SQL implementation and version. SQL: - delete from system_assignment where role_id not in (select id from role); ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1880252-51036d5353125e15.yaml0000664000175000017500000000056500000000000023505 0ustar00zuulzuul00000000000000--- features: - | Mappings can now specify "whitelist" and "blacklist" conditionals as regular expressions. Prior, only "not_any_of" and "any_one_of" conditionals supported regular expression matching. 
fixes: - | [`bug 1880252 `_] Regexes are not allowed in "whitelist" and "blacklist" conditionals ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1885753-51df25f3ff1d9ae8.yaml0000664000175000017500000000035700000000000024201 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1885753 `_] Keystone's SQL identity backend now retries update user requests to safely handle stale data when two clients update a user at the same time. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1886017-bc2ad648d57101a2.yaml0000664000175000017500000000023300000000000023777 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1886017 `_] JWT validation now supports `allow_expired` query parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1889936-78d6853b5212b8f1.yaml0000664000175000017500000000024000000000000023675 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1889936 `_] Properly decode octet strings, or byte arrays, returned from LDAP. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1896125-b17a4d12730fe493.yaml0000664000175000017500000000037500000000000023737 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1896125 `_] Introduced more robust connection handling for asynchronous LDAP requests to address memory leaks fetching data from LDAP backends with low page sizes. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1897280-e7065c4368a325ad.yaml0000664000175000017500000000045300000000000023743 0ustar00zuulzuul00000000000000--- fixes: - | [ `Bug 1897230 `_] Allows s3 tokens with service types sts and iam to authenticate. This is necessary when using assumed role features of Ceph object storage and keystone is providing the authentication service for Rados Gateway. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1901207-13762f85b8a04481.yaml0000664000175000017500000000047200000000000023570 0ustar00zuulzuul00000000000000--- security: - | [`bug 1901207 `_] Policy enforcement for application credentials has been updated to protect against invalid ownership checks resulting in unauthorized users being able to get and delete application credentials for other users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1901654-69b9f35d11cd0c75.yaml0000664000175000017500000000123100000000000024012 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1901654 `_] Previously, generate_public_ID() in sha256.py assumed the passed arguments is str data type. However, python-ldap 3.0 or later returns bytes data type for attribute values except fields of distinguished names, relative distinguished names, attribute names, queries. If keystone running on Python3 is integrated with LDAP and the LDAP server has local_id variable in its attribute, user login operations will fail due to the assumption and modifiation of python-ldap. By this fix, generate_public_ID() properly handles bytes data type in the parameter. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1926483-a77ab887e0e7f5c9.yaml0000664000175000017500000000037700000000000024130 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1926483 `_] Keystone will only log warnings about token length for Fernet tokens when the token length exceeds the value of `keystone.conf [DEFAULT] max_token_size`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1929066-6e741c9182620a37.yaml0000664000175000017500000000037700000000000023610 0ustar00zuulzuul00000000000000--- upgrade: - | [`bug 1929066 `_] Increase the length of the `local_id` column in the `id_mapping` table to accommodate LDAP group names that result in names greater than 64 characters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1941020-cleanup-541a2d372a1cf4cd.yaml0000664000175000017500000000033700000000000025473 0ustar00zuulzuul00000000000000--- upgrade: - | The following deprecated options in the ``[memcache]`` section have been removed. - ``dead_retry`` - ``pool_maxsize`` - ``pool_unused_timeout`` - ``pool_connection_get_timeout`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1941020-f694395a9bcea72f.yaml0000664000175000017500000000053100000000000024075 0ustar00zuulzuul00000000000000--- deprecations: - | The following options in the ``[memcache]`` section have been deprecated because these options have had no effect since Pike. Please use ``memcache_*`` options in the ``[cache]`` section instead. 
- ``dead_retry`` - ``pool_maxsize`` - ``pool_unused_timeout`` - ``pool_connection_get_timeout`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-1951632-11272e49e2fa439d.yaml0000664000175000017500000000075700000000000023744 0ustar00zuulzuul00000000000000--- features: - | [`bug 1951632 `_] ``Support has been added for deploying `service` role during the bootstrap process in addition to the `admin`, `member` and `reader` role.`` upgrades: - | ``If the bootstrap process is re-run, and a `service` role already exists, it does not recreate the `service` role. See [`bug 1951632 `_] for more details.`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug-2074018-28f7bbe8f28f5efe.yaml0000664000175000017500000000261500000000000024254 0ustar00zuulzuul00000000000000--- features: - | Added a new command to the admin cli tool: `keystone-manage reset_last_active`. This new command updates the database to overwritet any NULL values in `last_active_at` in the user table to the current time. This is a necessary step to fix Bug #2074018. See launchpad for details. fixes: - | Fixed Bug #2074018: Changed the user model to always save the date of the last user activity in `last_active_at`. Previous to this change, the `last_active_at` field was only updated when the option for `[security_compliance] disable_user_account_days_inactive` was set. If your deployment is affected by this bug, you must run `keystone-manage reset_last_active` before setting the `disable_user_account_days_inactive` option. security: - | The new `keystone-manage rest_last_active` command resets all NULL values in `last_active_at` in the user table to help fix Bug #2074018. 
Running this command may be necessary in environments that have been deployed for a long time and later decide to adopt the `[security_compliance disable_user_account_days_inactive = X` option. See Bug #2074018 for details. A side-effect of this command is that it resets the amount of time that an unused account is active for. Unused accounts will remain active until the configured days have elapsed since the day the command is run. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug1828565-0790c4c60ba34100.yaml0000664000175000017500000000032200000000000023555 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1828565 `_] Fixes endpoint group listing by name. This allows the openstackclient command to search endpoint groups by name. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug_1526462-df9a3f3974d9040f.yaml0000664000175000017500000000027500000000000024112 0ustar00zuulzuul00000000000000--- features: - > [`bug 1526462 `_] Support for posixGroups with OpenDirectory and UNIX when using the LDAP identity driver. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug_1543048_and_1668503-7ead4e15faaab778.yaml0000664000175000017500000000450700000000000026057 0ustar00zuulzuul00000000000000--- features: - | * [`bug 1543048 `_] [`bug 1668503 `_] Keystone now supports multiple forms of password hashing. Notably bcrypt, scrypt, and pbkdf2_sha512. The options are now located in the `[identity]` section of the configuration file. To set the algorithm use `[identity] password_hash_algorithm`. To set the number of rounds (time-complexity, and memory-use in the case of scrypt) use `[identity] password_hash_rounds`. `scrypt` and `pbkdf2_sha512` have further tuning options available. 
Keystone now defaults to using `bcrypt` as the hashing algorithm. All passwords will continue to function with the old sha512_crypt hash, but new password hashes will be bcrypt. upgrade: - | * If performing rolling upgrades, set `[identity] rolling_upgrade_password_hash_compat` to `True`. This will instruct keystone to continue to hash passwords in a manner that older (pre Pike release) keystones can still verify passwords. Once all upgrades are complete, ensure this option is set back to `False`. deprecations: - | * `[DEFAULT] crypt_strength` is deprecated in favor of `[identity] password_hash_rounds`. Note that `[DEFAULT] crypt_strength` is still used when `[identity] rolling_upgrade_password_hash_compat` is set to `True`. security: - | * The use of `sha512_crypt` is considered inadequate for password hashing in an application like Keystone. The use of bcrypt or scrypt is recommended to ensure protection against password cracking utilities if the hashes are exposed. This is due to Time-Complexity requirements for computing the hashes in light of modern hardware (CPU, GPU, ASIC, FPGA, etc). Keystone has moved to bcrypt as a default and no longer hashes new passwords (and password changes) with sha512_crypt. It is recommended passwords be changed after upgrade to Pike. The risk of password hash exposure is limited, but for the best possible protection against cracking the hash it is recommended passwords be changed after upgrade. The password change will then result in a more secure hash (bcrypt by default) being used to store the password in the DB. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug_1674415-e8a7345aa2b05ab7.yaml0000664000175000017500000000037400000000000024147 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1674415 `_] Fixed issue with translation of keystone error messages which was not happening in case of any error messages from identity API with locale being set. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug_1688188-256e3572295231a1.yaml0000664000175000017500000000042700000000000023607 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1688188 `_] When creating an IdP, if a domain was generated for it and a conflict was raised while effectively creating the IdP in the database, the auto-generated domain is now cleaned up. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/bug_1698900-f195125bf341d887.yaml0000664000175000017500000000055400000000000023756 0ustar00zuulzuul00000000000000fixes: - | [`bug 1698900 `_] The implementation for checking database state during an upgrade with the use of `keystone-manage db_sync --check` has been corrected. This allows users and automation to determine what step is next in a rolling upgrade based on logging and command status codes. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/catalog-caching-12f2532cfb71325a.yaml0000664000175000017500000000046400000000000025320 0ustar00zuulzuul00000000000000--- features: - > [`bug 1489061 `_] Caching has been added to catalog retrieval on a per user ID and project ID basis. This affects both the v2 and v3 APIs. As a result this should provide a performance benefit to fernet-based deployments. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/catalog_project_id-519f5a70f9f7c4c6.yaml0000664000175000017500000000053000000000000026235 0ustar00zuulzuul00000000000000--- deprecations: - Use of ``$(tenant_id)s`` in the catalog endpoints is deprecated in favor of ``$(project_id)s``. features: - Keystone supports ``$(project_id)s`` in the catalog. It works the same as ``$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalog endpoints should be updated to use ``$(project_id)s``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/change_min_pool_retry_max-f5e7c8d315401426.yaml0000664000175000017500000000027700000000000027466 0ustar00zuulzuul00000000000000--- fixes: - | Change the min value of pool_retry_max to 1. Setting this value to 0 caused the pool to fail before connecting to ldap, always raising MaxConnectionReachedError. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/convert-keystone-to-flask-80d980e239b662b0.yaml0000664000175000017500000000527100000000000027311 0ustar00zuulzuul00000000000000--- prelude: > Keystone has historically used a custom rolled WSGI framework based loosely on [`webob `_] which was in turn loaded by the [`pythonpaste library `_]. The Keystone team has been planning to move away from the home-rolled solution and to a common framework for a number of release cycles. As of the Rocky release Keystone is moving to the ``Flask`` framework. upgrade: - > Keystone no longer is loaded via ``paste.deploy`` and instead directly loads the ``Flask`` based application. If a deployment is relying on the entry-point generated wsgi files, it is important to get the newest ones. These new files have minor changes to support the new loading mechanisms. 
The files will be auto-generated via ``PBR`` and setup. The ``paste.ini`` file will now be ignored, but will remain on disk until the ``Stein`` release to ensure deployment tools are not inadvertently broken. The ``paste.ini`` file will have a comment added to indicate it is ignored. - > With the change to not load via ``paste.deploy`` it is no longer possible to remove any elements from the pipeline that keystone relies on. This includes former extensions (``S3``, ``EC2``) or middleware. If these APIs must be disabled, it is recommended to utilize policy to deny access. - > With the change to not load via ``paste.deploy`` it is no longer possible to inject custom middleware into the pipeline directly, it is recommended to wrap the entire stack if custom middleware is needed outside of what Keystone relies on. It is also possible to change/modify requests and responses via a smart proxy layer (e.g. ``HAProxy``). security: - | It is no longer possible to, via the ``paste.ini`` file to inject middleware into the running keystone application. This reduces the attack surface area. While this is not a huge reduction in surface area, it is one less potential place that malicious code could be loaded. Malicious middleware historically could collect information and/or modify the requests and responses from Keystone. other: - | [`#openstack-tc IRC log `_] With Technical Comittee consensus the Keystone team is not wiring up the reminents of the V2.0 API that was maintained strictly due to a failure to copy/paste a direct deprecation notice on the controllers even though the V2.0 API was deprecated in its entirety. 
This should have no meaningful impact on any user as the APIs (``ec2token``) have a v3 equivalent ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-endpoint-policy-cfg-option-d018acab72a398a0.yaml0000664000175000017500000000044300000000000031563 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] Deprecate the ``enabled`` option from ``[endpoint_policy]``, it will be removed in the 'O' release, and the extension will always be enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-95f6307f88358f58.yaml0000664000175000017500000000176000000000000031326 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default), should generate new policy files or convert them in YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in backward compatible way. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Wallaby cycle with an anticipated future removal of support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-memcache-token-persistence-eac88c80147ea241.yaml0000664000175000017500000000045000000000000031535 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The token memcache and memcache_pool persistence backends have been deprecated in favor of using Fernet tokens (which require no persistence). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-policies-api-b104fbd1d2367b1b.yaml0000664000175000017500000000016100000000000026752 0ustar00zuulzuul00000000000000--- deprecations: - | The ``policies`` API is deprecated. Keystone is not a policy management service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-templated-catalog-driver-f811a6040abdc4a8.yaml0000664000175000017500000000017200000000000031261 0ustar00zuulzuul00000000000000--- deprecations: - | The templated catalog driver has been deprecated and will be removed in a future release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecate-v2-apis-894284c17be881d2.yaml0000664000175000017500000000107700000000000025501 0ustar00zuulzuul00000000000000--- prelude: > Deprecated all v2.0 APIs. Most v2.0 APIs will be removed in the 'Q' release. However, the authentication APIs and EC2 APIs are indefinitely deprecated. deprecations: - > [`blueprint deprecated-as-of-mitaka `_] Deprecated all v2.0 APIs. The keystone team recommends using v3 APIs instead. Most v2.0 APIs will be removed in the 'Q' release. However, the authentication APIs and EC2 APIs are indefinitely deprecated and will not be removed in the 'Q' release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-mitaka-8534e43fa40c1d09.yaml0000664000175000017500000000331600000000000026673 0ustar00zuulzuul00000000000000--- prelude: > Deprecated the PKI and PKIz token formats. They will be removed in the 'O' release. deprecations: - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the PKI and PKIz token formats have been deprecated. They will be removed in the 'O' release. Due to this change, the `hash_algorithm` option in the `[token]` section of the configuration file has also been deprecated. Also due to this change, the ``keystone-manage pki_setup`` command has been deprecated as well. - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, write support for the LDAP driver of the Identity backend has been deprecated. This includes the following operations: create user, create group, delete user, delete group, update user, update group, add user to group, and remove user from group. These operations will be removed in the 'O' release. - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the auth plugin `keystone.auth.plugins.saml2.Saml2` has been deprecated. It is recommended to use `keystone.auth.plugins.mapped.Mapped` instead. The ``saml2`` plugin will be removed in the 'O' release. - > [`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the simple_cert_extension is deprecated since it is only used in support of the PKI and PKIz token formats. It will be removed in the 'O' release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-newton-be1d8dbcc6bdc68f.yaml0000664000175000017500000000057100000000000027406 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-newton `_] As of the Newton release, the class plugin `keystone.common.kvs.core.KeyValueStore` has been deprecated. It is recommended to use alternative backends instead. The ``KeyValueStore`` class will be removed in the 'P' release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-ocata-a5b2f1e3e39f818e.yaml0000664000175000017500000000202100000000000026656 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-ocata `_] The catalog backend ``endpoint_filter.sql`` has been deprecated in the `Ocata` release, it has been consolidated with the ``sql`` backend. It is recommended to replace the ``endpoint_filter.sql`` catalog backend with the ``sql`` backend. The ``endpoint_filter.sql`` backend will be removed in the `Pike` release. - > [`blueprint deprecated-as-of-ocata `_] Various KVS backends and config options have been deprecated and will be removed in the `Pike` release. 
This includes: * ``keystone.common.kvs.backends.inmemdb.MemoryBackend`` * ``keystone.common.kvs.backends.memcached.MemcachedBackend`` * ``keystone.token.persistence.backends.kvs.Token`` * all config options under ``[kvs]`` in `keystone.conf` * the config option ``[memcached] servers`` in `keystone.conf` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-pike-506f9aca91674550.yaml0000664000175000017500000000074100000000000026304 0ustar00zuulzuul00000000000000--- deprecations: - | * UUID token provider ``[token] provider=uuid`` has been deprecated in favor of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming the default UUID tokens can be slated for removal in the R release. This also deprecates token-bind support as it was never implemented for fernet. * Token persistence driver/code (SQL) is deprecated with this patch since it is only used by the UUID token provider.. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-queens-8ad7f826e4f08f57.yaml0000664000175000017500000000220600000000000027031 0ustar00zuulzuul00000000000000--- deprecations: - > The ``/OS-FEDERATION/projects`` and ``/OS-FEDERATION/domains`` APIs are deprecated in favor of the ``/v3/auth/projects`` and ``/v3/auth/domains`` APIs. These APIs were originally marked as deprecated during the Juno release cycle, but we never deprecated using ``versionutils`` from oslo. More information regarding this deprecation can be found in the `patch `_ that proposed the deprecation. - > [`bug 1728690 `_] The ``member_role_id`` and ``member_role_name`` config options were used to create a default member role for keystone v2 role assignments, but with the removal of the v2 API it is no longer necessary to create this default role. 
This option is deprecated and will be removed in the S release. If you are depending on having a predictable role name and ID for this member role you will need to update your tooling. - > The ``enabled`` config option of the ``trust`` feature is deprecated and will be removed in the next release. Trusts will then always be enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-rocky-60b2fa05d07d3a28.yaml0000664000175000017500000000107400000000000026622 0ustar00zuulzuul00000000000000--- deprecations: - > The option ``[token] infer_roles=False`` is being deprecated in favor of always expanding role implications during token validation. `Default roles `_ depend on a chain of implied role assignments, ex: an admin user will also have the reader and member role. Therefore by ensuring that all these roles will always appear on the token validation response, we can improve the simplicity and readability of policy files. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-stein-0166965502cb3be2.yaml0000664000175000017500000000040100000000000026454 0ustar00zuulzuul00000000000000--- deprecations: - > The commandline options `standard-threads, `pydev-debug-host` and `pydev-debug-port` are only used by Keystone eventlet model in Newton release before. They are deprecated now and will be removed in the next release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-as-of-train-de3fe41ff2251385.yaml0000664000175000017500000000040300000000000026622 0ustar00zuulzuul00000000000000--- deprecations: - | [`bug 1829454 `_] The `[federation] federated_domain_name` option is deprecated. 
All users live in the identity provider's domain now, and the option is no longer used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/deprecated-socket_timeout-option-d3358b4f2310706c.yaml0000664000175000017500000000116400000000000030702 0ustar00zuulzuul00000000000000--- deprecations: - | The socket timeout configuration option for memcache of Keystone's definition isn't actually used anywhere [0], it would appear to be a broken knob. In fact oslo.cache has a duplicate option that appears to be used instead [1]. We can deprecate the keystone-specific option and point people to the oslo.cache option. [0] https://opendev.org/openstack/keystone/src/commit/a0aa21c237f7b42077fc945f157844deb77be5ef/keystone/conf/memcache.py#L26-L32 [1] https://opendev.org/openstack/oslo.cache/src/commit/a5023ba2754dd537c802d4a59290ff6378bd6285/oslo_cache/_opts.py#L85-L89 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/domain-level-limit-support-60e1e330d06227ed.yaml0000664000175000017500000000100300000000000027506 0ustar00zuulzuul00000000000000--- features: - | [`blueprint domain-level-limit `_] Keystone now supports domain level unified limit. When creating a limit, users can specify a ``domain_id`` instead of ``project_id``. For `flat` model, the domain limit is still non-hierarchical. For `strict-two-level` model, the domain limit is now considered as the first level, so that the project limit is the second level and the project can't contain any child. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/domain-manager-persona-7921587ce2fab4fd.yaml0000664000175000017500000000121200000000000027015 0ustar00zuulzuul00000000000000--- features: - > [`bug 2045974 `_] The Domain Manager Persona has been added. 
This makes identity-related self-service capabilities for users within domains possible without requiring the 'admin' role. Assigning the 'manager' role to users in domain scope now allows them to manage projects, groups, users and role assignments within the domain. This is subject to the following restriction: the roles that domain managers can assign and revoke are limited by a new ``domain_managed_target_role`` policy rule which defaults to 'reader', 'member' and 'manager'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/dont-enforce-get-s3tokens-ec2tokens-62b90b199e8075d8.yaml0000664000175000017500000000033700000000000031067 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 2052916 `_] Fixed a bug where a HTTP GET request against ``/v3/s3tokens`` or ``/v3/ec2tokens`` would return HTTP 500 instead of HTTP 405. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/drop-project-id-fk-b683b414e1585be8.yaml0000664000175000017500000000063700000000000025740 0ustar00zuulzuul00000000000000--- upgrade: - | The foreign key constraint between the ``user.domain_id`` column and the ``project.id`` column and between the ``identity_provider.domain_id`` column and the ``project.id`` column will be dropped upon running the keystone db_sync contraction step. These constraints are enforced in code and do not need to be enforced by the database. This should have no impact on users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/drop-python-3-6-and-3-7-dc90b86cedced92b.yaml0000664000175000017500000000020100000000000026535 0ustar00zuulzuul00000000000000--- upgrade: - | Python 3.6 & 3.7 support has been dropped. The minimum version of Python now supported is Python 3.8. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/enable-filter-idp-d0135f4615178cfc.yaml0000664000175000017500000000053000000000000025605 0ustar00zuulzuul00000000000000--- features: - > [`bug 1525317 `_] Enable filtering of identity providers based on `id`, and `enabled` attributes. - > [`bug 1555830 `_] Enable filtering of service providers based on `id`, and `enabled` attributes.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/enable-inherit-on-default-54ac435230261a6a.yaml0000664000175000017500000000067500000000000027156 0ustar00zuulzuul00000000000000--- upgrade: - > The default setting for the `os_inherit` configuration option is changed to True. If it is required to continue with this portion of the API disabled, then override the default setting by explicitly specifying the os_inherit option as False. deprecations: - The `os_inherit` configuration option is disabled. In the future, this option will be removed and this portion of the API will be always enabled. ././@PaxHeader0000000000000000000000000000021200000000000011450 xustar0000000000000000116 path=keystone-26.0.0/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb6.yaml 22 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/endpoints-from-endpoint_group-project-association-7271fba600322fb0000664000175000017500000000045000000000000033236 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1516469 `_] Endpoints filtered by endpoint_group project association will be included in the service catalog when a project scoped token is issued and ``endpoint_filter.sql`` is used for the catalog driver. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/eventlet-cleanup-f35fc5f83c16ea1c.yaml0000664000175000017500000000140600000000000026022 0ustar00zuulzuul00000000000000--- upgrade: - | The following options have been removed. - ``[eventlet_server] public_bind_host`` - ``[eventlet_server] public_bind_port`` - ``[eventlet_server] public_admin_host`` - ``[eventlet_server] public_admin_port`` - | The following command line options have been removed. These options were used by Keystone eventlet model which was removed in Newton release. - ``standard-threads`` - ``pydev-debug-host`` - ``pydev-debug-port`` - | Keystone no longer substitute the following string interpolations in catalog information. Replace string interpolations by hard-coded strings before upgrade. - ``public_bind_host`` - ``public_bind_port`` - ``public_admin_host`` - ``public_admin_port`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/extensions-to-core-a0d270d216d47276.yaml0000664000175000017500000000256300000000000026011 0ustar00zuulzuul00000000000000--- upgrade: - > The `keystone-paste.ini` file must be updated to remove extension filters, and their use in ``[pipeline:api_v3]``. Remove the following filters: ``[filter:oauth1_extension]``, ``[filter:federation_extension]``, ``[filter:endpoint_filter_extension]``, and ``[filter:revoke_extension]``. See the sample `keystone-paste.ini `_ file for guidance. - > The `keystone-paste.ini` file must be updated to remove extension filters, and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` pipelines. Remove the following filters: ``[filter:user_crud_extension]``, ``[filter:crud_extension]``. See the sample `keystone-paste.ini `_ file for guidance. 
other: - > [`blueprint move-extensions `_] If any extension migrations are run, for example: ``keystone-manage db_sync --extension endpoint_policy`` an error will be returned. This is working as designed. To run these migrations simply run: ``keystone-manage db_sync``. The complete list of affected extensions are: ``oauth1``, ``federation``, ``endpoint_filter``, ``endpoint_policy``, and ``revoke``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/federation-group-ids-mapping-6c56120d65a5cb22.yaml0000664000175000017500000000045400000000000027775 0ustar00zuulzuul00000000000000--- features: - > [`blueprint federation-group-ids-mapped-without-domain-reference `_] Enhanced the federation mapping engine to allow for group IDs to be referenced without a domain ID. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/filter-mappings-by-entity-77162a146d375385.yaml0000664000175000017500000000075600000000000027146 0ustar00zuulzuul00000000000000--- upgrade: - | As a performance improvement, the base mapping driver's method ``get_domain_mapping_list`` now accepts an optional named argument ``entity_type`` that can be used to get the mappings for a given entity type only. As this new call signature is already used in the ``identity.core`` module, authors/maintainers of out-of-tree custom mapping drivers are expected to update their implementations of ``get_domain_mapping_list`` method accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/fix_application_credentials_implied_roles-b445fa56cb335a4d.yaml0000664000175000017500000000016600000000000033117 0ustar00zuulzuul00000000000000--- fixes: - | Application credentials will also include all implied by the user roles upon their creation. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/httpd-keystone-d51b7335559b09c8.yaml0000664000175000017500000000046400000000000025242 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-mitaka `_] The file ``httpd/keystone.py`` has been deprecated in favor of ``keystone-wsgi-admin`` and ``keystone-wsgi-public`` and may be removed in the 'O' release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/identity_driver_new_change_password_method-e8c0e06795bca2d8.yaml0000664000175000017500000000040300000000000033332 0ustar00zuulzuul00000000000000--- upgrade: - The identity backend driver interface has changed. We've added a new ``change_password()`` method for self service password changes. If you have a custom implementation for the identity driver, you will need to implement this new method. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/immutable-resource-options-bug-1807751-acc1e3c689484337.yaml0000664000175000017500000000071700000000000031243 0ustar00zuulzuul00000000000000--- features: - > [`bug 1807751 `_] Keystone now implements the scaffolding for resource options in projects and roles. Functionally new options (such as "immutable" flags) will appear in returned JSON under the `options` field (dict) returned in the project, domain, and role structures. The `options` field will be empty until resource options are implemented for project, domain, and role. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/impl-templated-catalog-1d8f6333726b34f8.yaml0000664000175000017500000000063000000000000026602 0ustar00zuulzuul00000000000000--- other: - > [`bug 1367113 `_] The "get entity" and "list entities" functionality for the KVS catalog backend has been reimplemented to use the data from the catalog template. Previously this would only act on temporary data that was created at runtime. The create, update and delete entity functionality now raises an exception. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/implied-roles-026f401adc0f7fb6.yaml0000664000175000017500000000120100000000000025212 0ustar00zuulzuul00000000000000--- features: - > [`blueprint implied-roles `_] Keystone now supports creating implied roles. Role inference rules can now be added to indicate when the assignment of one role implies the assignment of another. The rules are of the form `prior_role` implies `implied_role`. At token generation time, user/group assignments of roles that have implied roles will be expanded to also include such roles in the token. The expansion of implied roles is controlled by the `prohibited_implied_role` option in the `[assignment]` section of `keystone.conf`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/implied-roles-stable-8b293e187c5620ad.yaml0000664000175000017500000000053000000000000026342 0ustar00zuulzuul00000000000000--- other: - | The `implied roles API `_ has been marked as stable. This API was originally implemented in Mitaka and marked as experimental. There haven't been any backwards incompatible updates since then. As a result, the API is being marked as stable. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/improve-driver-donfiguration-ecedaf6ad0c3f9d2.yaml0000664000175000017500000000060200000000000030601 0ustar00zuulzuul00000000000000--- features: - | Improve configuration management for the out-of-tree identity drivers. When driver implements a special method it is being invoked before instantiating the driver when reading configuration from the database. Also 2 new `domain_config` section configuration options are added to allow such driver specific parameters to be managed using the API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/insecure_reponse-2a168230709bc8e7.yaml0000664000175000017500000000047200000000000025620 0ustar00zuulzuul00000000000000--- upgrade: - A new config option, `insecure_debug`, is added to control whether debug information is returned to clients. This used to be controlled by the `debug` option. If you'd like to return extra information to clients set the value to ``true``. This extra information may help an attacker. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/integrate-osprofiler-ad0e16a542b12899.yaml0000664000175000017500000000111300000000000026461 0ustar00zuulzuul00000000000000--- features: - OSprofiler support was added. This cross-project profiling library allows to trace various requests through all OpenStack services that support it. To initiate OpenStack request tracing `--profile ` option needs to be added to the CLI command. Configuration and usage details can be found in [`OSProfiler documentation `_] upgrade: - OSprofiler support was introduced. To allow its usage the keystone-paste.ini file needs to be modified to contain osprofiler middleware. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/is-admin-24b34238c83b3a82.yaml0000664000175000017500000000140700000000000023742 0ustar00zuulzuul00000000000000--- features: - > [`bug 96869 `_] A pair of configuration options have been added to the ``[resource]`` section to specify a special ``admin`` project: ``admin_project_domain_name`` and ``admin_project_name``. If these are defined, any scoped token issued for that project will have an additional identifier ``is_admin_project`` added to the token. This identifier can then be checked by the policy rules in the policy files of the services when evaluating access control policy for an API. Keystone does not yet support the ability for a project acting as a domain to be the admin project. That will be added once the rest of the code for projects acting as domains is merged. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/ldap-conn-pool-enabled-90df94652f1ded53.yaml0000664000175000017500000000051100000000000026625 0ustar00zuulzuul00000000000000--- upgrade: - > The configuration options for LDAP connection pooling, `[ldap] use_pool` and `[ldap] use_auth_pool`, are now both enabled by default. Only deployments using LDAP drivers are affected. Additional configuration options are available in the `[ldap]` section to tune connection pool size, etc. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/ldap-emulation-91c4d535eb9c3d10.yaml0000664000175000017500000000055600000000000025323 0ustar00zuulzuul00000000000000--- features: - > [`bug 1515302 `_] Two new configuration options have been added to the `[ldap]` section. 
`user_enabled_emulation_use_group_config` and `project_enabled_emulation_use_group_config`, which allow deployers to choose if they want to override the default group LDAP schema option. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/limits-api-refactor-05abf9e6c2e75852.yaml0000664000175000017500000000106700000000000026273 0ustar00zuulzuul00000000000000--- fixes: - | [`bug 1754184 `_] The unified limit APIs has been refactored to align with the following API-WG guidelines: 1. POST unified limits no longer returns all the limits during create operations. It now only returns the newly created limits. 2. Support for updating multiple limits in a single request has been removed by implementing PATCH instead of PUT. Please note that the unified limits APIs is still experimental making it possible to include these improvements. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/list_limit-ldap-support-5d31d51466fc49a6.yaml0000664000175000017500000000025200000000000027131 0ustar00zuulzuul00000000000000--- features: - > [`bug 1501698 `_] Support parameter `list_limit` when LDAP is used as identity backend. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/list_role_assignment_names-33aedc1e521230b6.yaml0000664000175000017500000000043300000000000027772 0ustar00zuulzuul00000000000000--- features: - > [`bug 1479569 `_] Names have been added to list role assignments (GET /role_assignments?include_names=True), rather than returning just the internal IDs of the objects the names are also returned. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/mapping_populate-521d92445505b8a3.yaml0000664000175000017500000000107400000000000025527 0ustar00zuulzuul00000000000000--- prelude: > Add ``keystone-manage mapping_populate`` command, which should be used when domain-specific LDAP backend is used. features: - Add ``keystone-manage mapping_populate`` command. This command will pre-populate a mapping table with all users from LDAP, in order to improve future query performance. It should be used when an LDAP is first configured, or after calling ``keystone-manage mapping_purge``, before any queries related to the domain are made. For more information see ``keystone-manage mapping_populate --help`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/max-password-length-truncation-and-warning-bd69090315ec18a7.yaml0000664000175000017500000000072700000000000032626 0ustar00zuulzuul00000000000000--- security: - | Passwords will now be automatically truncated if the max_password_length is greater than the allowed length for the selected password hashing algorithm. Currently only bcrypt has fixed allowed lengths defined which is 54 characters. A warning will be generated in the log if a password is truncated. This will not affect existing passwords, however only the first 54 characters of existing bcrypt passwords will be validated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/migration_squash-f655329ddad7fc2a.yaml0000664000175000017500000000023100000000000026122 0ustar00zuulzuul00000000000000--- upgrade: - > [`bug 1541092 `_] Only database upgrades from Kilo and newer are supported. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/no-default-domain-2161ada44bf7a3f7.yaml0000664000175000017500000000047000000000000025761 0ustar00zuulzuul00000000000000--- other: - > ``keystone-manage db_sync`` will no longer create the Default domain. This domain is used as the domain for any users created using the legacy v2.0 API. A default domain is created by ``keystone-manage bootstrap`` and when a user or project is created using the legacy v2.0 API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/notify-on-user-group-membership-8c0136ee0484e255.yaml0000664000175000017500000000035700000000000030433 0ustar00zuulzuul00000000000000--- fixes: - Support has now been added to send notification events on user/group membership. When a user is added or removed from a group a notification will be sent including the identifiers of both the user and the group. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/oauth1-headers-content-type-9a9245d9bbec8f8e.yaml0000664000175000017500000000035700000000000030035 0ustar00zuulzuul00000000000000--- other: - > The response's content type for creating request token or access token is changed to `application/x-www-form-urlencoded`, the old value `application/x-www-urlformencoded` is invalid and will no longer be used. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/oslo.cache-a9ce47bfa8809efa.yaml0000664000175000017500000000136600000000000024664 0ustar00zuulzuul00000000000000--- upgrade: - > Keystone now uses oslo.cache. Update the `[cache]` section of `keystone.conf` to point to oslo.cache backends: ``oslo_cache.memcache_pool`` or ``oslo_cache.mongo``. 
Refer to the sample configuration file for examples. See `oslo.cache `_ for additional documentation. deprecations: - > [`blueprint deprecated-as-of-mitaka `_] ``keystone.common.cache.backends.memcache_pool``, ``keystone.common.cache.backends.mongo``, and ``keystone.common.cache.backends.noop`` are deprecated in favor of oslo.cache backends. The keystone backends will be removed in the 'O' release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/password-created_at-nullable-b3c284be50d93ef5.yaml0000664000175000017500000000036200000000000030217 0ustar00zuulzuul00000000000000--- upgrade: - Fixes a bug related to the password create date. If you deployed master during Newton development, the password create date may be reset. This would only be apparent if you have security compliance features enabled. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/policy_new_federated_projects_for_user-dcd7bd148efef049.yaml0000664000175000017500000000057600000000000032635 0ustar00zuulzuul00000000000000--- upgrade: - In the policy.json file, we changed `identity:list_projects_for_groups` to `identity:list_projects_for_user`. Likewise, we changed `identity:list_domains_for_groups` to `identity:list_domains_for_user`. If you have customized the policy.json file, you will need to make these changes. This was done to better support new features around federation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/pre-cache-tokens-73450934918af26b.yaml0000664000175000017500000000043100000000000025321 0ustar00zuulzuul00000000000000--- prelude: > Tokens can now be cached when issued. 
features: - Add ``cache_on_issue`` flag to ``[token]`` section that enables placing issued tokens to validation cache thus reducing the first validation time as if token is already validated and token data cached. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/project-tags-1e72a6779d9d02c5.yaml0000664000175000017500000000135600000000000024744 0ustar00zuulzuul00000000000000--- features: - | [`blueprint project-tags `_] Projects have a new property called tags. These tags are simple strings that can be used to allow projects to be filtered/searched. Project tags will have the following properties: * Tags are case sensitive * '/' and ',' are not allowed to be in a tag * Each project can have up to 100 tags * Each tag can be up to 255 characters See `Project Tags `_ Project tags are implemented following the guidelines set by the `API Working Group `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/projects_as_domains-3ea8a58b4c2965e1.yaml0000664000175000017500000000051000000000000026433 0ustar00zuulzuul00000000000000--- features: - Domains are now represented as top level projects with the attribute `is_domain` set to true. Such projects will appear as parents for any previous top level projects. Projects acting as domains can be created, read, updated, and deleted via either the project API or the domain API (V3 only). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/python3-support-e4189e0a1a6e2e4f.yaml0000664000175000017500000000017400000000000025604 0ustar00zuulzuul00000000000000--- other: - Keystone now supports being run under Python 3. The Python 3 and Python 3.4 classifiers have been added. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/randomize_urls-c0c19f48b2bfa299.yaml0000664000175000017500000000027600000000000025532 0ustar00zuulzuul00000000000000--- features: - | A new option 'randomize_urls' can be used to randomize the order in which keystone connects to the LDAP servers in [ldap] 'url' list. It is false by default. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/remove-db_sync-extension-opt-2ab1f29340281215.yaml0000664000175000017500000000031000000000000027666 0ustar00zuulzuul00000000000000--- upgrade: - | The ``--extension`` option of ``keystone-manage db_sync`` has been deprecated since 10.0.0 (Newton) and raised an error when provided. It has now been removed entirely. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/remove-legacy-migrations-647f60019c8dd9e8.yaml0000664000175000017500000000042300000000000027255 0ustar00zuulzuul00000000000000--- upgrade: - | The legacy migrations that existed before the split into separate expand schema, contract schema, and data migration migration have now been removed. These have been deprecated since 10.0.0 (Newton). This should have no user-facing impact. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/remove-sqlalchemy-migrate-a4fa47685c7e28c6.yaml0000664000175000017500000000024600000000000027506 0ustar00zuulzuul00000000000000--- upgrade: - | The legacy ``sqlalchemy-migrate`` migrations, which have been deprecated since Zed, have been removed. There should be no end-user impact. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/remove-token-auth-middleware-5ea3b3734ce1d9e6.yaml0000664000175000017500000000156400000000000030162 0ustar00zuulzuul00000000000000--- prelude: > The token_auth middleware functionality has been merged into the main auth middleware (keystone.middleware.auth.AuthContextMiddleware). `admin_token_auth` must be removed from the [pipeline:api_v3], [pipeline:admin_api], and [pipeline:public_api] sections of your paste ini file. The [filter:token_auth] block will also need to be removed from your paste ini file. Failure to remove these elements from your paste ini file will result in keystone to no longer start/run when the `token_auth` is removed in the Stein release. upgrade: - Remove token_auth from your keystone paste.ini file. Failure to remove these elements from your paste ini file will result in keystone to no longer start/run when the `token_auth` is removed in the Stein release. deprecations: - The keystone.middleware.core:TokenAuthMiddleware is deprecated for removal. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/remove-trust-auth-support-from-v2-de316c9ba46d556d.yaml0000664000175000017500000000022300000000000031101 0ustar00zuulzuul00000000000000--- other: - The ability to validate a trust-scoped token against the v2.0 API has been removed, in favor of using the version 3 of the API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-mitaka-9ff14f87d0b98e7e.yaml0000664000175000017500000000464300000000000026433 0ustar00zuulzuul00000000000000--- other: - > [`blueprint removed-as-of-mitaka `_] Removed ``extras`` from token responses. These fields should not be necessary and a well-defined API makes this field redundant. 
This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed ``RequestBodySizeLimiter`` from keystone middleware. The keystone team suggests using ``oslo_middleware.sizelimit.RequestBodySizeLimiter`` instead. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Notifications with event_type ``identity.created.role_assignment`` and ``identity.deleted.role_assignment`` have been removed. The keystone team suggests listening for ``identity.role_assignment.created`` and ``identity.role_assignment.deleted`` instead. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed ``check_role_for_trust`` from the trust controller, ensure policy files do not refer to this target. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed Catalog KVS backend (``keystone.catalog.backends.sql.Catalog``). This was deprecated in the Icehouse release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Assignment has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Resource has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] The LDAP backend for Role has been removed. This was deprecated in the Kilo release. - > [`blueprint removed-as-of-mitaka `_] Removed Revoke KVS backend (``keystone.revoke.backends.kvs.Revoke``). This was deprecated in the Juno release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-newton-721c06b5dcb1b34a.yaml0000664000175000017500000000224200000000000026417 0ustar00zuulzuul00000000000000--- other: - > [`blueprint removed-as-of-newton `_] Removed the backend and route from ``keystone.contrib.endpoint_policy``. The package has been moved to ``keystone.endpoint_policy``. This was deprecated in the Liberty release. 
- > [`blueprint removed-as-of-newton `_] Removed ``[eventlet_server]`` and ``[eventlet_server_ssl]`` sections from the `keystone.conf`. - > [`blueprint removed-as-of-newton `_] Removed support for running keystone under eventlet. It is recommended to run keystone in an HTTP server. - > [`blueprint removed-as-of-newton `_] Removed support for generating SSL certificates. - > [`blueprint removed-as-of-newton `_] The ``revoke_by_expiration`` method in ``keystone.revoke.core`` has been removed. This was deprecated in the Juno release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-ocata-436bb4b839e74494.yaml0000664000175000017500000001070600000000000026016 0ustar00zuulzuul00000000000000--- prelude: > - The PKI and PKIz token format has been removed. See ``Other Notes`` for more details. - Support for writing to LDAP has been removed. See ``Other Notes`` for more details. other: - > PKI and PKIz token formats have been removed in favor of Fernet tokens. - > Write support for the LDAP has been removed in favor of read-only support. The following operations are no longer supported for LDAP: * ``create user`` * ``create group`` * ``delete user`` * ``delete group`` * ``update user`` * ``update group`` * ``add user to group`` * ``remove user from group`` - > Routes and SQL backends for the contrib extensions have been removed, they have been incorporated into keystone and are no longer optional. This affects: * ``keystone/contrib/admin_crud`` * ``keystone/contrib/endpoint_filter`` * ``keystone/contrib/federation`` * ``keystone/contrib/oauth1`` * ``keystone/contrib/revoke`` * ``keystone/contrib/simple_cert`` * ``keystone/contrib/user_crud`` - > Keystone cache backends have been removed in favor of their `oslo.cache` counter-part. 
This affects: * ``keystone/common/cache/backends/mongo`` * ``keystone/common/cache/backends/memcache_pool`` * ``keystone/common/cache/backends/noop`` - > Several token validation methods from the abstract class ``keystone.token.providers.base.Provider`` were removed (see below) in favor of a single method to validate tokens (``validate_token``), that has the signature ``validate_token(self, token_ref)``. If using a custom token provider, update the custom provider accordingly. * ``validate_v2_token`` * ``validate_v3_token`` * ``validate_non_persistent_token`` - > Several token issuance methods from the abstract class ``keystone.token.providers.base.Provider`` were removed (see below) in favor of a single method to issue tokens (``issue_token``). If using a custom token provider, updated the custom provider accordingly. * ``issue_v2_token`` * ``issue_v3_token`` - > The ``[DEFAULT] domain_id_immutable`` configuration option has been removed in favor of strictly immutable domain IDs. - > The ``[endpoint_policy] enabled`` configuration option has been removed in favor of always enabling the endpoint policy extension. - > The auth plugin ``keystone.auth.plugins.saml2.Saml2`` has been removed in favor of the auth plugin ``keystone.auth.plugins.mapped.Mapped``. - > The ``memcache`` and ``memcache_pool`` token persistence backends have been removed in favor of using Fernet tokens (which require no persistence). - > The ``httpd/keystone.py`` file has been removed in favor of the ``keystone-wsgi-admin`` and ``keystone-wsgi-public`` scripts. - > The ``keystone/service.py`` file has been removed, the logic has been moved to the ``keystone/version/service.py``. - > The check for admin token from ``build_auth_context`` middleware has been removed. 
If your deployment requires the use of `admin token`, update ``keystone-paste.ini`` so that ``admin_token_auth`` is before ``build_auth_context`` in the paste pipelines, otherwise remove the ``admin_token_auth`` middleware from ``keystone-paste.ini`` entirely. - > The ``[assignment] driver`` now defaults to ``sql``. Logic to determine the default assignment driver if one wasn't supplied through configuration has been removed. Keystone only supports one assignment driver and it shouldn't be changed unless you're deploying a custom assignment driver. - > The ``[resource] driver`` now defaults to ``sql``. Logic to determine the default resource driver if one wasn't supplied through configuration has been removed. Keystone only supports one resource driver and it shouldn't be changed unless you're deploying a custom resource driver. - > The ``[os_inherit] enabled`` config option has been removed, the `OS-INHERIT` extension is now always enabled. - > The ``[DEFAULT] domain_id_immutable`` option has been removed. This removes the ability to change the ``domain_id`` attribute of users, groups, and projects. The behavior was introduced to allow deployers to migrate entities from one domain to another by updating the ``domain_id`` attribute of an entity. This functionality was deprecated in the Mitaka release is now removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-pike-deadbeefdeadbeef.yaml0000664000175000017500000000516600000000000026747 0ustar00zuulzuul00000000000000--- other: - > [`blueprint removed-as-of-pike `_] All key-value-store code, options, and documentation has been removed as of the Pike release. The removed code included ``keystone.common.kvs`` configuration options for the KVS code, unit tests, and the KVS token persistence driver ``keystone.token.persistence.backends.kvs``. All associated documentation has been removed. 
- > [`blueprint removed-as-of-pike `_] The ``admin_token_auth`` filter has been removed from all sample pipelines, specifically, the following section has been removed from ``keystone-paste.ini``:: [filter:admin_token_auth] use = egg:keystone#admin_token_auth The functionality of the ``ADMIN_TOKEN`` remains, but has been incorporated into the main auth middleware (``keystone.middleware.auth.AuthContextMiddleware``). - > The catalog backend ``endpoint_filter.sql`` has been removed. It has been consolidated with the ``sql`` backend, therefore replace the ``endpoint_filter.sql`` catalog backend with the ``sql`` backend. - > The ``[security_compliance] password_expires_ignore_user_ids`` option has been removed. Each user that should ignore password expiry should have the value set to "true" in the user's ``options`` attribute (e.g. ``user['options']['ignore_password_expiry'] = True``) with a user update call. - > [`blueprint removed-as-of-pike `_] The ``keystone.common.ldap`` module was removed from the code tree. It was deprecated in the Newton release in favor of using ``keystone.identity.backends.ldap.common`` which has the same functionality. - > [`blueprint removed-as-of-pike `_] The ``keystone-manage pki_setup`` was added to aid developer setup by hiding the sometimes cryptic openssl commands. This is no longer needed since keystone no longer supports PKI tokens and can no longer serve SSL. This was deprecated in the Mitaka release. - > [`blueprint removed-as-of-pike `_] Direct import of drivers outside of their `keystone` namespace has been removed. Ex. identity drivers are loaded from the `keystone.identity` namespace and assignment drivers from the `keystone.assignment` namespace. Loading drivers outside of their keystone namespaces was deprecated in the Liberty release. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-queens-94c04e88c08f89aa.yaml0000664000175000017500000000131600000000000026365 0ustar00zuulzuul00000000000000--- other: - > [`bug 1728690 `_] The ``keystone-manage bootstrap`` command will only create the admin role and will no longer create a default member role. Please create any additional roles you need after running ``bootstrap`` by using the ``openstack role create`` command. - > The config option ``rolling_upgrade_password_hash_compat`` is removed. It is only used for rolling-upgrade from Ocata release to Pike release. - > [`blueprint removed-as-of-queens `_] The ``admin_token_auth`` middleware is removed now. The related doc is removed as well. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-rocky-f44c3ba7c3e73d01.yaml0000664000175000017500000000212000000000000026241 0ustar00zuulzuul00000000000000--- other: - | [`blueprint removed-as-of-rocky `_] The ``sql`` token driver and ``uuid`` token providers have been removed in favor of the ``fernet`` token provider. - > [`blueprint removed-as-of-rocky `_] Removed support for direct import of authentication drivers. If you're using full path names for authentication methods in configuration, please update your configuration to use the corresponding namespaces. - > [`blueprint removed-as-of-rocky `_] Removed support for token bind operations, which were supported by the ``uuid``, ``pki``, and ``pkiz`` token providers. Support for this feature was deprecated in Pike. - > [`blueprint removed-as-of-rocky `_] The deprecated `enable` config option of the trust feature is removed. Trusts now is always enabled. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-stein-5eb23253b72ab54e.yaml0000664000175000017500000000274000000000000026163 0ustar00zuulzuul00000000000000--- other: - | [`blueprint removed-as-of-stein `_] The options ``member_role_id`` and ``member_role_name`` which were deprecated in Queens and only used for V2 are removed now. - > [`blueprint removed-as-of-stein `_] The deprecated token_flush is removed now. - > [`blueprint removed-as-of-stein `_] The deprecated config option `bind` is removed now. - > [`blueprint removed-as-of-stein `_] The deprecated option `crypt_strength` is removed now. It was only useful for `sha512_crypt` password hashes which has been superseded by more secure hashing implementations. - > [`blueprint removed-as-of-stein `_] The ``keystone.conf [DEFAULT] secure_proxy_ssl_header`` configuration option was slated for removal in Pike and has now officially been removed. Please use ``oslo.middleware.http_proxy_to_wsgi`` instead. - > [`blueprint removed-as-of-stein `_] The interface ``create_arguments_apply`` in token formatter payload has been removed. The token payload now doesn't need to be force ordered any more. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-train-92b2942a680eb859.yaml0000664000175000017500000000122000000000000026031 0ustar00zuulzuul00000000000000--- other: - | [`bug 1829453 `_] The deprecated config option `infer_roles` is removed now. - > [`bug 1829453 `_] The deprecated config option `admin_endpoint` is removed now. - > [`bug 1829453 `_] The deprecated config options in `signing` are removed now. upgrade: - | [`bug 1829453 `_] The os-simple-cert-api will return 410 due to the removal of config options signing [ca_certs] and signing [cert_file]. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/removed-as-of-ussuri-d2f6ef8901ef54ed.yaml0000664000175000017500000000033100000000000026552 0ustar00zuulzuul00000000000000--- upgrade: - | Dropping the Python2 support in OpenStack Ussuri according to `the TC deprecation timeline `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/request_context-e143ba9c446a5952.yaml0000664000175000017500000000045700000000000025567 0ustar00zuulzuul00000000000000--- features: - > [`bug 1500222 `_] Added information such as: user ID, project ID, and domain ID to log entries. As a side effect of this change, both the user's domain ID and project's domain ID are now included in the auth context. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/resource-backend-sql-only-03154d8712b36bd0.yaml0000664000175000017500000000120000000000000027212 0ustar00zuulzuul00000000000000--- upgrade: - | The resource backend cannot be configured to anything but SQL if the SQL Identity backend is being used. The resource backend must now be SQL which allows for the use of Foreign Keys to domains/projects wherever desired. This makes managing project relationships and such much more straight forward. The inability to configure non-SQL resource backends has been in Keystone since at least Ocata. This is eliminating some complexity and preventing the need for some really ugly back-port SQL migrations in favor of a better model. Resource is highly relational and should be SQL based. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/resource-driver-33793dd5080ee4d2.yaml0000664000175000017500000000031600000000000025447 0ustar00zuulzuul00000000000000--- features: - | Restores the configurability of the resource driver, so it is now possible to create a custom resource driver if the built-in sql driver does not meet business requirements. ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=keystone-26.0.0/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.yaml 22 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/revert-v2-token-issued-for-non-default-domain-25ea5337f158ef13.ya0000664000175000017500000000104000000000000032500 0ustar00zuulzuul00000000000000fixes: - > [`bug 1527759 `_] Reverted the change that eliminates the ability to get a V2 token with a user or project that is not in the default domain. This change broke real-world deployments that utilized the ability to authenticate via V2 API with a user not in the default domain or with a project not in the default domain. The deployer is being convinced to update code to properly handle V3 auth but the fix broke expected and tested behavior. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/s3-aws-v4-c6cb75ce8d2289d4.yaml0000664000175000017500000000027400000000000024147 0ustar00zuulzuul00000000000000--- features: - > [`bug 1473042 `_] Keystone's S3 compatibility support can now authenticate using AWS Signature Version 4. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/scope-and-default-roles-a733c235731bb558.yaml0000664000175000017500000000305200000000000026656 0ustar00zuulzuul00000000000000--- prelude: > This release leverages oslo.policy's policy-in-code feature to modify the default check strings and scope types for nearly all of keystone's API policies. These changes make the policies more precise than they were before, using the reader, member, and admin roles where previously only the admin role and a catch-all rule was available. The changes also take advantage of system, domain, and project scope, allowing you to create role assignments for your users that are appropriate to the actions they need to perform. Eventually this will allow you to set ``[oslo_policy]/enforce_scope=true`` in your keystone configuration, which simplifies access control management by ensuring that oslo.policy checks both the role and the scope on API requests. However, please be aware that not all policies have been converted in this release and some changes are still under development. During the transition phase, if you have not overridden a policy, the old default and the new default will be OR'd together. This means that, for example, where we have changed the policy rule from ``'rule:admin_required'`` to ``'role:reader and system_scope:all'``, both policy rules will be in effect. Please check your current policies and role assignments before upgrading to ensure the policies will not be too permissive for your deployment. To hide the deprecation warnings and opt into the less permissive rules, you can override the policy configuration to use the newer policy rule. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/support_encrypted_credentials_at_rest-93dcb67b3508e91a.yaml0000664000175000017500000000122600000000000032301 0ustar00zuulzuul00000000000000--- upgrade: - Keystone now supports encrypted credentials at rest. In order to upgrade successfully to Newton, deployers must encrypt all credentials currently stored before contracting the database. Deployers must run `keystone-manage credential_setup` in order to use the credential API within Newton, or finish the upgrade from Mitaka to Newton. This will result in a service outage for the credential API where credentials will be read-only for the duration of the upgrade process. Once the database is contracted credentials will be writeable again. Database contraction phases only apply to rolling upgrades. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/switch-to-alembic-1fa5248f0ce824ae.yaml0000664000175000017500000000231600000000000025777 0ustar00zuulzuul00000000000000--- upgrade: - | The database migration engine has changed from `sqlalchemy-migrate`__ to `alembic`__. For most deployments, this should have minimal to no impact and the switch should be mostly transparent. The main user-facing impact is the change in schema versioning. While sqlalchemy-migrate used a linear, integer-based versioning scheme, which required placeholder migrations to allow for potential migration backports, alembic uses a distributed version control-like schema where a migration's ancestor is encoded in the file and branches are possible. The alembic migration files therefore use a arbitrary UUID-like naming scheme and the ``keystone-manage db_version`` command returns such a version. 
When the ``keystone-manage db_sync`` command is run without options or with the ``--expand`` or ``--contract`` options, all remaining sqlalchemy-migrate-based migrations will be automatically applied. Data migrations are now included in the expand phase and the ``--migrate`` option is now a no-op. It may be removed in a future release. .. __: https://sqlalchemy-migrate.readthedocs.io/en/latest/ .. __: https://alembic.sqlalchemy.org/en/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/tenant_id_to_project_id-42d95d93011785cb.yaml0000664000175000017500000000020700000000000027120 0ustar00zuulzuul00000000000000--- upgrade: - > The assignment driver interface has changed to use the named parameter 'project_id' instead of 'tenant_id'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/token-formatter-ec58aba00fa83706.yaml0000664000175000017500000000051100000000000025573 0ustar00zuulzuul00000000000000--- features: - | The token_formatter utility class has been moved from under fernet to the default token directory. This is to allow for the reuse of functionality with other token providers. Any deployments that are specifically using the fernet utils may be affected and will need to adjust accordingly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/token-provider-refactor-a3a64146807daf36.yaml0000664000175000017500000000046600000000000027103 0ustar00zuulzuul00000000000000--- upgrade: - | The token provider API has removed the ``needs_persistence`` property from the abstract interface. Token providers are expected to handle persistence requirement if needed. This will require out-of-tree token providers to remove the unused property and handle token storage. 
././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=keystone-26.0.0/releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d.yaml 22 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/token_expiration_to_match_application_credential-56d058355a9f240d0000664000175000017500000000101100000000000033315 0ustar00zuulzuul00000000000000--- security: - | [`bug 1992183 `_] [`CVE-2022-2447 `_] Tokens issued with application credentials will now have their expiration validated against that of the application credential. If the application credential expires before the token the token's expiration will be set to the same expiration as the application credential. Otherwise the token will use the configured value. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/totp-40d93231714c6a20.yaml0000664000175000017500000000073200000000000023137 0ustar00zuulzuul00000000000000--- features: - > [`blueprint totp-auth `_] Keystone now supports authenticating via Time-based One-time Password (TOTP). To enable this feature, add the ``totp`` auth plugin to the `methods` option in the `[auth]` section of `keystone.conf`. More information about using TOTP can be found in `keystone's developer documentation `_. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/unified-limit-api-improvment-b34d18769d18a0a7.yaml0000664000175000017500000000134700000000000030040 0ustar00zuulzuul00000000000000--- fixes: - > Some bugs for unified limit APIs have been fixed, it includes: * [`bug 1798716 `_] The `region_id` of registered limit now can be updated to `None`. * [`bug 1798495 `_] The length of unified limit's `resource_name` now is limited from `1` to `255` (string). 
* [`bug 1797876 `_] The `default_limit` of registered limit and the `resource_limit` of limit now are limited from `-1` to `2147483647` (integer). `-1` means no limit. `2147483647` is the max value for integer by default in SQL (4 bytes). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/use-correct-inspect-8142e317c1e39c2a.yaml0000664000175000017500000000050600000000000026216 0ustar00zuulzuul00000000000000--- fixes: - > Replaced the usage of SQLAlchemy Inspector.from_engine() with the sqlalchemy.inspect() call, within several Alembic migration files as well as a test suite. SQLAlchemy will be deprecating the former syntax, so this change allows forwads compatibility with the next series of SQLAlchemy. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/use-pyldap-6e811c28bf350d6d.yaml0000664000175000017500000000030500000000000024466 0ustar00zuulzuul00000000000000--- upgrade: - Keystone now relies on pyldap instead of python-ldap. The pyldap library is a fork of python-ldap and is a drop-in replacement with modifications to be py3 compatible. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/use-python-ldap-0318ff7798bdd98d.yaml0000664000175000017500000000034100000000000025462 0ustar00zuulzuul00000000000000--- upgrade: - Keystone now relies on python-ldap instead of pyldap. The pyldap library is a deprecated fork from python-ldap. Starting with python-ldap 3.0 release this has been merged and is maintained there. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/v2-dep-d6e7ab2d08119549.yaml0000664000175000017500000000057400000000000023434 0ustar00zuulzuul00000000000000--- deprecations: - > [`blueprint deprecated-as-of-pike `_] The v2.0 ``auth`` and ``ec2`` APIs were already maked as deprecated in the Mitaka release, although no removal release had yet been identified. These APIs will now be removed in the 'T' release. The v3 APIs should be used instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/v3-endpoints-in-v2-list-b0439816938713d6.yaml0000664000175000017500000000033000000000000026346 0ustar00zuulzuul00000000000000--- fixes: - > [`bug 1480270 `_] Endpoints created when using v3 of the keystone REST API will now be included when listing endpoints via the v2.0 API. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/v9FederationDriver-cbebcf5f97e1eae2.yaml0000664000175000017500000000035000000000000026501 0ustar00zuulzuul00000000000000--- deprecations: - The V8 Federation driver interface is deprecated in favor of the V9 Federation driver interface. Support for the V8 Federation driver interface is planned to be removed in the 'O' release of OpenStack. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/notes/x509-auth-df0a229780b8e3ff.yaml0000664000175000017500000000036300000000000024140 0ustar00zuulzuul00000000000000--- features: - > [`blueprint x509-ssl-client-cert-authn `_] Keystone now supports tokenless client SSL x.509 certificate authentication and authorization. 
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/0000775000175000017500000000000000000000000017454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/2023.1.rst0000664000175000017500000000020200000000000020725 0ustar00zuulzuul00000000000000=========================== 2023.1 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.1 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/2023.2.rst0000664000175000017500000000020200000000000020726 0ustar00zuulzuul00000000000000=========================== 2023.2 Series Release Notes =========================== .. release-notes:: :branch: stable/2023.2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/2024.1.rst0000664000175000017500000000020200000000000020726 0ustar00zuulzuul00000000000000=========================== 2024.1 Series Release Notes =========================== .. 
release-notes:: :branch: stable/2024.1 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000021102 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000023353 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021611 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000024062 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/conf.py0000664000175000017500000002156000000000000020757 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Keystone Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. 
# # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = 'Keystone Release Notes' copyright = '2015, Keystone Developers' # Release notes are version independent # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. 
# default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'KeystoneReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). 
latex_documents = [ ( 'index', 'KeystoneReleaseNotes.tex', 'Keystone Release Notes Documentation', 'Keystone Developers', 'manual', ), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ( 'index', 'keystonereleasenotes', 'Keystone Release Notes Documentation', ['Keystone Developers'], 1, ) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ( 'index', 'KeystoneReleaseNotes', 'Keystone Release Notes Documentation', 'Keystone Developers', 'KeystoneReleaseNotes', 'Identity, Authentication and Access Management for OpenStack.', 'Miscellaneous', ), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] # -- Options for openstackdocstheme ------------------------------------------- openstackdocs_auto_name = False openstackdocs_repo_name = 'openstack/keystone' openstackdocs_bug_project = 'keystone' openstackdocs_bug_tag = 'doc' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/index.rst0000664000175000017500000000155000000000000021316 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ======================== Keystone Release Notes ======================== .. toctree:: :maxdepth: 1 unreleased 2024.1 2023.2 2023.1 zed yoga xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000021654 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. 
release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020713 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021665 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023452 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000027070000000000000026511 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. #zanata # Andi Chandler , 2022. #zanata # Andi Chandler , 2023. #zanata msgid "" msgstr "" "Project-Id-Version: Keystone Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-12-13 19:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2023-09-09 10:43+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "'/' and ',' are not allowed to be in a tag" msgstr "'/' and ',' are not allowed to be in a tag" msgid "" "**Experimental** - Domain specific configuration options can be stored in " "SQL instead of configuration files, using the new REST APIs." 
msgstr "" "**Experimental** - Domain specific configuration options can be stored in " "SQL instead of configuration files, using the new REST APIs." msgid "" "**Experimental** - Keystone now supports tokenless authorization with X.509 " "SSL client certificate." msgstr "" "**Experimental** - Keystone now supports tokenless authorisation with X.509 " "SSL client certificate." msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.3" msgstr "10.0.3" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.3" msgstr "11.0.3" msgid "11.0.4" msgstr "11.0.4" msgid "12.0.0" msgstr "12.0.0" msgid "12.0.1" msgstr "12.0.1" msgid "12.0.2" msgstr "12.0.2" msgid "12.0.3" msgstr "12.0.3" msgid "12.0.3-9" msgstr "12.0.3-9" msgid "13.0.0" msgstr "13.0.0" msgid "13.0.1" msgstr "13.0.1" msgid "13.0.2" msgstr "13.0.2" msgid "13.0.3" msgstr "13.0.3" msgid "13.0.4-9" msgstr "13.0.4-9" msgid "14.0.0" msgstr "14.0.0" msgid "14.0.1" msgstr "14.0.1" msgid "14.1.0" msgstr "14.1.0" msgid "14.2.0" msgstr "14.2.0" msgid "14.2.0-7" msgstr "14.2.0-7" msgid "15.0.0" msgstr "15.0.0" msgid "15.0.1" msgstr "15.0.1" msgid "15.0.1-9" msgstr "15.0.1-9" msgid "16.0.0" msgstr "16.0.0" msgid "16.0.1" msgstr "16.0.1" msgid "16.0.2" msgstr "16.0.2" msgid "16.0.2-9" msgstr "16.0.2-9" msgid "17.0.0" msgstr "17.0.0" msgid "17.0.1" msgstr "17.0.1" msgid "18.0.0" msgstr "18.0.0" msgid "18.1.0" msgstr "18.1.0" msgid "19.0.0" msgstr "19.0.0" msgid "19.0.1" msgstr "19.0.1" msgid "20.0.0" msgstr "20.0.0" msgid "20.0.1" msgstr "20.0.1" msgid "21.0.0" msgstr "21.0.0" msgid "22.0.0" msgstr "22.0.0" msgid "23.0.0" msgstr "23.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.1.0" msgstr "8.1.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.2.0" msgstr "9.2.0" msgid "" "A Federated user gets an entry in the shadow-users table. This entry has a " "unique ID. It was generated using a UUID. 
This fix changes to reuse the " "mechanism for LDAP, where the ID is generated from the domain ID + the local " "id of the user (an attribute that uniquely ids the user from the IdP). This " "generator is specified by the configuration file. Now Both LDAP and " "Federated Ids are generated the same way. It also means that Federated IDs " "can be kept in sync between two independtent Keystone servers." msgstr "" "A Federated user gets an entry in the shadow-users table. This entry has a " "unique ID. It was generated using a UUID. This fix changes to reuse the " "mechanism for LDAP, where the ID is generated from the domain ID + the local " "id of the user (an attribute that uniquely ids the user from the IdP). This " "generator is specified by the configuration file. Now Both LDAP and " "Federated Ids are generated the same way. It also means that Federated IDs " "can be kept in sync between two independent Keystone servers." msgid "" "A new ``secure_proxy_ssl_header`` configuration option is available when " "running keystone behind a proxy." msgstr "" "A new ``secure_proxy_ssl_header`` configuration option is available when " "running keystone behind a proxy." msgid "" "A new config option, `insecure_debug`, is added to control whether debug " "information is returned to clients. This used to be controlled by the " "`debug` option. If you'd like to return extra information to clients set the " "value to ``true``. This extra information may help an attacker." msgstr "" "A new config option, `insecure_debug`, is added to control whether debug " "information is returned to clients. This used to be controlled by the " "`debug` option. If you'd like to return extra information to clients set the " "value to ``true``. This extra information may help an attacker." msgid "" "A new interface called `list_federated_users_info` is added to shadow " "backend. It's used to get the shadow user information internally. 
If you are " "maintaining any out-tree shadow backends, please implement this function for " "them as well." msgstr "" "A new interface called `list_federated_users_info` is added to shadow " "backend. It's used to get the shadow user information internally. If you are " "maintaining any out-tree shadow backends, please implement this function for " "them as well." msgid "" "Add ``cache_on_issue`` flag to ``[token]`` section that enables placing " "issued tokens to validation cache thus reducing the first validation time as " "if token is already validated and token data cached." msgstr "" "Add ``cache_on_issue`` flag to ``[token]`` section that enables placing " "issued tokens to validation cache thus reducing the first validation time as " "if token is already validated and token data cached." msgid "" "Add ``keystone-manage mapping_populate`` command, which should be used when " "domain-specific LDAP backend is used." msgstr "" "Add ``keystone-manage mapping_populate`` command, which should be used when " "domain-specific LDAP backend is used." msgid "" "Add ``keystone-manage mapping_populate`` command. This command will pre-" "populate a mapping table with all users from LDAP, in order to improve " "future query performance. It should be used when an LDAP is first " "configured, or after calling ``keystone-manage mapping_purge``, before any " "queries related to the domain are made. For more information see ``keystone-" "manage mapping_populate --help``" msgstr "" "Add ``keystone-manage mapping_populate`` command. This command will pre-" "populate a mapping table with all users from LDAP, in order to improve " "future query performance. It should be used when an LDAP is first " "configured, or after calling ``keystone-manage mapping_purge``, before any " "queries related to the domain are made. 
For more information see ``keystone-" "manage mapping_populate --help``" msgid "" "Added an option ``--check`` to ``keystone-manage db_sync``, the option will " "allow a user to check the status of rolling upgrades in the database." msgstr "" "Added an option ``--check`` to ``keystone-manage db_sync``, the option will " "allow a user to check the status of rolling upgrades in the database." msgid "" "Adjust configuration tools as necessary, see the ``fixes`` section for more " "details on this change." msgstr "" "Adjust configuration tools as necessary, see the ``fixes`` section for more " "details on this change." msgid "" "All policies in ``policy.v3cloudsample.json`` that are redundant with the " "defaults in code have been removed. This improves maintainability and leaves " "the ``policy.v3cloudsample.json`` policy file with only overrides. These " "overrides will eventually be moved into code or new defaults in keystone " "directly. If you're using the policies removed from ``policy.v3cloudsample." "json`` please check to see if you can migrate to the new defaults or " "continue maintaining the policy as an override." msgstr "" "All policies in ``policy.v3cloudsample.json`` that are redundant with the " "defaults in code have been removed. This improves maintainability and leaves " "the ``policy.v3cloudsample.json`` policy file with only overrides. These " "overrides will eventually be moved into code or new defaults in keystone " "directly. If you're using the policies removed from ``policy.v3cloudsample." "json`` please check to see if you can migrate to the new defaults or " "continue maintaining the policy as an override." msgid "" "Allow the creating of a domain with the additional, optional parameter of " "`explicit_domain_id` instead of auto-creating a domain_id from a uuid." msgstr "" "Allow the creating of a domain with the additional, optional parameter of " "`explicit_domain_id` instead of auto-creating a domain_id from a UUID." 
msgid "" "Any auth methods that are not defined in ``keystone.conf`` in the ``[auth] " "methods`` option are ignored when the rules are processed. Empty rules are " "not allowed. If a rule is empty due to no-valid auth methods existing within " "it, the rule is discarded at authentication time. If there are no rules or " "no valid rules for the user, authentication occurs in the default manner: " "any single configured auth method is sufficient to receive a token." msgstr "" "Any auth methods that are not defined in ``keystone.conf`` in the ``[auth] " "methods`` option are ignored when the rules are processed. Empty rules are " "not allowed. If a rule is empty due to no-valid auth methods existing within " "it, the rule is discarded at authentication time. If there are no rules or " "no valid rules for the user, authentication occurs in the default manner: " "any single configured auth method is sufficient to receive a token." msgid "" "Any middleware defined in Keystone's tree is no longer loaded via stevedore, " "and likewise the entry points were removed." msgstr "" "Any middleware defined in Keystone's tree is no longer loaded via Stevedore, " "and likewise the entry points were removed." msgid "" "As a performance improvement, the base mapping driver's method " "``get_domain_mapping_list`` now accepts an optional named argument " "``entity_type`` that can be used to get the mappings for a given entity type " "only. As this new call signature is already used in the ``identity.core`` " "module, authors/maintainers of out-of-tree custom mapping drivers are " "expected to update their implementations of ``get_domain_mapping_list`` " "method accordingly." msgstr "" "As a performance improvement, the base mapping driver's method " "``get_domain_mapping_list`` now accepts an optional named argument " "``entity_type`` that can be used to get the mappings for a given entity type " "only. 
As this new call signature is already used in the ``identity.core`` " "module, authors/maintainers of out-of-tree custom mapping drivers are " "expected to update their implementations of ``get_domain_mapping_list`` " "method accordingly." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Certain deprecated methods from the assignment manager were removed in favor " "of the same methods in the [resource] and [role] manager." msgstr "" "Certain deprecated methods from the assignment manager were removed in " "favour of the same methods in the [resource] and [role] manager." msgid "" "Certain variables in ``keystone.conf`` now have options, which determine if " "the user's setting is valid." msgstr "" "Certain variables in ``keystone.conf`` now have options, which determine if " "the user's setting is valid." msgid "" "Change the min value of pool_retry_max to 1. Setting this value to 0 caused " "the pool to fail before connecting to ldap, always raising " "MaxConnectionReachedError." msgstr "" "Change the min value of pool_retry_max to 1. Setting this value to 0 caused " "the pool to fail before connecting to ldap, always raising " "MaxConnectionReachedError." msgid "Configuring per-Identity Provider WebSSO is now supported." msgstr "Configuring per-Identity Provider WebSSO is now supported." msgid "Critical Issues" msgstr "Critical Issues" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "" "Data migrations are now included in the expand phase and the ``--migrate`` " "option is now a no-op. It may be removed in a future release." msgstr "" "Data migrations are now included in the expand phase and the ``--migrate`` " "option is now a no-op. It may be removed in a future release." msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Domain name information can now be used in policy rules with the attribute " "``domain_name``." 
msgstr "" "Domain name information can now be used in policy rules with the attribute " "``domain_name``." msgid "" "Domains are now represented as top level projects with the attribute " "`is_domain` set to true. Such projects will appear as parents for any " "previous top level projects. Projects acting as domains can be created, " "read, updated, and deleted via either the project API or the domain API (V3 " "only)." msgstr "" "Domains are now represented as top level projects with the attribute " "`is_domain` set to true. Such projects will appear as parents for any " "previous top level projects. Projects acting as domains can be created, " "read, updated, and deleted via either the project API or the domain API (V3 " "only)." msgid "" "Dropping the Python2 support in OpenStack Ussuri according to `the TC " "deprecation timeline `_" msgstr "" "Dropping the Python2 support in OpenStack Ussuri according to `the TC " "deprecation timeline `_" msgid "" "Each list of methods specifies a rule. If the auth methods provided by a " "user match (or exceed) the auth methods in the list, that rule is used. The " "first rule found (rules will not be processed in a specific order) that " "matches will be used. If a user has the ruleset defined as ``[[\"password\", " "\"totp\"]]`` the user must provide both password and totp auth methods (and " "both methods must succeed) to receive a token. However, if a user has a " "ruleset defined as ``[[\"password\"], [\"password\", \"totp\"]]`` the user " "may use the ``password`` method on it's own but would be required to use " "both ``password`` and ``totp`` if ``totp`` is specified at all." msgstr "" "Each list of methods specifies a rule. If the auth methods provided by a " "user match (or exceed) the auth methods in the list, that rule is used. The " "first rule found (rules will not be processed in a specific order) that " "matches will be used. 
If a user has the ruleset defined as ``[[\"password\", " "\"totp\"]]`` the user must provide both password and totp auth methods (and " "both methods must succeed) to receive a token. However, if a user has a " "ruleset defined as ``[[\"password\"], [\"password\", \"totp\"]]`` the user " "may use the ``password`` method on it's own but would be required to use " "both ``password`` and ``totp`` if ``totp`` is specified at all." msgid "Each project can have up to 100 tags" msgstr "Each project can have up to 100 tags" msgid "Each tag can be up to 255 characters" msgstr "Each tag can be up to 255 characters" msgid "" "Features that were \"extensions\" in previous releases (OAuth delegation, " "Federated Identity support, Endpoint Policy, etc) are now enabled by default." msgstr "" "Features that were \"extensions\" in previous releases (OAuth delegation, " "Federated Identity support, Endpoint Policy, etc) are now enabled by default." msgid "" "Fixes a bug related to the password create date. If you deployed master " "during Newton development, the password create date may be reset. This would " "only be apparent if you have security compliance features enabled." msgstr "" "Fixes a bug related to the password create date. If you deployed master " "during Newton development, the password create date may be reset. This would " "only be apparent if you have security compliance features enabled." msgid "" "For additional details see: `event notifications `_" msgstr "" "For additional details see: `event notifications `_" msgid "" "If PCI support is enabled, via the ``[security_compliance]`` configuration " "options, then the ``password_expires_at`` field will be populated with a " "timestamp. Otherwise, it will default to ``null``, indicating the password " "does not expire." msgstr "" "If PCI support is enabled, via the ``[security_compliance]`` configuration " "options, then the ``password_expires_at`` field will be populated with a " "timestamp. 
Otherwise, it will default to ``null``, indicating the password " "does not expire." msgid "" "If a password does not meet the specified criteria. See " "``[security_compliance] password_regex``." msgstr "" "If a password does not meet the specified criteria. See " "``[security_compliance] password_regex``." msgid "" "If a user attempts to change their password too often. See " "``[security_compliance] minimum_password_age``." msgstr "" "If a user attempts to change their password too often. See " "``[security_compliance] minimum_password_age``." msgid "" "If a user does not change their passwords at least once every X days. See " "``[security_compliance] password_expires_days``." msgstr "" "If a user does not change their passwords at least once every X days. See " "``[security_compliance] password_expires_days``." msgid "" "If a user is locked out after many failed authentication attempts. See " "``[security_compliance] lockout_failure_attempts``." msgstr "" "If a user is locked out after many failed authentication attempts. See " "``[security_compliance] lockout_failure_attempts``." msgid "" "If a user submits a new password that was recently used. See " "``[security_compliance] unique_last_password_count``." msgstr "" "If a user submits a new password that was recently used. See " "``[security_compliance] unique_last_password_count``." msgid "" "If expiring user group memberships are enabled via the `[federation] " "default_authorization_ttl` configuration option, or on an idp by idp basis " "by setting `authorization_ttl`, there will be a lag between when a user is " "removed from a group in an identity provider, and when that will be " "reflected in keystone. That amount of time will be equal to the last time " "the user logged in + idp ttl." 
msgstr "" "If expiring user group memberships are enabled via the `[federation] " "default_authorization_ttl` configuration option, or on an idp by idp basis " "by setting `authorization_ttl`, there will be a lag between when a user is " "removed from a group in an identity provider, and when that will be " "reflected in keystone. That amount of time will be equal to the last time " "the user logged in + idp ttl." msgid "" "If performing rolling upgrades, set `[identity] " "rolling_upgrade_password_hash_compat` to `True`. This will instruct keystone " "to continue to hash passwords in a manner that older (pre Pike release) " "keystones can still verify passwords. Once all upgrades are complete, ensure " "this option is set back to `False`." msgstr "" "If performing rolling upgrades, set `[identity] " "rolling_upgrade_password_hash_compat` to `True`. This will instruct keystone " "to continue to hash passwords in a manner that older (pre Pike release) " "keystones can still verify passwords. Once all upgrades are complete, ensure " "this option is set back to `False`." msgid "" "If you are affected by this bug, a fix in the keystone database will be " "needed so we recommend to dump the users' tables before doing this process:" msgstr "" "If you are affected by this bug, a fix in the keystone database will be " "needed so we recommend to dump the users' tables before doing this process:" msgid "" "If you are affected by this bug, you must remove stale role assignments " "manually. The following is an example SQL statement you can use to fix the " "issue, but you should verify it's applicability to your deployment's SQL " "implementation and version." msgstr "" "If you are affected by this bug, you must remove stale role assignments " "manually. The following is an example SQL statement you can use to fix the " "issue, but you should verify it's applicability to your deployment's SQL " "implementation and version." 
msgid "" "If you have a custom implementation for the shadow users backend, you will " "need to implement the new methods: ``delete_federated_object``, " "``create_federated_object``, ``get_federated_objects``. These methods are " "needed to support federated attributes via the user API." msgstr "" "If you have a custom implementation for the shadow users backend, you will " "need to implement the new methods: ``delete_federated_object``, " "``create_federated_object``, ``get_federated_objects``. These methods are " "needed to support federated attributes via the user API." msgid "" "In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in " "favor of the \"use\" directive, specifying an entrypoint." msgstr "" "In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in " "favour of the \"use\" directive, specifying an entrypoint." msgid "" "In the [resource] and [role] sections of the ``keystone.conf`` file, not " "specifying the driver and using the assignment driver is deprecated. In the " "Mitaka release, the resource and role drivers will default to the SQL driver." msgstr "" "In the [resource] and [role] sections of the ``keystone.conf`` file, not " "specifying the driver and using the assignment driver is deprecated. In the " "Mitaka release, the resource and role drivers will default to the SQL driver." msgid "" "In the case a user should be exempt from MFA Rules, regardless if they are " "set, the User-Option ``multi_factor_auth_enabled`` may be set to ``False`` " "for that user via the user create and update API (``POST/PATCH /v3/users``) " "call. If this option is set to ``False`` the MFA rules will be ignored for " "the user. Any other value except ``False`` will result in the MFA Rules " "being processed; the option can only be a boolean (``True`` or ``False``) or " "\"None\" (which will result in the default behavior (same as ``True``) but " "the option will no longer be shown in the ``user[\"options\"]`` dictionary." 
msgstr "" "In the case a user should be exempt from MFA Rules, regardless if they are " "set, the User-Option ``multi_factor_auth_enabled`` may be set to ``False`` " "for that user via the user create and update API (``POST/PATCH /v3/users``) " "call. If this option is set to ``False`` the MFA rules will be ignored for " "the user. Any other value except ``False`` will result in the MFA Rules " "being processed; the option can only be a boolean (``True`` or ``False``) or " "\"None\" (which will result in the default behaviour (same as ``True``) but " "the option will no longer be shown in the ``user[\"options\"]`` dictionary." msgid "" "In the policy.json file, we changed `identity:list_projects_for_groups` to " "`identity:list_projects_for_user`. Likewise, we changed `identity:" "list_domains_for_groups` to `identity:list_domains_for_user`. If you have " "customized the policy.json file, you will need to make these changes. This " "was done to better support new features around federation." msgstr "" "In the policy.json file, we changed `identity:list_projects_for_groups` to " "`identity:list_projects_for_user`. Likewise, we changed `identity:" "list_domains_for_groups` to `identity:list_domains_for_user`. If you have " "customized the policy.json file, you will need to make these changes. This " "was done to better support new features around federation." msgid "" "Included in this change is a removal of a legacy WSGI environment data " "holder calld `openstack.params`. The data holder was used exclusively for " "communicating data down the chain under paste-deploy. The data in `openstack." "params` was generally \"normalized\" in an odd way and unreferenced in the " "rest of the openstack code-base." msgstr "" "Included in this change is a removal of a legacy WSGI environment data " "holder called `openstack.params`. The data holder was used exclusively for " "communicating data down the chain under paste-deploy. The data in `openstack." 
"params` was generally \"normalized\" in an odd way and unreferenced in the " "rest of the OpenStack code-base." msgid "" "It is no longer possible to, via the ``paste.ini`` file to inject middleware " "into the running keystone application. This reduces the attack surface area. " "While this is not a huge reduction in surface area, it is one less potential " "place that malicious code could be loaded. Malicious middleware historically " "could collect information and/or modify the requests and responses from " "Keystone." msgstr "" "It is no longer possible to, via the ``paste.ini`` file to inject middleware " "into the running Keystone application. This reduces the attack surface area. " "While this is not a huge reduction in surface area, it is one less potential " "place that malicious code could be loaded. Malicious middleware historically " "could collect information and/or modify the requests and responses from " "Keystone." msgid "" "It is recommended to have the ``healthcheck`` middleware first in the " "pipeline::" msgstr "" "It is recommended to have the ``healthcheck`` middleware first in the " "pipeline::" msgid "" "JSON Body and URL Normalizing middleware were move to a flask-native model." msgstr "" "JSON Body and URL Normalising middleware were moved to a flask-native model." msgid "Keystone Release Notes" msgstr "Keystone Release Notes" msgid "" "Keystone cache backends have been removed in favor of their `oslo.cache` " "counter-part. This affects:" msgstr "" "Keystone cache backends have been removed in favour of their `oslo.cache` " "counter-part. This affects:" msgid "" "Keystone has been fully converted to run under flask. All of the APIs are " "now natively dispatched under flask." msgstr "" "Keystone has been fully converted to run under flask. All of the APIs are " "now natively dispatched under flask." 
msgid "" "Keystone has historically used a custom rolled WSGI framework based loosely " "on [`webob `_] which was in turn loaded by " "the [`pythonpaste library `_]. The " "Keystone team has been planning to move away from the home-rolled solution " "and to a common framework for a number of release cycles. As of the Rocky " "release Keystone is moving to the ``Flask`` framework." msgstr "" "Keystone has historically used a custom rolled WSGI framework based loosely " "on [`webob `_] which was in turn loaded by " "the [`pythonpaste library `_]. The " "Keystone team has been planning to move away from the home-rolled solution " "and to a common framework for a number of release cycles. As of the Rocky " "release Keystone is moving to the ``Flask`` framework." msgid "" "Keystone no longer is loaded via ``paste.deploy`` and instead directly loads " "the ``Flask`` based application. If a deployment is relying on the entry-" "point generated wsgi files, it is important to get the newest ones. These " "new files have minor changes to support the new loading mechanisms. The " "files will be auto-generated via ``PBR`` and setup. The ``paste.ini`` file " "will now be ignored, but will remain on disk until the ``Stein`` release to " "ensure deployment tools are not inadvertently broken. The ``paste.ini`` file " "will have a comment added to indicate it is ignored." msgstr "" "Keystone no longer is loaded via ``paste.deploy`` and instead directly loads " "the ``Flask`` based application. If a deployment is relying on the entry-" "point generated wsgi files, it is important to get the newest ones. These " "new files have minor changes to support the new loading mechanisms. The " "files will be auto-generated via ``PBR`` and setup. The ``paste.ini`` file " "will now be ignored, but will remain on disk until the ``Stein`` release to " "ensure deployment tools are not inadvertently broken. The ``paste.ini`` file " "will have a comment added to indicate it is ignored." 
msgid "" "Keystone now relies on pyldap instead of python-ldap. The pyldap library is " "a fork of python-ldap and is a drop-in replacement with modifications to be " "py3 compatible." msgstr "" "Keystone now relies on pyldap instead of python-ldap. The pyldap library is " "a fork of python-ldap and is a drop-in replacement with modifications to be " "py3 compatible." msgid "" "Keystone now relies on python-ldap instead of pyldap. The pyldap library is " "a deprecated fork from python-ldap. Starting with python-ldap 3.0 release " "this has been merged and is maintained there." msgstr "" "Keystone now relies on python-ldap instead of pyldap. The pyldap library is " "a deprecated fork from python-ldap. Starting with python-ldap 3.0 release " "this has been merged and is maintained there." msgid "" "Keystone now supports authorizing a request token by providing a role name. " "A `role` in the `roles` parameter can include either a role name or role id, " "but not both." msgstr "" "Keystone now supports authorising a request token by providing a role name. " "A `role` in the `roles` parameter can include either a role name or role id, " "but not both." msgid "" "Keystone now supports being run under Python 3. The Python 3 and Python 3.4 " "classifiers have been added." msgstr "" "Keystone now supports being run under Python 3. The Python 3 and Python 3.4 " "classifiers have been added." msgid "" "Keystone now supports encrypted credentials at rest. In order to upgrade " "successfully to Newton, deployers must encrypt all credentials currently " "stored before contracting the database. Deployers must run `keystone-manage " "credential_setup` in order to use the credential API within Newton, or " "finish the upgrade from Mitaka to Newton. This will result in a service " "outage for the credential API where credentials will be read-only for the " "duration of the upgrade process. Once the database is contracted credentials " "will be writeable again. 
Database contraction phases only apply to rolling " "upgrades." msgstr "" "Keystone now supports encrypted credentials at rest. In order to upgrade " "successfully to Newton, deployers must encrypt all credentials currently " "stored before contracting the database. Deployers must run `keystone-manage " "credential_setup` in order to use the credential API within Newton, or " "finish the upgrade from Mitaka to Newton. This will result in a service " "outage for the credential API where credentials will be read-only for the " "duration of the upgrade process. Once the database is contracted credentials " "will be writeable again. Database contraction phases only apply to rolling " "upgrades." msgid "" "Keystone now uses oslo.cache. Update the `[cache]` section of `keystone." "conf` to point to oslo.cache backends: ``oslo_cache.memcache_pool`` or " "``oslo_cache.mongo``. Refer to the sample configuration file for examples. " "See `oslo.cache `_ for " "additional documentation." msgstr "" "Keystone now uses oslo.cache. Update the `[cache]` section of `keystone." "conf` to point to oslo.cache backends: ``oslo_cache.memcache_pool`` or " "``oslo_cache.mongo``. Refer to the sample configuration file for examples. " "See `oslo.cache `_ for " "additional documentation." msgid "" "Keystone supports ``$(project_id)s`` in the catalog. It works the same as ``" "$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalog " "endpoints should be updated to use ``$(project_id)s``." msgstr "" "Keystone supports ``$(project_id)s`` in the catalogue. It works the same as " "``$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalogue " "endpoints should be updated to use ``$(project_id)s``." msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "" "Mappings can now specify \"whitelist\" and \"blacklist\" conditionals as " "regular expressions. 
"Prior, only \"not_any_of\" and \"any_one_of\" " "conditionals supported regular expression matching." msgstr "" "Mappings can now specify \"whitelist\" and \"blacklist\" conditionals as " "regular expressions. Prior, only \"not_any_of\" and \"any_one_of\" " "conditionals supported regular expression matching." msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Not specifying a domain during a create user, group or project call, which " "relied on falling back to the default domain, is now deprecated and will be " "removed in the N release." msgstr "" "Not specifying a domain during a create user, group or project call, which " "relied on falling back to the default domain, is now deprecated and will be " "removed in the N release." msgid "Note that at a minimum python-ldappool 2.3.1 is required." msgstr "Note that at a minimum python-ldappool 2.3.1 is required." msgid "" "OSprofiler support was added. This cross-project profiling library allows to " "trace various requests through all OpenStack services that support it. To " "initiate OpenStack request tracing `--profile ` option needs to be " "added to the CLI command. Configuration and usage details can be foung in " "[`OSProfiler documentation `_]" msgstr "" "OSprofiler support was added. This cross-project profiling library allows to " "trace various requests through all OpenStack services that support it. To " "initiate OpenStack request tracing `--profile ` option needs to be " "added to the CLI command. Configuration and usage details can be found in " "[`OSProfiler documentation `_]" msgid "" "OSprofiler support was introduced. To allow its usage the keystone-paste.ini " "file needs to be modified to contain osprofiler middleware." msgstr "" "OSprofiler support was introduced. 
To allow its usage the keystone-paste.ini " "file needs to be modified to contain osprofiler middleware." msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "" "Original WSGI Framework (custom, home-rolled, based on WEBOB) has been " "removed from the codebase." msgstr "" "Original WSGI Framework (custom, home-rolled, based on WEBOB) has been " "removed from the codebase." msgid "Other Notes" msgstr "Other Notes" msgid "PKI and PKIz token formats have been removed in favor of Fernet tokens." msgstr "" "PKI and PKIz token formats have been removed in favour of Fernet tokens." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Please consider these new default if your deployment overrides domain " "policies." msgstr "" "Please consider these new default if your deployment overrides domain " "policies." msgid "Prelude" msgstr "Prelude" msgid "" "Project tags are implemented following the guidelines set by the `API " "Working Group `_" msgstr "" "Project tags are implemented following the guidelines set by the `API " "Working Group `_" msgid "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgstr "" "Python 3.6 & 3.7 support has been dropped. The minimum version of Python now " "supported is Python 3.8." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Remove token_auth from your keystone paste.ini file. Failure to remove these " "elements from your paste ini file will result in keystone to no longer start/" "run when the `token_auth` is removed in the Stein release." msgstr "" "Remove token_auth from your Keystone paste.ini file. Failure to remove these " "elements from your paste ini file will result in Keystone to no longer start/" "run when the `token_auth` is removed in the Stein release." msgid "" "Replaced the usage of SQLAlchemy Inspector.from_engine() with the sqlalchemy." 
"inspect() call, within several Alembic migration files as well as a test " "suite. SQLAlchemy will be deprecating the former syntax, so this change " "allows forwads compatibility with the next series of SQLAlchemy." msgstr "" "Replaced the usage of SQLAlchemy Inspector.from_engine() with the sqlalchemy." "inspect() call, within several Alembic migration files as well as a test " "suite. SQLAlchemy will be deprecating the former syntax, so this change " "allows forward compatibility with the next series of SQLAlchemy." msgid "" "Restores the configurability of the resource driver, so it is now possible " "to create a custom resource driver if the built-in sql driver does not meet " "business requirements." msgstr "" "Restores the configurability of the resource driver, so it is now possible " "to create a custom resource driver if the built-in SQL driver does not meet " "business requirements." msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Routes and SQL backends for the contrib extensions have been removed, they " "have been incorporated into keystone and are no longer optional. This " "affects:" msgstr "" "Routes and SQL backends for the contrib extensions have been removed, they " "have been incorporated into Keystone and are no longer optional. This " "affects:" msgid "" "Running keystone in eventlet remains deprecated and will be removed in the " "Mitaka release." msgstr "" "Running Keystone in eventlet remains deprecated and will be removed in the " "Mitaka release." msgid "" "SECURITY INFO: The MFA rules are only processed when authentication happens " "through the V3 authentication APIs. If V2 Auth is enabled it is possible to " "circumvent the MFA rules if the user can authenticate via V2 Auth API. It is " "recommended to disable V2 authentication for full enforcement of the MFA " "rules." msgstr "" "SECURITY INFO: The MFA rules are only processed when authentication happens " "through the V3 authentication APIs. 
If V2 Auth is enabled it is possible to " "circumvent the MFA rules if the user can authenticate via V2 Auth API. It is " "recommended to disable V2 authentication for full enforcement of the MFA " "rules." msgid "SQL:" msgstr "SQL:" msgid "" "Schema downgrades via ``keystone-manage db_sync`` are no longer supported. " "Only upgrades are supported." msgstr "" "Schema downgrades via ``keystone-manage db_sync`` are no longer supported. " "Only upgrades are supported." msgid "Security Issues" msgstr "Security Issues" msgid "" "See `Project Tags `_" msgstr "" "See `Project Tags `_" msgid "" "Set the following user attributes to ``True`` or ``False`` in an API " "request. To mark a user as exempt from the PCI password lockout policy::" msgstr "" "Set the following user attributes to ``True`` or ``False`` in an API " "request. To mark a user as exempt from the PCI password lockout policy::" msgid "" "Several configuration options have been deprecated, renamed, or moved to new " "sections in the ``keystone.conf`` file." msgstr "" "Several configuration options have been deprecated, renamed, or moved to new " "sections in the ``keystone.conf`` file." msgid "" "Several features were hardened, including Fernet tokens, federation, domain " "specific configurations from database and role assignments." msgstr "" "Several features were hardened, including Fernet tokens, federation, domain " "specific configurations from database and role assignments." msgid "" "Several token issuance methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favor of a single " "method to issue tokens (``issue_token``). If using a custom token provider, " "updated the custom provider accordingly." msgstr "" "Several token issuance methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favour of a single " "method to issue tokens (``issue_token``). 
If using a custom token provider, " "updated the custom provider accordingly." msgid "" "Several token validation methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favor of a single " "method to validate tokens (``validate_token``), that has the signature " "``validate_token(self, token_ref)``. If using a custom token provider, " "update the custom provider accordingly." msgstr "" "Several token validation methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favour of a single " "method to validate tokens (``validate_token``), that has the signature " "``validate_token(self, token_ref)``. If using a custom token provider, " "update the custom provider accordingly." msgid "" "Since the scope information is now available in the credential dictionary, " "we can just make use of it instead. Those who have custom policies must " "update their policy files accordingly." msgstr "" "Since the scope information is now available in the credential dictionary, " "we can just make use of it instead. Those who have custom policies must " "update their policy files accordingly." msgid "Some bugs for unified limit APIs have been fixed, it includes:" msgstr "Some bugs for unified limit APIs have been fixed, it includes:" msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "" "Support for writing to LDAP has been removed. See ``Other Notes`` for more " "details." msgstr "" "Support for writing to LDAP has been removed. See ``Other Notes`` for more " "details." msgid "" "Support has now been added to send notification events on user/group " "membership. When a user is added or removed from a group a notification will " "be sent including the identifiers of both the user and the group." msgstr "" "Support has now been added to send notification events on user/group " "membership. 
When a user is added or removed from a group a notification will " "be sent including the identifiers of both the user and the group." msgid "" "Support was improved for out-of-tree drivers by defining stable driver " "interfaces." msgstr "" "Support was improved for out-of-tree drivers by defining stable driver " "interfaces." msgid "Tags are case sensitive" msgstr "Tags are case sensitive" msgid "" "The EC2 token middleware, deprecated in Juno, is no longer available in " "keystone. It has been moved to the keystonemiddleware package." msgstr "" "The EC2 token middleware, deprecated in Juno, is no longer available in " "Keystone. It has been moved to the keystonemiddleware package." msgid "" "The LDAP driver now also maps the user description attribute after user " "retrieval from LDAP. If this is undesired behavior for your setup, please " "add `description` to the `user_attribute_ignore` LDAP driver config setting. " "The default mapping of the description attribute is set to `description`. " "Please adjust the LDAP driver config setting `user_description_attribute` if " "your LDAP uses a different attribute name (for instance to `displayName` in " "case of an AD backed LDAP). If your `user_additional_attribute_mapping` " "setting contains `description:description` you can remove this mapping, " "since this is now the default behavior." msgstr "" "The LDAP driver now also maps the user description attribute after user " "retrieval from LDAP. If this is undesired behaviour for your setup, please " "add `description` to the `user_attribute_ignore` LDAP driver config setting. " "The default mapping of the description attribute is set to `description`. " "Please adjust the LDAP driver config setting `user_description_attribute` if " "your LDAP uses a different attribute name (for instance to `displayName` in " "case of an AD backed LDAP). 
If your `user_additional_attribute_mapping` " "setting contains `description:description` you can remove this mapping, " "since this is now the default behaviour." msgid "" "The MFA rules are set via the user create and update API (``POST/PATCH /v3/" "users``) call; the options allow an admin to force a user to use specific " "forms of authentication or combinations of forms of authentication to get a " "token. The rules are specified as follows::" msgstr "" "The MFA rules are set via the user create and update API (``POST/PATCH /v3/" "users``) call; the options allow an admin to force a user to use specific " "forms of authentication or combinations of forms of authentication to get a " "token. The rules are specified as follows::" msgid "" "The PKI and PKIz token format has been removed. See ``Other Notes`` for more " "details." msgstr "" "The PKI and PKIz token format has been removed. See ``Other Notes`` for more " "details." msgid "" "The V8 Federation driver interface is deprecated in favor of the V9 " "Federation driver interface. Support for the V8 Federation driver interface " "is planned to be removed in the 'O' release of OpenStack." msgstr "" "The V8 Federation driver interface is deprecated in favour of the V9 " "Federation driver interface. Support for the V8 Federation driver interface " "is planned to be removed in the 'O' release of OpenStack." msgid "" "The V8 Resource driver interface is deprecated. Support for the V8 Resource " "driver interface is planned to be removed in the 'O' release of OpenStack." msgstr "" "The V8 Resource driver interface is deprecated. Support for the V8 Resource " "driver interface is planned to be removed in the 'O' release of OpenStack." msgid "" "The XML middleware stub has been removed, so references to it must be " "removed from the ``keystone-paste.ini`` configuration file." 
msgstr "" "The XML middleware stub has been removed, so references to it must be " "removed from the ``keystone-paste.ini`` configuration file." msgid "" "The ``/OS-FEDERATION/projects`` and ``/OS-FEDERATION/domains`` APIs are " "deprecated in favor of the ``/v3/auth/projects`` and ``/v3/auth/domains`` " "APIs. These APIs were originally marked as deprecated during the Juno " "release cycle, but we never deprecated using ``versionutils`` from oslo. " "More information regarding this deprecation can be found in the `patch " "`_ that proposed the deprecation." msgstr "" "The ``/OS-FEDERATION/projects`` and ``/OS-FEDERATION/domains`` APIs are " "deprecated in favour of the ``/v3/auth/projects`` and ``/v3/auth/domains`` " "APIs. These APIs were originally marked as deprecated during the Juno " "release cycle, but we never deprecated using ``versionutils`` from oslo. " "More information regarding this deprecation can be found in the `patch " "`_ that proposed the deprecation." msgid "" "The ``[DEFAULT] domain_id_immutable`` configuration option has been removed " "in favor of strictly immutable domain IDs." msgstr "" "The ``[DEFAULT] domain_id_immutable`` configuration option has been removed " "in favour of strictly immutable domain IDs." msgid "" "The ``[DEFAULT] domain_id_immutable`` option has been removed. This removes " "the ability to change the ``domain_id`` attribute of users, groups, and " "projects. The behavior was introduced to allow deployers to migrate entities " "from one domain to another by updating the ``domain_id`` attribute of an " "entity. This functionality was deprecated in the Mitaka release is now " "removed." msgstr "" "The ``[DEFAULT] domain_id_immutable`` option has been removed. This removes " "the ability to change the ``domain_id`` attribute of users, groups, and " "projects. The behaviour was introduced to allow deployers to migrate " "entities from one domain to another by updating the ``domain_id`` attribute " "of an entity. 
This functionality was deprecated in the Mitaka release is now " "removed." msgid "" "The ``[assignment] driver`` now defaults to ``sql``. Logic to determine the " "default assignment driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one assignment driver and it shouldn't " "be changed unless you're deploying a custom assignment driver." msgstr "" "The ``[assignment] driver`` now defaults to ``sql``. Logic to determine the " "default assignment driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one assignment driver and it shouldn't " "be changed unless you're deploying a custom assignment driver." msgid "" "The ``[endpoint_policy] enabled`` configuration option has been removed in " "favor of always enabling the endpoint policy extension." msgstr "" "The ``[endpoint_policy] enabled`` configuration option has been removed in " "favour of always enabling the endpoint policy extension." msgid "" "The ``[os_inherit] enabled`` config option has been removed, the `OS-" "INHERIT` extension is now always enabled." msgstr "" "The ``[os_inherit] enabled`` config option has been removed, the `OS-" "INHERIT` extension is now always enabled." msgid "" "The ``[resource] driver`` now defaults to ``sql``. Logic to determine the " "default resource driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one resource driver and it shouldn't be " "changed unless you're deploying a custom resource driver." msgstr "" "The ``[resource] driver`` now defaults to ``sql``. Logic to determine the " "default resource driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one resource driver and it shouldn't be " "changed unless you're deploying a custom resource driver." msgid "" "The ``[security_compliance] password_expires_ignore_user_ids`` option has " "been removed. 
Each user that should ignore password expiry should have the " "value set to \"true\" in the user's ``options`` attribute (e.g. " "``user['options']['ignore_password_expiry'] = True``) with a user update " "call." msgstr "" "The ``[security_compliance] password_expires_ignore_user_ids`` option has " "been removed. Each user that should ignore password expiry should have the " "value set to \"true\" in the user's ``options`` attribute (e.g. " "``user['options']['ignore_password_expiry'] = True``) with a user update " "call." msgid "" "The ``compute_port`` configuration option, deprecated in Juno, is no longer " "available." msgstr "" "The ``compute_port`` configuration option, deprecated in Juno, is no longer " "available." msgid "" "The ``enabled`` config option of the ``trust`` feature is deprecated and " "will be removed in the next release. Trusts will then always be enabled." msgstr "" "The ``enabled`` config option of the ``trust`` feature is deprecated and " "will be removed in the next release. Trusts will then always be enabled." msgid "" "The ``httpd/keystone.py`` file has been removed in favor of the ``keystone-" "wsgi-admin`` and ``keystone-wsgi-public`` scripts." msgstr "" "The ``httpd/keystone.py`` file has been removed in favour of the ``keystone-" "wsgi-admin`` and ``keystone-wsgi-public`` scripts." msgid "" "The ``keystone.conf`` file now references entrypoint names for drivers. For " "example, the drivers are now specified as \"sql\", \"ldap\", \"uuid\", " "rather than the full module path. See the sample configuration file for " "other examples." msgstr "" "The ``keystone.conf`` file now references entrypoint names for drivers. For " "example, the drivers are now specified as \"sql\", \"ldap\", \"uuid\", " "rather than the full module path. See the sample configuration file for " "other examples." msgid "" "The ``keystone/service.py`` file has been removed, the logic has been moved " "to the ``keystone/version/service.py``." 
msgstr "" "The ``keystone/service.py`` file has been removed, the logic has been moved " "to the ``keystone/version/service.py``." msgid "" "The ``memcache`` and ``memcache_pool`` token persistence backends have been " "removed in favor of using Fernet tokens (which require no persistence)." msgstr "" "The ``memcache`` and ``memcache_pool`` token persistence backends have been " "removed in favour of using Fernet tokens (which require no persistence)." msgid "" "The ``policies`` API is deprecated. Keystone is not a policy management " "service." msgstr "" "The ``policies`` API is deprecated. Keystone is not a policy management " "service." msgid "" "The ``token`` auth method typically should not be specified in any MFA " "Rules. The ``token`` auth method will include all previous auth methods for " "the original auth request and will match the appropriate ruleset. This is " "intentional, as the ``token`` method is used for rescoping/changing active " "projects." msgstr "" "The ``token`` auth method typically should not be specified in any MFA " "Rules. The ``token`` auth method will include all previous auth methods for " "the original auth request and will match the appropriate ruleset. This is " "intentional, as the ``token`` method is used for rescoping/changing active " "projects." msgid "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:api_v3]``. Remove the following filters: " "``[filter:oauth1_extension]``, ``[filter:federation_extension]``, ``[filter:" "endpoint_filter_extension]``, and ``[filter:revoke_extension]``. See the " "sample `keystone-paste.ini `_ file for guidance." msgstr "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:api_v3]``. Remove the following filters: " "``[filter:oauth1_extension]``, ``[filter:federation_extension]``, ``[filter:" "endpoint_filter_extension]``, and ``[filter:revoke_extension]``. 
See the " "sample `keystone-paste.ini `_ file for guidance." msgid "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` " "pipelines. Remove the following filters: ``[filter:user_crud_extension]``, " "``[filter:crud_extension]``. See the sample `keystone-paste.ini `_ file " "for guidance." msgstr "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` " "pipelines. Remove the following filters: ``[filter:user_crud_extension]``, " "``[filter:crud_extension]``. See the sample `keystone-paste.ini `_ file " "for guidance." msgid "" "The `os_inherit` configuration option is disabled. In the future, this " "option will be removed and this portion of the API will be always enabled." msgstr "" "The `os_inherit` configuration option is disabled. In the future, this " "option will be removed and this portion of the API will be always enabled." msgid "" "The ability to validate a trust-scoped token against the v2.0 API has been " "removed, in favor of using the version 3 of the API." msgstr "" "The ability to validate a trust-scoped token against the v2.0 API has been " "removed, in favour of using the version 3 of the API." msgid "" "The admin_token method of authentication was never intended to be used for " "any purpose other than bootstrapping an install. However many deployments " "had to leave the admin_token method enabled due to restrictions on editing " "the paste file used to configure the web pipelines. To minimize the risk " "from this mechanism, the `admin_token` configuration value now defaults to a " "python `None` value. In addition, if the value is set to `None`, either " "explicitly or implicitly, the `admin_token` will not be enabled, and an " "attempt to use it will lead to a failed authentication." 
msgstr "" "The admin_token method of authentication was never intended to be used for " "any purpose other than bootstrapping an install. However many deployments " "had to leave the admin_token method enabled due to restrictions on editing " "the paste file used to configure the web pipelines. To minimize the risk " "from this mechanism, the `admin_token` configuration value now defaults to a " "python `None` value. In addition, if the value is set to `None`, either " "explicitly or implicitly, the `admin_token` will not be enabled, and an " "attempt to use it will lead to a failed authentication." msgid "" "The auth plugin ``keystone.auth.plugins.saml2.Saml2`` has been removed in " "favor of the auth plugin ``keystone.auth.plugins.mapped.Mapped``." msgstr "" "The auth plugin ``keystone.auth.plugins.saml2.Saml2`` has been removed in " "favour of the auth plugin ``keystone.auth.plugins.mapped.Mapped``." msgid "" "The catalog backend ``endpoint_filter.sql`` has been removed. It has been " "consolidated with the ``sql`` backend, therefore replace the " "``endpoint_filter.sql`` catalog backend with the ``sql`` backend." msgstr "" "The catalogue backend ``endpoint_filter.sql`` has been removed. It has been " "consolidated with the ``sql`` backend, therefore replace the " "``endpoint_filter.sql`` catalogue backend with the ``sql`` backend." msgid "" "The check for admin token from ``build_auth_context`` middleware has been " "removed. If your deployment requires the use of `admin token`, update " "``keystone-paste.ini`` so that ``admin_token_auth`` is before " "``build_auth_context`` in the paste pipelines, otherwise remove the " "``admin_token_auth`` middleware from ``keystone-paste.ini`` entirely." msgstr "" "The check for admin token from ``build_auth_context`` middleware has been " "removed. 
If your deployment requires the use of `admin token`, update " "``keystone-paste.ini`` so that ``admin_token_auth`` is before " "``build_auth_context`` in the paste pipelines, otherwise remove the " "``admin_token_auth`` middleware from ``keystone-paste.ini`` entirely." msgid "" "The config option ``rolling_upgrade_password_hash_compat`` is removed. It is " "only used for rolling-upgrade from Ocata release to Pike release." msgstr "" "The config option ``rolling_upgrade_password_hash_compat`` is removed. It is " "only used for rolling-upgrade from Ocata release to Pike release." msgid "" "The configuration options for LDAP connection pooling, `[ldap] use_pool` and " "`[ldap] use_auth_pool`, are now both enabled by default. Only deployments " "using LDAP drivers are affected. Additional configuration options are " "available in the `[ldap]` section to tune connection pool size, etc." msgstr "" "The configuration options for LDAP connection pooling, `[ldap] use_pool` and " "`[ldap] use_auth_pool`, are now both enabled by default. Only deployments " "using LDAP drivers are affected. Additional configuration options are " "available in the `[ldap]` section to tune connection pool size, etc." msgid "" "The credentials list call can now have its results filtered by credential " "type." msgstr "" "The credentials list call can now have its results filtered by credential " "type." msgid "" "The default setting for the `os_inherit` configuration option is changed to " "True. If it is required to continue with this portion of the API disabled, " "then override the default setting by explicitly specifying the os_inherit " "option as False." msgstr "" "The default setting for the `os_inherit` configuration option is changed to " "True. If it is required to continue with this portion of the API disabled, " "then override the default setting by explicitly specifying the os_inherit " "option as False." msgid "The default token provider is now Fernet." 
msgstr "The default token provider is now Fernet." msgid "" "The external authentication plugins ExternalDefault, ExternalDomain, " "LegacyDefaultDomain, and LegacyDomain, deprecated in Icehouse, are no longer " "available." msgstr "" "The external authentication plugins ExternalDefault, ExternalDomain, " "LegacyDefaultDomain, and LegacyDomain, deprecated in Icehouse, are no longer " "available." msgid "" "The functionality of the ``ADMIN_TOKEN`` remains, but has been incorporated " "into the main auth middleware (``keystone.middleware.auth." "AuthContextMiddleware``)." msgstr "" "The functionality of the ``ADMIN_TOKEN`` remains, but has been incorporated " "into the main auth middleware (``keystone.middleware.auth." "AuthContextMiddleware``)." msgid "" "The identity backend driver interface has changed. A new method, " "`unset_default_project_id(project_id)`, was added to unset a user's default " "project ID for a given project ID. Custom backend implementations must " "implement this method." msgstr "" "The identity backend driver interface has changed. A new method, " "`unset_default_project_id(project_id)`, was added to unset a user's default " "project ID for a given project ID. Custom backend implementations must " "implement this method." msgid "" "The identity backend driver interface has changed. We've added a new " "``change_password()`` method for self service password changes. If you have " "a custom implementation for the identity driver, you will need to implement " "this new method." msgstr "" "The identity backend driver interface has changed. We've added a new " "``change_password()`` method for self service password changes. If you have " "a custom implementation for the identity driver, you will need to implement " "this new method." msgid "" "The implementation for checking database state during an upgrade with the " "use of `keystone-manage db_sync --check` has been corrected. 
This allows " "users and automation to determine what step is next in a rolling upgrade " "based on logging and command status codes." msgstr "" "The implementation for checking database state during an upgrade with the " "use of `keystone-manage db_sync --check` has been corrected. This allows " "users and automation to determine what step is next in a rolling upgrade " "based on logging and command status codes." msgid "" "The list_project_ids_for_user(), list_domain_ids_for_user(), " "list_user_ids_for_project(), list_project_ids_for_groups(), " "list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and " "list_role_ids_for_groups_on_domain() methods have been removed from the V9 " "version of the Assignment driver." msgstr "" "The list_project_ids_for_user(), list_domain_ids_for_user(), " "list_user_ids_for_project(), list_project_ids_for_groups(), " "list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and " "list_role_ids_for_groups_on_domain() methods have been removed from the V9 " "version of the Assignment driver." msgid "The method signature has changed from::" msgstr "The method signature has changed from::" msgid "" "The resource backend cannot be configured to anything but SQL if the SQL " "Identity backend is being used. The resource backend must now be SQL which " "allows for the use of Foreign Keys to domains/projects wherever desired. " "This makes managing project relationships and such much more straight " "forward. The inability to configure non-SQL resource backends has been in " "Keystone since at least Ocata. This is eliminating some complexity and " "preventing the need for some really ugly back-port SQL migrations in favor " "of a better model. Resource is highly relational and should be SQL based." msgstr "" "The resource backend cannot be configured to anything but SQL if the SQL " "Identity backend is being used. 
The resource backend must now be SQL which " "allows for the use of Foreign Keys to domains/projects wherever desired. " "This makes managing project relationships and such much more straight " "forward. The inability to configure non-SQL resource backends has been in " "Keystone since at least Ocata. This is eliminating some complexity and " "preventing the need for some really ugly back-port SQL migrations in favour " "of a better model. Resource is highly relational and should be SQL based." msgid "" "The response's content type for creating request token or access token is " "changed to `application/x-www-form-urlencoded`, the old value `application/x-" "www-urlformencoded` is invalid and will no longer be used." msgstr "" "The response's content type for creating request token or access token is " "changed to `application/x-www-form-urlencoded`, the old value `application/x-" "www-urlformencoded` is invalid and will no longer be used." msgid "" "The rules are specified as a list of lists. The elements of the sub-lists " "must be strings and are intended to mirror the required authentication " "method names (e.g. ``password``, ``totp``, etc) as defined in the ``keystone." "conf`` file in the ``[auth] methods`` option." msgstr "" "The rules are specified as a list of lists. The elements of the sub-lists " "must be strings and are intended to mirror the required authentication " "method names (e.g. ``password``, ``totp``, etc) as defined in the ``keystone." "conf`` file in the ``[auth] methods`` option." msgid "" "The token provider API has removed the ``needs_persistence`` property from " "the abstract interface. Token providers are expected to handle persistence " "requirement if needed. This will require out-of-tree token providers to " "remove the unused property and handle token storage." msgstr "" "The token provider API has removed the ``needs_persistence`` property from " "the abstract interface. 
Token providers are expected to handle persistence " "requirement if needed. This will require out-of-tree token providers to " "remove the unused property and handle token storage." msgid "" "The token_formatter utility class has been moved from under fernet to the " "default token directory. This is to allow for the reuse of functionality " "with other token providers. Any deployments that are specifically using the " "fernet utils may be affected and will need to adjust accordingly." msgstr "" "The token_formatter utility class has been moved from under fernet to the " "default token directory. This is to allow for the reuse of functionality " "with other token providers. Any deployments that are specifically using the " "fernet utils may be affected and will need to adjust accordingly." msgid "" "The trusts table now has an expires_at_int column that represents the " "expiration time as an integer instead of a datetime object. This will " "prevent rounding errors related to the way date objects are stored in some " "versions of MySQL. The expires_at column remains, but will be dropped in " "Rocky." msgstr "" "The trusts table now has an expires_at_int column that represents the " "expiration time as an integer instead of a datetime object. This will " "prevent rounding errors related to the way date objects are stored in some " "versions of MySQL. The expires_at column remains, but will be dropped in " "Rocky." msgid "" "The use of `sha512_crypt` is considered inadequate for password hashing in " "an application like Keystone. The use of bcrypt or scrypt is recommended to " "ensure protection against password cracking utilities if the hashes are " "exposed. This is due to Time-Complexity requirements for computing the " "hashes in light of modern hardware (CPU, GPU, ASIC, FPGA, etc). Keystone has " "moved to bcrypt as a default and no longer hashes new passwords (and " "password changes) with sha512_crypt. 
It is recommended passwords be changed " "after upgrade to Pike. The risk of password hash exposure is limited, but " "for the best possible protection against cracking the hash it is recommended " "passwords be changed after upgrade. The password change will then result in " "a more secure hash (bcrypt by default) being used to store the password in " "the DB." msgstr "" "The use of `sha512_crypt` is considered inadequate for password hashing in " "an application like Keystone. The use of bcrypt or scrypt is recommended to " "ensure protection against password cracking utilities if the hashes are " "exposed. This is due to Time-Complexity requirements for computing the " "hashes in light of modern hardware (CPU, GPU, ASIC, FPGA, etc). Keystone has " "moved to bcrypt as a default and no longer hashes new passwords (and " "password changes) with sha512_crypt. It is recommended passwords be changed " "after upgrade to Pike. The risk of password hash exposure is limited, but " "for the best possible protection against cracking the hash it is recommended " "passwords be changed after upgrade. The password change will then result in " "a more secure hash (bcrypt by default) being used to store the password in " "the DB." msgid "" "The use of admin_token filter is insecure compared to the use of a proper " "username/password. Historically the admin_token filter has been left enabled " "in Keystone after initialization due to the way CMS systems work. Moving to " "an out-of-band initialization using ``keystone-manage bootstrap`` will " "eliminate the security concerns around a static shared string that conveys " "admin access to keystone and therefore to the entire installation." msgstr "" "The use of admin_token filter is insecure compared to the use of a proper " "username/password. Historically the admin_token filter has been left enabled " "in Keystone after initialisation due to the way CMS systems work. 
Moving to " "an out-of-band initialisation using ``keystone-manage bootstrap`` will " "eliminate the security concerns around a static shared string that conveys " "admin access to Keystone and therefore to the entire installation." msgid "" "Third-party extensions that extend the abstract class " "(``ShadowUsersDriverBase``) should be updated according to the new parameter " "names." msgstr "" "Third-party extensions that extend the abstract class " "(``ShadowUsersDriverBase``) should be updated according to the new parameter " "names." msgid "" "This release adds support for Application Credentials, a new way to allow " "applications and automated tooling to authenticate with keystone. Rather " "than storing a username and password in an application's config file, which " "can pose security risks, you can now create an application credential to " "allow an application to authenticate and acquire a preset scope and role " "assignments. This is especially useful for LDAP and federated users, who can " "now delegate their cloud management tasks to a keystone-specific resource, " "rather than share their externally managed credentials with keystone and " "risk a compromise of those external systems. Users can delegate a subset of " "their role assignments to an application credential, allowing them to " "strategically limit their application's access to the minimum needed. Unlike " "passwords, a user can have more than one active application credential, " "which means they can be rotated without causing downtime for the " "applications using them." msgstr "" "This release adds support for Application Credentials, a new way to allow " "applications and automated tooling to authenticate with keystone. Rather " "than storing a username and password in an application's config file, which " "can pose security risks, you can now create an application credential to " "allow an application to authenticate and acquire a preset scope and role " "assignments. 
This is especially useful for LDAP and federated users, who can " "now delegate their cloud management tasks to a keystone-specific resource, " "rather than share their externally managed credentials with keystone and " "risk a compromise of those external systems. Users can delegate a subset of " "their role assignments to an application credential, allowing them to " "strategically limit their application's access to the minimum needed. Unlike " "passwords, a user can have more than one active application credential, " "which means they can be rotated without causing downtime for the " "applications using them." msgid "To mark a user as exempt from the PCI password expiry policy::" msgstr "To mark a user as exempt from the PCI password expiry policy::" msgid "To mark a user as exempt from the PCI reset policy::" msgstr "To mark a user as exempt from the PCI reset policy::" msgid "To mark a user exempt from the MFA Rules::" msgstr "To mark a user exempt from the MFA Rules::" msgid "To the properly written::" msgstr "To the properly written::" msgid "To::" msgstr "To::" msgid "" "Token persistence driver/code (SQL) is deprecated with this patch since it " "is only used by the UUID token provider.." msgstr "" "Token persistence driver/code (SQL) is deprecated with this patch since it " "is only used by the UUID token provider.." msgid "Tokens can now be cached when issued." msgstr "Tokens can now be cached when issued." msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "" "UUID token provider ``[token] provider=uuid`` has been deprecated in favor " "of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming " "the default UUID tokens can be slated for removal in the R release. This " "also deprecates token-bind support as it was never implemented for fernet." msgstr "" "UUID token provider ``[token] provider=uuid`` has been deprecated in favour " "of Fernet tokens ``[token] provider=fernet``. 
With Fernet tokens becoming " "the default UUID tokens can be slated for removal in the R release. This " "also deprecates token-bind support as it was never implemented for fernet." msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use of ``$(tenant_id)s`` in the catalog endpoints is deprecated in favor of " "``$(project_id)s``." msgstr "" "Use of ``$(tenant_id)s`` in the catalogue endpoints is deprecated in favour " "of ``$(project_id)s``." msgid "" "Using LDAP as the resource backend, i.e for projects and domains, is now " "deprecated and will be removed in the Mitaka release." msgstr "" "Using LDAP as the resource backend, i.e for projects and domains, is now " "deprecated and will be removed in the Mitaka release." msgid "" "Using the full path to the driver class is deprecated in favor of using the " "entrypoint. In the Mitaka release, the entrypoint must be used." msgstr "" "Using the full path to the driver class is deprecated in favour of using the " "entrypoint. In the Mitaka release, the entrypoint must be used." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" msgid "Wallaby Series Release Notes" msgstr "Wallaby Series Release Notes" msgid "" "We have added the ``password_expires_at`` attribute to the user response " "object." msgstr "" "We have added the ``password_expires_at`` attribute to the user response " "object." msgid "" "We now expose entrypoints for the ``keystone-manage`` command instead of a " "file." msgstr "" "We now expose entrypoints for the ``keystone-manage`` command instead of a " "file." msgid "" "Write support for the LDAP has been removed in favor of read-only support. " "The following operations are no longer supported for LDAP:" msgstr "" "Write support for the LDAP has been removed in favour of read-only support. 
" "The following operations are no longer supported for LDAP:" msgid "Xena Series Release Notes" msgstr "Xena Series Release Notes" msgid "Yoga Series Release Notes" msgstr "Yoga Series Release Notes" msgid "" "[`Bug 1645487 `_] Added a " "new PCI-DSS feature that will require users to immediately change their " "password upon first use for new users and after an administrative password " "reset. The new feature can be enabled by setting [security_compliance] " "``change_password_upon_first_use`` to ``True``." msgstr "" "[`Bug 1645487 `_] Added a " "new PCI-DSS feature that will require users to immediately change their " "password upon first use for new users and after an administrative password " "reset. The new feature can be enabled by setting [security_compliance] " "``change_password_upon_first_use`` to ``True``." msgid "" "[`Bug 1649446 `_] The " "default policy for listing revocation events has changed. Previously, any " "authenticated user could list revocation events; it is now, by default, an " "admin or service user only function. This can be changed by modifying the " "policy file being used by keystone." msgstr "" "[`Bug 1649446 `_] The " "default policy for listing revocation events has changed. Previously, any " "authenticated user could list revocation events; it is now, by default, an " "admin or service user only function. This can be changed by modifying the " "policy file being used by Keystone." msgid "" "[`Related to Bug 1649446 `_] The ``identity:list_revoke_events`` rule has been changed " "in both sample policy files, ``policy.json`` and ``policy.v3cloudsample." "json``. From::" msgstr "" "[`Related to Bug 1649446 `_] The ``identity:list_revoke_events`` rule has been changed " "in both sample policy files, ``policy.json`` and ``policy.v3cloudsample." "json``. 
From::" msgid "" "[`blueprint allow-expired `_] An `allow_expired` flag is added to the token validation " "call (``GET/HEAD /v3/auth/tokens``) that allows fetching a token that has " "expired. This allows for validating tokens in long running operations." msgstr "" "[`blueprint allow-expired `_] An `allow_expired` flag is added to the token validation " "call (``GET/HEAD /v3/auth/tokens``) that allows fetching a token that has " "expired. This allows for validating tokens in long running operations." msgid "" "[`blueprint allow-expired `_] To allow long running operations to complete services must " "be able to fetch expired tokens via the ``allow_expired`` flag. The length " "of time a token is retrievable for beyond its traditional expiry is managed " "by the ``[token] allow_expired_window`` option and so the data must be " "retrievable for this about of time. When using fernet tokens this means that " "the key rotation period must exceed this time so that older tokens are still " "decrytable. Ensure that you do not rotate fernet keys faster than ``[token] " "expiration`` + ``[token] allow_expired_window`` seconds." msgstr "" "[`blueprint allow-expired `_] To allow long running operations to complete services must " "be able to fetch expired tokens via the ``allow_expired`` flag. The length " "of time a token is retrievable for beyond its traditional expiry is managed " "by the ``[token] allow_expired_window`` option and so the data must be " "retrievable for this amount of time. When using fernet tokens this means that " "the key rotation period must exceed this time so that older tokens are still " "decryptable. Ensure that you do not rotate fernet keys faster than ``[token] " "expiration`` + ``[token] allow_expired_window`` seconds." 
msgid "" "[`blueprint application-credentials `_] Users can now create Application " "Credentials, a new keystone resource that can provide an application with " "the means to get a token from keystone with a preset scope and role " "assignments. To authenticate with an application credential, an application " "can use the normal token API with the 'application_credential' auth method." msgstr "" "[`blueprint application-credentials `_] Users can now create Application " "Credentials, a new keystone resource that can provide an application with " "the means to get a token from keystone with a preset scope and role " "assignments. To authenticate with an application credential, an application " "can use the normal token API with the 'application_credential' auth method." msgid "" "[`blueprint bootstrap `_] keystone-manage now supports the bootstrap command on the CLI " "so that a keystone install can be initialized without the need of the " "admin_token filter in the paste-ini." msgstr "" "[`blueprint bootstrap `_] keystone-manage now supports the bootstrap command on the CLI " "so that a keystone install can be initialised without the need of the " "admin_token filter in the paste-ini." msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the PKI " "and PKIz token formats have been deprecated. They will be removed in the 'O' " "release. Due to this change, the `hash_algorithm` option in the `[token]` " "section of the configuration file has also been deprecated. Also due to this " "change, the ``keystone-manage pki_setup`` command has been deprecated as " "well." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the PKI " "and PKIz token formats have been deprecated. They will be removed in the 'O' " "release. Due to this change, the `hash_algorithm` option in the `[token]` " "section of the configuration file has also been deprecated. 
Also due to this " "change, the ``keystone-manage pki_setup`` command has been deprecated as " "well." msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "auth plugin `keystone.auth.plugins.saml2.Saml2` has been deprecated. It is " "recommended to use `keystone.auth.plugins.mapped.Mapped` instead. The " "``saml2`` plugin will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "auth plugin `keystone.auth.plugins.saml2.Saml2` has been deprecated. It is " "recommended to use `keystone.auth.plugins.mapped.Mapped` instead. The " "``saml2`` plugin will be removed in the 'O' release." msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "simple_cert_extension is deprecated since it is only used in support of the " "PKI and PKIz token formats. It will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "simple_cert_extension is deprecated since it is only used in support of the " "PKI and PKIz token formats. It will be removed in the 'O' release." msgid "" "[`bug 1748970 `_] A bug " "was introduced in Queens that resulted in system role assignments being " "returned when querying the role assignments API for a specific role. The " "issue is fixed and the list of roles returned from ``GET /v3/" "role_assignments?role.id={role_id}`` respects system role assignments." msgstr "" "[`bug 1748970 `_] A bug " "was introduced in Queens that resulted in system role assignments being " "returned when querying the role assignments API for a specific role. The " "issue is fixed and the list of roles returned from ``GET /v3/" "role_assignments?role.id={role_id}`` respects system role assignments." msgid "" "[`bug 1749264 `_] A user's " "system role assignment will be removed when the user is deleted." 
msgstr "" "[`bug 1749264 `_] A user's " "system role assignment will be removed when the user is deleted." msgid "" "[`bug 1749267 `_] A " "group's system role assignments are removed when the group is deleted." msgstr "" "[`bug 1749267 `_] A " "group's system role assignments are removed when the group is deleted." msgid "" "[`bug 1755874 `_] Users " "now can have the resource option ``lock_password`` set which prevents the " "user from utilizing the self-service password change API. Valid values are " "``True``, ``False``, or \"None\" (where ``None`` clears the option)." msgstr "" "[`bug 1755874 `_] Users " "now can have the resource option ``lock_password`` set which prevents the " "user from utilizing the self-service password change API. Valid values are " "``True``, ``False``, or \"None\" (where ``None`` clears the option)." msgid "" "[`bug 1756190 `_] When " "filtering projects based on tags, the filtering will now be performed by " "matching a subset containing the given tags against projects, rather than " "exact matching. Providing more tags when performing a search will yield more " "exact results while less will return any projects that match the given tags " "but could contain other tags as well." msgstr "" "[`bug 1756190 `_] When " "filtering projects based on tags, the filtering will now be performed by " "matching a subset containing the given tags against projects, rather than " "exact matching. Providing more tags when performing a search will yield more " "exact results while less will return any projects that match the given tags " "but could contain other tags as well." msgid "" "[`bug 1757022 `_] In " "previous releases, ``keystone-manage mapping_purge --type {user,group}`` " "command would purge all mapping incorrectly instead of only purging the " "specified type mappings. ``keystone-manage mapping_purge --type {user,group}" "`` now purges only specified type mappings as expected." 
msgstr "" "[`bug 1757022 `_] In " "previous releases, ``keystone-manage mapping_purge --type {user,group}`` " "command would purge all mapping incorrectly instead of only purging the " "specified type mappings. ``keystone-manage mapping_purge --type {user,group}" "`` now purges only specified type mappings as expected." msgid "" "[`bug 1759289 `_] The " "``keystone-manage token_flush`` command no longer establishes a connection " "to a database, or persistence backend. It's usage should be removed if " "you're using a supported non-persistent token format. If you're relying on " "external token providers that write tokens to disk and would like to " "maintain this functionality, please consider porting it to a separate tool." msgstr "" "[`bug 1759289 `_] The " "``keystone-manage token_flush`` command no longer establishes a connection " "to a database, or persistence backend. Its usage should be removed if " "you're using a supported non-persistent token format. If you're relying on " "external token providers that write tokens to disk and would like to " "maintain this functionality, please consider porting it to a separate tool." msgid "" "[`bug 1760205 `_] When " "deleting a shadow user, the related cache info is not invalidated so that " "Keystone will raise 404 UserNotFound error when authenticating with the " "previous federation info. This bug has been fixed now." msgstr "" "[`bug 1760205 `_] When " "deleting a shadow user, the related cache info is not invalidated so that " "Keystone will raise 404 UserNotFound error when authenticating with the " "previous federation info. This bug has been fixed now." 
msgid "``delete group``" msgstr "``delete group``" msgid "``delete user``" msgstr "``delete user``" msgid "``identity:update_domain``" msgstr "``identity:update_domain``" msgid "``identity:update_limit``" msgstr "``identity:update_limit``" msgid "``identity:update_registered_limit``" msgstr "``identity:update_registered_limit``" msgid "``identtity:delete_domain``" msgstr "``identtity:delete_domain``" msgid "``issue_v2_token``" msgstr "``issue_v2_token``" msgid "``issue_v3_token``" msgstr "``issue_v3_token``" msgid "" "``keystone-manage db_sync`` will no longer create the Default domain. This " "domain is used as the domain for any users created using the legacy v2.0 " "API. A default domain is created by ``keystone-manage bootstrap`` and when a " "user or project is created using the legacy v2.0 API." msgstr "" "``keystone-manage db_sync`` will no longer create the Default domain. This " "domain is used as the domain for any users created using the legacy v2.0 " "API. A default domain is created by ``keystone-manage bootstrap`` and when a " "user or project is created using the legacy v2.0 API." 
msgid "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgstr "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgid "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgstr "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgid "``keystone.token.persistence.backends.kvs.Token``" msgstr "``keystone.token.persistence.backends.kvs.Token``" msgid "``keystone/common/cache/backends/memcache_pool``" msgstr "``keystone/common/cache/backends/memcache_pool``" msgid "``keystone/common/cache/backends/mongo``" msgstr "``keystone/common/cache/backends/mongo``" msgid "``keystone/common/cache/backends/noop``" msgstr "``keystone/common/cache/backends/noop``" msgid "``keystone/contrib/admin_crud``" msgstr "``keystone/contrib/admin_crud``" msgid "``keystone/contrib/endpoint_filter``" msgstr "``keystone/contrib/endpoint_filter``" msgid "``keystone/contrib/federation``" msgstr "``keystone/contrib/federation``" msgid "``keystone/contrib/oauth1``" msgstr "``keystone/contrib/oauth1``" msgid "``keystone/contrib/revoke``" msgstr "``keystone/contrib/revoke``" msgid "``keystone/contrib/simple_cert``" msgstr "``keystone/contrib/simple_cert``" msgid "``keystone/contrib/user_crud``" msgstr "``keystone/contrib/user_crud``" msgid "" "``openstack_user_domain`` and ``openstack_project_domain`` attributes were " "added to SAML assertion in order to map user and project domains, " "respectively." msgstr "" "``openstack_user_domain`` and ``openstack_project_domain`` attributes were " "added to SAML assertion in order to map user and project domains, " "respectively." 
msgid "``pool_connection_get_timeout``" msgstr "``pool_connection_get_timeout``" msgid "``pool_maxsize``" msgstr "``pool_maxsize``" msgid "``pool_unused_timeout``" msgstr "``pool_unused_timeout``" msgid "``remove user from group``" msgstr "``remove user from group``" msgid "``update group``" msgstr "``update group``" msgid "``update user``" msgstr "``update user``" msgid "``validate_non_persistent_token``" msgstr "``validate_non_persistent_token``" msgid "``validate_v2_token``" msgstr "``validate_v2_token``" msgid "``validate_v3_token``" msgstr "``validate_v3_token``" msgid "all config options under ``[kvs]`` in `keystone.conf`" msgstr "all config options under ``[kvs]`` in `keystone.conf`" msgid "and will return a list of mappings for a given domain ID." msgstr "and will return a list of mappings for a given domain ID." msgid "" "delete from local_user where user_id in (select user_id from federated_user);" msgstr "" "delete from local_user where user_id in (select user_id from federated_user);" msgid "" "delete from system_assignment where role_id not in (select id from role);" msgstr "" "delete from system_assignment where role_id not in (select id from role);" msgid "eq - password expires at the timestamp" msgstr "eq - password expires at the timestamp" msgid "gt - password expires after the timestamp" msgstr "gt - password expires after the timestamp" msgid "gte - password expires at or after the timestamp" msgstr "gte - password expires at or after the timestamp" msgid "" "https://specs.openstack.org/openstack/keystone-specs/specs/keystone/train/" "explicit-domains-ids.html" msgstr "" "https://specs.openstack.org/openstack/keystone-specs/specs/keystone/train/" "explicit-domains-ids.html" msgid "lt - password expires before the timestamp" msgstr "lt - password expires before the timestamp" msgid "lte - password expires at or before timestamp" msgstr "lte - password expires at or before timestamp" msgid "" "mysqldump -h -p -P -u keystone keystone " 
"federated_user local_user user > user_tables.sql" msgstr "" "mysqldump -h -p -P -u keystone keystone " "federated_user local_user user > user_tables.sql" msgid "neq - password expires not at the timestamp" msgstr "neq - password expires not at the timestamp" msgid "" "stats_monitoring and stats_reporting paste filters have been removed, so " "references to it must be removed from the ``keystone-paste.ini`` " "configuration file." msgstr "" "stats_monitoring and stats_reporting paste filters have been removed, so " "references to it must be removed from the ``keystone-paste.ini`` " "configuration file." msgid "the config option ``[memcached] servers`` in `keystone.conf`" msgstr "the config option ``[memcached] servers`` in `keystone.conf`" msgid "to::" msgstr "to::" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000021322 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000023107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000525000000000000026142 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: Keystone Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-05-16 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 05:03+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "10.0.0" msgstr "10.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.1.0" msgstr "8.1.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.2.0" msgstr "9.2.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Critical Issues" msgstr "Erreurs critiques" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Keystone Release Notes" msgstr "Note de release de Keystone" msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Security Issues" msgstr "Problèmes de sécurités" msgid "Upgrade Notes" msgstr "Notes de mises à jours" msgid "``add user to group``" msgstr "``add user to group``" msgid "``create group``" msgstr "``create group``" msgid "``create user``" msgstr "``create user``" msgid "``delete group``" msgstr "``delete group``" msgid "``delete user``" msgstr "``delete user``" msgid "``keystone/common/cache/backends/memcache_pool``" msgstr "``keystone/common/cache/backends/memcache_pool``" msgid "``keystone/common/cache/backends/mongo``" msgstr "``keystone/common/cache/backends/mongo``" msgid "``keystone/common/cache/backends/noop``" msgstr "``keystone/common/cache/backends/noop``" msgid "``keystone/contrib/admin_crud``" msgstr "``keystone/contrib/admin_crud``" msgid 
"``keystone/contrib/endpoint_filter``" msgstr "``keystone/contrib/endpoint_filter``" msgid "``keystone/contrib/federation``" msgstr "``keystone/contrib/federation``" msgid "``keystone/contrib/oauth1``" msgstr "``keystone/contrib/oauth1``" msgid "``keystone/contrib/revoke``" msgstr "``keystone/contrib/revoke``" msgid "``keystone/contrib/simple_cert``" msgstr "``keystone/contrib/simple_cert``" msgid "``keystone/contrib/user_crud``" msgstr "``keystone/contrib/user_crud``" msgid "``remove user from group``" msgstr "``remove user from group``" msgid "``update group``" msgstr "``update group``" msgid "``update user``" msgstr "``update user``" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000021305 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000023072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000060217300000000000026134 0ustar00zuulzuul00000000000000# Shu Muto , 2017. #zanata # Shu Muto , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: Keystone Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-03-04 19:10+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-13 05:02+0000\n" "Last-Translator: Shu Muto \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "'/' and ',' are not allowed to be in a tag" msgstr "'/' と ',' は、タグでは使用できません。" msgid "" "**Experimental** - Domain specific configuration options can be stored in " "SQL instead of configuration files, using the new REST APIs." msgstr "" "** 実験的 ** - ドメイン固有の設定オプションは、新しいREST APIを使用して、設定" "ファイルの代わりに SQL で保存することができます。" msgid "" "**Experimental** - Keystone now supports tokenless authorization with X.509 " "SSL client certificate." msgstr "" "** 実験的 ** - Keystoneは X.509 SSL クライアント証明書でトークンレス認証をサ" "ポートするようになりました。" msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.3" msgstr "10.0.3" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.3" msgstr "11.0.3" msgid "12.0.0" msgstr "12.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.1.0" msgstr "8.1.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.2.0" msgstr "9.2.0" msgid "" "A new ``secure_proxy_ssl_header`` configuration option is available when " "running keystone behind a proxy." msgstr "" "新しい ``secure_proxy_ssl_header`` 設定オプションは、プロキシの背後で " "Keystone を実行するときに利用できます。" msgid "" "A new config option, `insecure_debug`, is added to control whether debug " "information is returned to clients. This used to be controlled by the " "`debug` option. If you'd like to return extra information to clients set the " "value to ``true``. This extra information may help an attacker." 
msgstr "" "新しい設定オプション `insecure_debug` が追加され、デバッグ情報をクライアント" "に返すか制御できます。 これは `debug` オプションによって制御されていました。 " "追加情報をクライアントに返したい場合は、値を ``true`` に設定してください。 こ" "の追加情報は、攻撃者を助けるかもしれません。" msgid "" "Add ``cache_on_issue`` flag to ``[token]`` section that enables placing " "issued tokens to validation cache thus reducing the first validation time as " "if token is already validated and token data cached." msgstr "" "発行されたトークンを検証キャッシュに置くことを可能にする ``cache_on_issue``フ" "ラグが ``[token]`` セクションに追加され、トークンがすでに検証され、これは、" "トークンがすでに検証され、トークンデータがキャッシュされているかのように、最" "初の検証時間を短縮します。" msgid "" "Add ``keystone-manage mapping_populate`` command, which should be used when " "domain-specific LDAP backend is used." msgstr "" "ドメイン固有の LDAP バックエンドが使用されるときに使用される ``keystone-" "manage mapping_populate`` コマンドを追加しました。" msgid "" "Add ``keystone-manage mapping_populate`` command. This command will pre-" "populate a mapping table with all users from LDAP, in order to improve " "future query performance. It should be used when an LDAP is first " "configured, or after calling ``keystone-manage mapping_purge``, before any " "queries related to the domain are made. For more information see ``keystone-" "manage mapping_populate --help``" msgstr "" "``keystone-manage mapping_populate`` コマンドを追加しました。 このコマンド" "は、以降のクエリーのパフォーマンスを向上させるために、LDAP のすべてのユーザー" "をマッピングテーブルをにあらかじめ入力します。 これは、 LDAP が最初に設定され" "たとき、あるいは ``keystone-manage mapping_purge`` を呼び出した後に、ドメイン" "に関連するクエリーが作成される前に使用されるべきです。 詳細は ``keystone-" "manage mapping_populate --help`` を参照してください。" msgid "" "Added an option ``--check`` to ``keystone-manage db_sync``, the option will " "allow a user to check the status of rolling upgrades in the database." msgstr "" "``keystone-manage db_sync`` にオプション ``--check`` を追加すると、ユーザーは" "データベース内のローリングアップグレードの状態を確認することができます。" msgid "" "Adjust configuration tools as necessary, see the ``fixes`` section for more " "details on this change." 
msgstr "" "必要に応じて設定ツールを調整してください。この変更の詳細については、 ``バグ修" "正`` のセクションを参照してください。" msgid "" "Any auth methods that are not defined in ``keystone.conf`` in the ``[auth] " "methods`` option are ignored when the rules are processed. Empty rules are " "not allowed. If a rule is empty due to no-valid auth methods existing within " "it, the rule is discarded at authentication time. If there are no rules or " "no valid rules for the user, authentication occurs in the default manner: " "any single configured auth method is sufficient to receive a token." msgstr "" "``[auth] methods`` オプションの ``keystone.conf`` で定義されていない認証方法" "は、ルールの処理時に無視されます。空のルールは設定できません。ルール内に無効" "な認証方法が存在するためにルールが空の場合、ルールは認証時に破棄されます。 " "ユーザーにルールがない場合、またはユーザーに対して有効なルールがない場合、認" "証はデフォルトの方法で行われます。一つの認証方法が設定されていいれば、トーク" "ンを受信するのに十分です。" msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "Certain deprecated methods from the assignment manager were removed in favor " "of the same methods in the [resource] and [role] manager." msgstr "" "割り当て管理で廃止された特定のメソッドが削除され、[リソース]と[ロール]管理の" "同じメソッドが使用されました。" msgid "" "Certain variables in ``keystone.conf`` now have options, which determine if " "the user's setting is valid." msgstr "" "``keystone.conf`` の特定の変数には、ユーザーの設定が有効かどうかを判断するオ" "プションがあります。" msgid "Configuring per-Identity Provider WebSSO is now supported." msgstr "認証プロバイダー WebSSO 毎の設定がサポートされました。" msgid "Critical Issues" msgstr "致命的な問題" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "Domain name information can now be used in policy rules with the attribute " "``domain_name``." msgstr "" "ドメイン名情報は、``domain_name`` 属性を持つポリシールールで使用できるように" "なりました。" msgid "" "Domains are now represented as top level projects with the attribute " "`is_domain` set to true. Such projects will appear as parents for any " "previous top level projects. Projects acting as domains can be created, " "read, updated, and deleted via either the project API or the domain API (V3 " "only)." 
msgstr "" "ドメインは、 is_domain 属性が true に設定されたトップレベルのプロジェクトとし" "て表されます。 そのようなプロジェクトは、以前のトップレベルプロジェクトの親と" "して表示されます。 ドメインとして機能するプロジェクトは、プロジェクト API ま" "たはドメイン API (V3 のみ)を使用して作成、読み取り、更新、および削除ができ" "ます。" msgid "" "Each list of methods specifies a rule. If the auth methods provided by a " "user match (or exceed) the auth methods in the list, that rule is used. The " "first rule found (rules will not be processed in a specific order) that " "matches will be used. If a user has the ruleset defined as ``[[\"password\", " "\"totp\"]]`` the user must provide both password and totp auth methods (and " "both methods must succeed) to receive a token. However, if a user has a " "ruleset defined as ``[[\"password\"], [\"password\", \"totp\"]]`` the user " "may use the ``password`` method on it's own but would be required to use " "both ``password`` and ``totp`` if ``totp`` is specified at all." msgstr "" "メソッドの各リストはルールを指定します。 ユーザーが提供する認証方法がリスト内" "の認証方法と一致(または複数が一致)した場合、そのルールが使用されます。 見つ" "かった最初のルール(ルールは特定の順序で処理されません)が使用されます。 ユー" "ザーがルールセットを ``[[\"password\",\"totp\"]]`` として定義している場合、" "ユーザーはトークンを受け取るためには password と totp の両方の認証方法を提供" "しなければなりません。 しかし、ユーザーが ``[[\"password\"],[\"password\"," "\"totp\"]]`` として定義されたルールセットを持っている場合、ユーザーは自身の " "``password``メソッドを使うことができますが、 ``totp`` が指定されていれば " "``password``と ``totp`` の両方を使います。" msgid "Each project can have up to 100 tags" msgstr "各プロジェクトは、タグを 100 個まで使えます。" msgid "Each tag can be up to 255 characters" msgstr "各タグは、255 文字まで使えます。" msgid "" "Features that were \"extensions\" in previous releases (OAuth delegation, " "Federated Identity support, Endpoint Policy, etc) are now enabled by default." msgstr "" "以前のリリースで「拡張機能」だった機能(OAuth 委任、統合認証サポート、エンド" "ポイントポリシーなど)は、デフォルトで有効になりました。" msgid "" "Fixes a bug related to the password create date. If you deployed master " "during Newton development, the password create date may be reset. This would " "only be apparent if you have security compliance features enabled." 
msgstr "" "パスワード作成日時に関連するバグが修正されました。Newton 開発の間にマスターを" "デプロイした場合は、パスワード作成日時がリセットされます。これは、セキュリ" "ティ保証機能を有効にしている場合に実施されます。" msgid "" "For additional details see: `event notifications `_" msgstr "" "追加の詳細は、`event notifications `_ を参照してください。" msgid "" "If PCI support is enabled, via the ``[security_compliance]`` configuration " "options, then the ``password_expires_at`` field will be populated with a " "timestamp. Otherwise, it will default to ``null``, indicating the password " "does not expire." msgstr "" "``[security_compliance]`` 設定オプションによってPCI サポートを有効にしている" "場合に、``password_expires_at`` 項目にタイムスタンプが入力されます。そうでな" "ければ、デフォルトで ``null`` になり、パスワードの有効期限が切れないことを意" "味します。" msgid "" "If a password does not meet the specified criteria. See " "``[security_compliance] password_regex``." msgstr "" "パスワードが指定された基準を満たしていない場合、 ``[security_compliance] " "password_regex`` を参照してください。" msgid "" "If a user attempts to change their password too often. See " "``[security_compliance] minimum_password_age``." msgstr "" "ユーザーが頻繁にパスワードを変更しようとする場合、 ``[security_compliance] " "minimum_password_age`` を参照してください。" msgid "" "If a user does not change their passwords at least once every X days. See " "``[security_compliance] password_expires_days``." msgstr "" "ユーザーが少なくとも X 日ごとにパスワードを変更しない場合、 " "``[security_compliance] password_expires_days`` を参照してください。" msgid "" "If a user is locked out after many failed authentication attempts. See " "``[security_compliance] lockout_failure_attempts``." msgstr "" "認証に複数回失敗してユーザーがロックアウトされた場合、 " "``[security_compliance] lockout_failure_attempts`` を参照してください。" msgid "" "If a user submits a new password that was recently used. See " "``[security_compliance] unique_last_password_count``." msgstr "" "ユーザーが新しいパスワードに最近使用されたものを送信した場合、 " "``[security_compliance] unique_last_password_count`` を参照してください。" msgid "" "If performing rolling upgrades, set `[identity] " "rolling_upgrade_password_hash_compat` to `True`. 
This will instruct keystone " "to continue to hash passwords in a manner that older (pre Pike release) " "keystones can still verify passwords. Once all upgrades are complete, ensure " "this option is set back to `False`." msgstr "" "ローリングアップグレードを実行する場合、 `[identity] " "rolling_upgrade_password_hash_compat` を ` True` に設定してください。 これに" "より、 Pike リリース以前の Keystone が引き続きパスワードを検証できるように、 " "Keystone がパスワードをハッシュし続けるように指示されます。すべてのアップグ" "レードが完了したら、このオプションが `False` に設定されていることを確認してく" "ださい。" msgid "" "In ``keystone-paste.ini``, using ``paste.filter_factory`` is deprecated in " "favor of the \"use\" directive, specifying an entrypoint." msgstr "" "``keystone-paste.ini`` では、 ``paste.filter_factory`` の使用は廃止され、 " "\"use\" ディレクティブでエントリーポイントを指定します。" msgid "" "In the [resource] and [role] sections of the ``keystone.conf`` file, not " "specifying the driver and using the assignment driver is deprecated. In the " "Mitaka release, the resource and role drivers will default to the SQL driver." msgstr "" "``keystone.conf`` ファイルの [resource] と [role] セクションでは、ドライバー" "を指定せずに割り当てドライバーの使用することは推奨されていません。 Mitaka リ" "リースでは、リソースとロールのドライバーはデフォルトで SQL ドライバーになりま" "す。" msgid "" "In the case a user should be exempt from MFA Rules, regardless if they are " "set, the User-Option ``multi_factor_auth_enabled`` may be set to ``False`` " "for that user via the user create and update API (``POST/PATCH /v3/users``) " "call. If this option is set to ``False`` the MFA rules will be ignored for " "the user. Any other value except ``False`` will result in the MFA Rules " "being processed; the option can only be a boolean (``True`` or ``False``) or " "\"None\" (which will result in the default behavior (same as ``True``) but " "the option will no longer be shown in the ``user[\"options\"]`` dictionary." 
msgstr "" "ユーザーが MFA ルールから免除されるべきである場合、設定されているかどうかにか" "かわらず、 ユーザーが作成と更新 API ( `` POST / PATCH / v3 / users``)を呼び" "出すことによって、そのユーザーの User-Option の " "``multi_factor_auth_enabled`` オプションが ``False`` に設定されます。このオプ" "ションが ``False`` に設定されている場合、 そのユーザに対して MFA ルールは無視" "されます。 ``False`` を除く他の値の場合は、 MFA ルールが処理される結果となり" "ます。 オプションはブール値(``True`` または ``False``)、あるいは None (こ" "れはデフォルトの動作( ``True`` と同じ)になりますが、このオプションは " "``user [\"options\"]`` ディクショナリーからなくなります)です。" msgid "" "In the policy.json file, we changed `identity:list_projects_for_groups` to " "`identity:list_projects_for_user`. Likewise, we changed `identity:" "list_domains_for_groups` to `identity:list_domains_for_user`. If you have " "customized the policy.json file, you will need to make these changes. This " "was done to better support new features around federation." msgstr "" "poicy.json ファイルの `identity:list_projects_for_groups` を `identity:" "list_projects_for_user` に変更しました。同様に、 `identity:" "list_domains_for_groups` を`identity:list_domains_for_user` に変更しました。 " "policy.jsonファイルをカスタマイズした場合は、これらの変更を行う必要がありま" "す。 これはフェデレーションの新しい機能をよりよくサポートするために行われまし" "た。" msgid "" "It is recommended to have the ``healthcheck`` middleware first in the " "pipeline::" msgstr "" "パイプラインの最初に ``healthcheck`` ミドルウェアを使うことをお勧めします::" msgid "Keystone Release Notes" msgstr "Keystone リリースノート" msgid "" "Keystone cache backends have been removed in favor of their `oslo.cache` " "counter-part. This affects:" msgstr "" "Keystone キャッシュバックエンドは、 `oslo.cache` と同等のため削除しました。 " "これは次のような影響を与えます。" msgid "" "Keystone now relies on pyldap instead of python-ldap. The pyldap library is " "a fork of python-ldap and is a drop-in replacement with modifications to be " "py3 compatible." msgstr "" "Keystone は python-ldap ではなく pyldap を使用するようになりました。 pyldap " "ライブラリは python-ldap のフォークであり、 py3 と互換性があるように変更され" "ています。" msgid "" "Keystone now supports authorizing a request token by providing a role name. 
" "A `role` in the `roles` parameter can include either a role name or role id, " "but not both." msgstr "" "Keystone は、ロール名の提供によるリクエストトークンの認可をサポートしました。" "`roles` パラメーターの `role` は、ロール名かロール ID のいずれかを含めること" "ができますが、両方を含めることはできません。" msgid "" "Keystone now supports being run under Python 3. The Python 3 and Python 3.4 " "classifiers have been added." msgstr "" "Keystone は Python 3 での動作をサポートしています。 Python3 および Python " "3.4 分類子が追加されました。" msgid "" "Keystone now supports encrypted credentials at rest. In order to upgrade " "successfully to Newton, deployers must encrypt all credentials currently " "stored before contracting the database. Deployers must run `keystone-manage " "credential_setup` in order to use the credential API within Newton, or " "finish the upgrade from Mitaka to Newton. This will result in a service " "outage for the credential API where credentials will be read-only for the " "duration of the upgrade process. Once the database is contracted credentials " "will be writeable again. Database contraction phases only apply to rolling " "upgrades." msgstr "" "Keystone は REST での暗号化された資格情報をサポートしています。 Newton に正常" "にアップグレードするためには、デプロイヤーは、データベースの縮小フェーズの前" "に、現在保管されているすべての資格情報を暗号化する必要があります。 デプロイ" "ヤーは、 Newton 内で資格情報 API を使用するために `keystone-manage " "credential_setup` を実行するか、 Mitaka から Newton へのアップグレードを完了" "する必要があります。 これにより、認証情報 API のサービス停止が発生し、資格情" "報はアップグレード処理中に読み取り専用になります。 データベースを縮小すると、" "資格情報は再び書き込み可能になります。 データベース縮小フェーズは、ローリング" "アップグレードにのみ適用されます。" msgid "" "Keystone now uses oslo.cache. Update the `[cache]` section of `keystone." "conf` to point to oslo.cache backends: ``oslo_cache.memcache_pool`` or " "``oslo_cache.mongo``. Refer to the sample configuration file for examples. " "See `oslo.cache `_ for " "additional documentation." msgstr "" "Keystone は oslo.cache を使用するようになりました。`keystone.conf` の " "`[cache]` セクションを oslo.cache バックエンドを指すよう、``oslo_cache." "memcache_pool`` または ``oslo_cache.mongo`` に、更新しました。 サンプルの設定" "ファイルを参照してください。追加のドキュメントを参照してください `oslo." 
"cache`_" msgid "" "Keystone supports ``$(project_id)s`` in the catalog. It works the same as ``" "$(tenant_id)s``. Use of ``$(tenant_id)s`` is deprecated and catalog " "endpoints should be updated to use ``$(project_id)s``." msgstr "" "Keystone はカタログでの ``$(project_id)s`` をサポートしました。これは、``" "$(tenant_id)s`` と同じ動作をします。``$(tenant_id)s`` は非推奨になり、カタロ" "グのエンドポイントは、``$(project_id)s`` が使用されるようになります。" msgid "Liberty Series Release Notes" msgstr "Liberty バージョンのリリースノート" msgid "Mitaka Series Release Notes" msgstr "Mitaka バージョンのリリースノート" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "" "Not specifying a domain during a create user, group or project call, which " "relied on falling back to the default domain, is now deprecated and will be " "removed in the N release." msgstr "" "既定のドメインが設定される、ドメインを指定しないユーザー、グループまたはプロ" "ジェクトの作成は非推奨となり、 N リリースで削除されまする。" msgid "" "OSprofiler support was added. This cross-project profiling library allows to " "trace various requests through all OpenStack services that support it. To " "initiate OpenStack request tracing `--profile ` option needs to be " "added to the CLI command. Configuration and usage details can be foung in " "[`OSProfiler documentation `_]" msgstr "" "OSprofiler サポートが追加されました。 このプロジェクト間プロファイリングライ" "ブラリは、これをサポートするすべての OpenStack サービスのさまざまなリクエスト" "をトレースすることを可能にします。 OpenStack リクエストトレースを開始するに" "は、 `--profile ` オプションを CLI コマンドに追加する必要がありま" "す。 設定と使用の詳細は、[`` OSProfiler documentation `_] を参照してください。" msgid "" "OSprofiler support was introduced. To allow its usage the keystone-paste.ini " "file needs to be modified to contain osprofiler middleware." msgstr "" "OSprofiler のサポートが導入されました。これを使用可能にするには、 keystone-" "paste.ini ファイルを osprofiler ミドルウェアを使用するように編集する必要があ" "ります。" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "Other Notes" msgstr "その他の注意点" msgid "PKI and PKIz token formats have been removed in favor of Fernet tokens." 
msgstr "" "PKI および PKIz トークン形式が削除され、 Fernet トークンが採用されました。" msgid "Pike Series Release Notes" msgstr "Pike バージョンのリリースノート" msgid "Prelude" msgstr "紹介" msgid "" "Project tags are implemented following the guidelines set by the `API " "Working Group `_" msgstr "" "`API ワーキンググループ `_ によって設定されたガイドラインに従って、プロジェクト" "タグが実装されました。" msgid "Queens Series Release Notes" msgstr "Queens バージョンのリリースノート" msgid "" "Routes and SQL backends for the contrib extensions have been removed, they " "have been incorporated into keystone and are no longer optional. This " "affects:" msgstr "" "Route および contrib 拡張の SQL バックエンドが削除、 Keystone に組み込まれ、" "オプションではなくなりました。 これは次のような影響を与えます。" msgid "" "Running keystone in eventlet remains deprecated and will be removed in the " "Mitaka release." msgstr "" "イベントレットでの Keystone 実行は廃止され、 Mitaka リリースで削除されます。" msgid "" "SECURITY INFO: The MFA rules are only processed when authentication happens " "through the V3 authentication APIs. If V2 Auth is enabled it is possible to " "circumvent the MFA rules if the user can authenticate via V2 Auth API. It is " "recommended to disable V2 authentication for full enforcement of the MFA " "rules." msgstr "" "セキュリティ情報: MFA ルールは、認証が V3 認証 API を介して行われる場合にのみ" "処理されます。 V2 認証が有効な場合、ユーザーが V2 認証 APIを使用して認証でき" "る場合は、MFA ルールを迂回することができます。 MFA ルールを完全に実施するに" "は、V2 認証を無効にすることをお勧めします。" msgid "" "Schema downgrades via ``keystone-manage db_sync`` are no longer supported. " "Only upgrades are supported." msgstr "" "``keystone-manage db_sync`` によるスキーマのダウングレードは、もはやサポート" "されません。アップグレードのみがサポートされます。" msgid "Security Issues" msgstr "セキュリティー上の問題" msgid "" "See `Project Tags `_" msgstr "" "`プロジェクトタグ `_ を参照してください。" msgid "" "Set the following user attributes to ``True`` or ``False`` in an API " "request. 
To mark a user as exempt from the PCI password lockout policy::" msgstr "" "ユーザーを PCI パスワードロックアウトポリシーから免除するように設定するには、" "API リクエストで以下のユーザー属性を ``True`` あるいは ``False`` に設定してく" "ださい。" msgid "" "Several configuration options have been deprecated, renamed, or moved to new " "sections in the ``keystone.conf`` file." msgstr "" "幾つかの設定オプションが非推奨、名称変更、あるいは ``keystone.conf`` ファイル" "の新しいセクション移動になりました。" msgid "" "Several features were hardened, including Fernet tokens, federation, domain " "specific configurations from database and role assignments." msgstr "" "Fernet トークン、フェデレーション、データベースやロールの割り当てによるドメイ" "ン固有の設定など、幾つかの機能が強化されました。" msgid "" "Several token issuance methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favor of a single " "method to issue tokens (``issue_token``). If using a custom token provider, " "updated the custom provider accordingly." msgstr "" "抽象クラス ``keystone.token.providers.base.Provider`` からの幾つかのトークン" "発行メソッド(下記参照)が削除され、トークンを発行するための単一のメソッド" "(``issue_token``)になりました。 カスタムトークンプロバイダを使用している場" "合は、それに応じてカスタムプロバイダを更新してください。" msgid "" "Several token validation methods from the abstract class ``keystone.token." "providers.base.Provider`` were removed (see below) in favor of a single " "method to validate tokens (``validate_token``), that has the signature " "``validate_token(self, token_ref)``. If using a custom token provider, " "update the custom provider accordingly." msgstr "" "抽象クラス ``keystone.token.providers.base.Provider`` からの幾つかのトークン" "検証メソッド(下記参照)が削除され、トークンを検証するための単一のメソッド" "(``validate_token``)になりました。これは署名を取ります " "``validate_token(self, token_ref)``。 カスタムトークンプロバイダを使用してい" "る場合は、それに応じてカスタムプロバイダを更新してください。" msgid "" "Support for writing to LDAP has been removed. See ``Other Notes`` for more " "details." msgstr "" "LDAP への書き出しのサポートは削除されました。詳細は ``その他の注意点`` を参照" "してください。" msgid "" "Support has now been added to send notification events on user/group " "membership. 
When a user is added or removed from a group a notification will " "be sent including the identifiers of both the user and the group." msgstr "" "ユーザー/グループのメンバーシップに関する通知イベント送信がサポートされまし" "た。 ユーザーがグループに追加または削除されると、ユーザーとグループの両方の識" "別子を含む通知が送信されます。" msgid "" "Support was improved for out-of-tree drivers by defining stable driver " "interfaces." msgstr "" "安定したドライバーインターフェースを定義することにより、カスタムドライバのサ" "ポートが改善されました。" msgid "Tags are case sensitive" msgstr "タグは、大文字小文字を区別します。" msgid "" "The EC2 token middleware, deprecated in Juno, is no longer available in " "keystone. It has been moved to the keystonemiddleware package." msgstr "" "Juno で非推奨となった EC2 トークンミドルウェアは、もはや Keystone では使用で" "きません。keystonemiddleware パッケージに移動しました。" msgid "" "The LDAP driver now also maps the user description attribute after user " "retrieval from LDAP. If this is undesired behavior for your setup, please " "add `description` to the `user_attribute_ignore` LDAP driver config setting. " "The default mapping of the description attribute is set to `description`. " "Please adjust the LDAP driver config setting `user_description_attribute` if " "your LDAP uses a different attribute name (for instance to `displayName` in " "case of an AD backed LDAP). If your `user_additional_attribute_mapping` " "setting contains `description:description` you can remove this mapping, " "since this is now the default behavior." 
msgstr "" "LDAP ドライバは、 LDAP からのユーザー取得後にユーザーの description 属性も" "マップするようになりました。 これが望ましくない動作である場合は、 LDAP ドライ" "バ設定の `user_attribute_ignore` に `description` を追加してください。 " "description 属性のデフォルトマッピングは `description`に設定されています。 " "LDAP が別の属性名を使用する場合(例えば、 AD がバックエンドの LDAP の場合は " "`displayName`)、 LDAP ドライバ設定 `user_description_attribute` を調整してく" "ださい。 あなたの `user_additional_attribute_mapping` 設定に `description:" "description` が含まれている場合、このマッピングを削除すれば、デフォルト動作に" "なります。" msgid "" "The MFA rules are set via the user create and update API (``POST/PATCH /v3/" "users``) call; the options allow an admin to force a user to use specific " "forms of authentication or combinations of forms of authentication to get a " "token. The rules are specified as follows::" msgstr "" "MFA ルールは、ユーザー作成および更新 API (``POST/PATCH /v3/users``) の呼び" "出しによって設定されます。 このオプションを使用すると、管理者は、ユーザーによ" "るトークン取得に、特定の形式の認証、または複数の形式の認証の組み合わせの使用" "を強制することができます。 ルールは次のように指定されます。" msgid "" "The PKI and PKIz token format has been removed. See ``Other Notes`` for more " "details." msgstr "" "PKI と PKIz トークン形式は削除されました。詳細は ``その他の注意点`` を参照し" "てください。" msgid "" "The V8 Federation driver interface is deprecated in favor of the V9 " "Federation driver interface. Support for the V8 Federation driver interface " "is planned to be removed in the 'O' release of OpenStack." msgstr "" "V8 フェデレーションドライバーインターフェースは非推奨になり、 V9 フェデレー" "ションドライバーインターフェースになりました。 V8 フェデレーションドライバー" "インターフェースのサポートは、 OpenStack の 'O' リリースで削除される予定で" "す。" msgid "" "The V8 Resource driver interface is deprecated. Support for the V8 Resource " "driver interface is planned to be removed in the 'O' release of OpenStack." msgstr "" "V8 リソースドライバーインターフェースは非推奨になりました。 V8 フェデレーショ" "ンドライバーインターフェースのサポートは、 OpenStack の 'O' リリースで削除さ" "れる予定です。" msgid "" "The XML middleware stub has been removed, so references to it must be " "removed from the ``keystone-paste.ini`` configuration file." 
msgstr "" "XML ミドルウェアのスタブは削除されているので、 ``keystone-paste.ini`` 設定" "ファイルから参照を削除する必要があります。" msgid "" "The ``/OS-FEDERATION/projects`` and ``/OS-FEDERATION/domains`` APIs are " "deprecated in favor of the ``/v3/auth/projects`` and ``/v3/auth/domains`` " "APIs. These APIs were originally marked as deprecated during the Juno " "release cycle, but we never deprecated using ``versionutils`` from oslo. " "More information regarding this deprecation can be found in the `patch " "`_ that proposed the deprecation." msgstr "" "``/v3/auth/projects`` と ``/v3/auth/domains`` API のために、``/OS-" "FEDERATION/projects`` と ``/OS-FEDERATION/domains`` API は非推奨となりまし" "た。これらの API は、もともと Juno リリースサイクル中に非推奨となりましたが、" "oslo の ``versionutils`` を使用して非推奨には決してしませんでした。この廃止に" "関する詳細は、廃止を提案した `パッチ `_ にあります。" msgid "" "The ``[DEFAULT] domain_id_immutable`` configuration option has been removed " "in favor of strictly immutable domain IDs." msgstr "" "``[DEFAULT] domain_id_immutable`` 設定オプションは、厳密で不変なドメイン ID " "のために削除されました。" msgid "" "The ``[DEFAULT] domain_id_immutable`` option has been removed. This removes " "the ability to change the ``domain_id`` attribute of users, groups, and " "projects. The behavior was introduced to allow deployers to migrate entities " "from one domain to another by updating the ``domain_id`` attribute of an " "entity. This functionality was deprecated in the Mitaka release is now " "removed." msgstr "" "``[DEFAULT] domain_id_immutable`` オプションは削除されました。これにより、" "ユーザー、グループ、およびプロジェクトの ``domain_id`` 属性を変更する機能が削" "除されます。 この振る舞いは、エンティティの ``domain_id`` 属性を更新すること" "によって、デプロイヤーがエンティティをあるドメインから別のドメインに移行でき" "るようにするために導入されました。 この機能は Mitaka リリースで非推奨になり、" "現在は廃止されました。" msgid "" "The ``[assignment] driver`` now defaults to ``sql``. Logic to determine the " "default assignment driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one assignment driver and it shouldn't " "be changed unless you're deploying a custom assignment driver." 
msgstr "" "``[assignment] driver`` のデフォルトは ``sql`` になりました。設定で1つが提供" "されなかった場合のデフォルト割り当てドライバーを決定するロジックが削除されま" "した。 Keystone は1つの割り当てドライバーのみをサポートしており、カスタム割り" "当てドライバを配備していない限り、変更しないでください。" msgid "" "The ``[endpoint_policy] enabled`` configuration option has been removed in " "favor of always enabling the endpoint policy extension." msgstr "" "``[endpoint_policy] enabled`` 設定オプションは、エンドポイントのポリシー拡張" "を常に有効にするために削除されました。" msgid "" "The ``[os_inherit] enabled`` config option has been removed, the `OS-" "INHERIT` extension is now always enabled." msgstr "" "``[os_inherit] enabled`` 設定オプションは削除され、 `OS-INHERIT` 拡張が常に有" "効になりました。" msgid "" "The ``[resource] driver`` now defaults to ``sql``. Logic to determine the " "default resource driver if one wasn't supplied through configuration has " "been removed. Keystone only supports one resource driver and it shouldn't be " "changed unless you're deploying a custom resource driver." msgstr "" "``[resource] driver`` のデフォルトは ``sql`` になりました。設定で1つが提供さ" "れなかった場合のデフォルトリソースドライバーを決定するロジックが削除されまし" "た。 Keystone は1つのリソースドライバーのみをサポートしており、カスタムリソー" "スドライバを配備していない限り、変更しないでください。" msgid "" "The ``[security_compliance] password_expires_ignore_user_ids`` option has " "been removed. Each user that should ignore password expiry should have the " "value set to \"true\" in the user's ``options`` attribute (e.g. " "``user['options']['ignore_password_expiry'] = True``) with a user update " "call." msgstr "" "``[security_compliance] password_expires_ignore_user_ids`` オプションは削除さ" "れました。パスワードの有効期限を無視する必要のあるユーザーは、それぞれユー" "ザー更新呼び出しで、ユーザーの ``options`` 属性(例えば、 ``user['options']" "['ignore_password_expiry'] = True``)に \"true\" を設定する必要があります。" msgid "" "The ``compute_port`` configuration option, deprecated in Juno, is no longer " "available." msgstr "" "Juno で非推奨となった ``compute_port`` 設定オプションは、もはや使用できませ" "ん。" msgid "" "The ``enabled`` config option of the ``trust`` feature is deprecated and " "will be removed in the next release. Trusts will then always be enabled." 
msgstr "" "``trust`` 機能の ``enabled`` 設定オプションは、非推奨となり、次のリリースで削" "除される予定です。信頼は常に有効になる予定です。" msgid "" "The ``httpd/keystone.py`` file has been removed in favor of the ``keystone-" "wsgi-admin`` and ``keystone-wsgi-public`` scripts." msgstr "" "``httpd/keystone.py`` ファイルは、 ``keystone-wsgi-admin`` および ``keystone-" "wsgi-public`` スクリプトのために削除されました。" msgid "" "The ``keystone.conf`` file now references entrypoint names for drivers. For " "example, the drivers are now specified as \"sql\", \"ldap\", \"uuid\", " "rather than the full module path. See the sample configuration file for " "other examples." msgstr "" "``keystone.conf`` ファイルは、ドライバーのエントリーポイント名を参照するよう" "になりました。 たとえば、ドライバは完全なモジュールパスではなく、 \"sql\"、 " "\"ldap\"、 \"uuid\"と指定されています。 他の例については、サンプルの設定ファ" "イルを参照してください。" msgid "" "The ``keystone/service.py`` file has been removed, the logic has been moved " "to the ``keystone/version/service.py``." msgstr "" "``keystone/service.py`` ファイルを削除し、そのロジックは ``keystone/version/" "service.py`` に移動しました。" msgid "" "The ``memcache`` and ``memcache_pool`` token persistence backends have been " "removed in favor of using Fernet tokens (which require no persistence)." msgstr "" "``memcache`` と ``memcache_pool`` のトークン永続性バックエンドは、永続性を必" "要としない Fernet トークンの使用のために削除されました。" msgid "" "The ``policies`` API is deprecated. Keystone is not a policy management " "service." msgstr "" "``policies`` API は非推奨となりました。Keystone はポリシー管理サービスではあ" "りません。" msgid "" "The ``token`` auth method typically should not be specified in any MFA " "Rules. The ``token`` auth method will include all previous auth methods for " "the original auth request and will match the appropriate ruleset. This is " "intentional, as the ``token`` method is used for rescoping/changing active " "projects." 
msgstr "" "``token`` 認証方法は、通常、どの MFA ルールでも指定すべきではありません。 " "``token`` 認証方法は、元の認証リクエストに対するすべての以前の認証方法を含" "み、適切なルールセットと一致します。 ``token`` メソッドは、アクティブなプロ" "ジェクトの再スコープ/変更に使用されるもので、これは意図的なものです。" msgid "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:api_v3]``. Remove the following filters: " "``[filter:oauth1_extension]``, ``[filter:federation_extension]``, ``[filter:" "endpoint_filter_extension]``, and ``[filter:revoke_extension]``. See the " "sample `keystone-paste.ini `_ file for guidance." msgstr "" "`keystone-paste.ini`ファイルを編集して拡張フィルタを削除し、``[pipeline:" "api_v3]`` でそれらを使う必要があります。 ``[filter:oauth1_extension]``、 " "``[filter:federation_extension]``、 `` [filter:" "endpoint_filter_extension]``、 ``[filter:revoke_extension]`` のフィルタを削除" "してください。 ガイダンスについては、サンプル `keystone-paste.ini ` _ " "ファイルを参照してください。" msgid "" "The `keystone-paste.ini` file must be updated to remove extension filters, " "and their use in ``[pipeline:public_api]`` and ``[pipeline:admin_api]`` " "pipelines. Remove the following filters: ``[filter:user_crud_extension]``, " "``[filter:crud_extension]``. See the sample `keystone-paste.ini `_ file " "for guidance." msgstr "" "``keystone-paste.ini`` ファイルを編集して拡張フィルタを削除し、``[pipe:" "public_api]`` と ``[pipeline:admin_api]`` パイプラインでそれらを使う必要があ" "ります。``[filter:user_crud_extension]``、 ``[filter:crud_extension]`` を削除" "してください。ガイダンスについては、サンプル `keystone-paste.ini ` _ " "ファイルを参照してください。" msgid "" "The `os_inherit` configuration option is disabled. In the future, this " "option will be removed and this portion of the API will be always enabled." msgstr "" "``os_inherit`` 設定オプションは無効になりました。将来、このオプションは削除さ" "れ、 API のこの部分は常に有効になります。" msgid "" "The ability to validate a trust-scoped token against the v2.0 API has been " "removed, in favor of using the version 3 of the API." 
msgstr "" "v2.0 API に対する信頼スコープのトークンを検証する機能が削除され、バージョン " "3 の API が使用されました。" msgid "" "The admin_token method of authentication was never intended to be used for " "any purpose other than bootstrapping an install. However many deployments " "had to leave the admin_token method enabled due to restrictions on editing " "the paste file used to configure the web pipelines. To minimize the risk " "from this mechanism, the `admin_token` configuration value now defaults to a " "python `None` value. In addition, if the value is set to `None`, either " "explicitly or implicitly, the `admin_token` will not be enabled, and an " "attempt to use it will lead to a failed authentication." msgstr "" "admin_token 認証方法は、インストールをブートストラップする以外の目的には使用" "されませんでした。しかし、Web パイプラインの設定に使用されるペーストファイル" "の編集の制限により、多くのデプロイメントで admin_token メソッドを有効にしてお" "かなければなりませんでした。このメカニズムによるリスクを最小限に抑えるため" "に、 `admin_token` 設定値のデフォルト値は、 Python の `None` 値です。さらに、" "明示的または暗黙的に値が `None` に設定されている場合、`admin_token` は有効に" "ならず、それを使用しようとすると認証に失敗します。" msgid "" "The auth plugin ``keystone.auth.plugins.saml2.Saml2`` has been removed in " "favor of the auth plugin ``keystone.auth.plugins.mapped.Mapped``." msgstr "" "認証プラグイン ``keystone.auth.plugins.saml2.Saml2`` は、認証プラグイン " "``keystone.auth.plugins.mapped.Mapped`` のために削除されました。" msgid "" "The catalog backend ``endpoint_filter.sql`` has been removed. It has been " "consolidated with the ``sql`` backend, therefore replace the " "``endpoint_filter.sql`` catalog backend with the ``sql`` backend." msgstr "" "カタログバックエンド ``endpoint_filter.sql`` は削除されました。これは " "``sql`` バックエンドと統合されているので、``endpoint_filter.sql`` カタログ" "バックエンドを `` sql`` バックエンドに置き換えます。" msgid "" "The check for admin token from ``build_auth_context`` middleware has been " "removed. 
If your deployment requires the use of `admin token`, update " "``keystone-paste.ini`` so that ``admin_token_auth`` is before " "``build_auth_context`` in the paste pipelines, otherwise remove the " "``admin_token_auth`` middleware from ``keystone-paste.ini`` entirely." msgstr "" "``build_auth_context`` ミドルウェアからの管理トークンのチェックが削除されまし" "た。デプロイメントで `admin token`が必要な場合は、``keystone-paste.ini`` を編" "集して、ペーストパイプラインで ``admin_token_auth`` が " "``build_auth_context`` の前にあるようにしてください。そうでなければ " "``admin_token_auth`` ミドルウェアを ``keystone-paste.ini`` から完全に削除して" "ください。" msgid "" "The config option ``rolling_upgrade_password_hash_compat`` is removed. It is " "only used for rolling-upgrade from Ocata release to Pike release." msgstr "" "``rolling_upgrade_password_hash_compat`` 設定オプションを削除しました。これ" "は、Ocata リリースから Pike リリースへのローリングアップグレードのみで使用さ" "れていました。" msgid "" "The configuration options for LDAP connection pooling, `[ldap] use_pool` and " "`[ldap] use_auth_pool`, are now both enabled by default. Only deployments " "using LDAP drivers are affected. Additional configuration options are " "available in the `[ldap]` section to tune connection pool size, etc." msgstr "" "LDAP 接続プーリングの設定オプション `[ldap] use_pool` と `[ldap] " "use_auth_pool` は、デフォルトで両方とも有効になりました。 LDAP ドライバを使用" "しているデプロイメントのみが影響を受けます。`[ldap]` セクションに追加の設定オ" "プションがあり、接続プールのサイズなどを調整できます。" msgid "" "The credentials list call can now have its results filtered by credential " "type." msgstr "" "資格情報リストの呼び出しは、資格情報の種別でフィルターされた結果を返すように" "なりました。" msgid "" "The default setting for the `os_inherit` configuration option is changed to " "True. If it is required to continue with this portion of the API disabled, " "then override the default setting by explicitly specifying the os_inherit " "option as False." msgstr "" "`os_inherit` 設定オプションのデフォルトは、 True に変更されました。この部分" "の API を無効にする必要がある場合は、os_inherit オプションを明示的に False に" "指定して、デフォルト設定をオーバーライドします。" msgid "The default token provider is now Fernet." 
msgstr "現在、デフォルトのトークンプロバイダーは、 Fernet です。" msgid "" "The external authentication plugins ExternalDefault, ExternalDomain, " "LegacyDefaultDomain, and LegacyDomain, deprecated in Icehouse, are no longer " "available." msgstr "" "ExternalDefault 、 ExternalDomain 、 LegacyDefaultDomain 、および " "LegacyDomain は Icehouse で非推奨となり、使用できません。" msgid "" "The functionality of the ``ADMIN_TOKEN`` remains, but has been incorporated " "into the main auth middleware (``keystone.middleware.auth." "AuthContextMiddleware``)." msgstr "" "``ADMIN_TOKEN`` の機能は残っていますが、主な認証ミドルウェア(``keystone." "middleware.auth.AuthContextMiddleware``)に組み込まれています。" msgid "" "The identity backend driver interface has changed. A new method, " "`unset_default_project_id(project_id)`, was added to unset a user's default " "project ID for a given project ID. Custom backend implementations must " "implement this method." msgstr "" "認証バックエンドドライバーインターフェースが変更されました。与えられたプロ" "ジェクト ID のユーザーのデフォルトプロジェクト ID の設定を解除するための新し" "いメソッド `unset_default_project_id(project_id)` が追加されました。 カスタム" "バックエンドの実装では、このメソッドを実装する必要があります。" msgid "" "The identity backend driver interface has changed. We've added a new " "``change_password()`` method for self service password changes. If you have " "a custom implementation for the identity driver, you will need to implement " "this new method." msgstr "" "認証バックエンドドライバーインターフェースが変更されました。 セルフサービスの" "パスワードを変更するための新しい ``change_password()`` メソッドを追加しまし" "た。認証ドライバーのカスタム実装がある場合は、この新しいメソッドを実装する必" "要があります。" msgid "" "The implementation for checking database state during an upgrade with the " "use of `keystone-manage db_sync --check` has been corrected. This allows " "users and automation to determine what step is next in a rolling upgrade " "based on logging and command status codes." 
msgstr "" "`keystone-manage db_sync --check` を使用してアップグレード中にデータベースの" "状態をチェックする実装が修正されました。 これにより、ユーザーおよび自動化は、" "ロギングおよびコマンドステータスコードに基づいて、ローリングアップグレードの" "次のステップを判断できます。" msgid "" "The list_project_ids_for_user(), list_domain_ids_for_user(), " "list_user_ids_for_project(), list_project_ids_for_groups(), " "list_domain_ids_for_groups(), list_role_ids_for_groups_on_project() and " "list_role_ids_for_groups_on_domain() methods have been removed from the V9 " "version of the Assignment driver." msgstr "" "割り当てドライバーの V9 バージョンから、 list_project_ids_for_user()、 " "list_domain_ids_for_user()、 list_user_ids_for_project()、 " "list_project_ids_for_groups()、 list_domain_ids_for_groups()、 " "list_role_ids_for_groups_on_project() および " "list_role_ids_for_groups_on_domain() メソッドが削除されました。" msgid "The method signature has changed from::" msgstr "メソッドの署名が以下から変更されました。" msgid "" "The resource backend cannot be configured to anything but SQL if the SQL " "Identity backend is being used. The resource backend must now be SQL which " "allows for the use of Foreign Keys to domains/projects wherever desired. " "This makes managing project relationships and such much more straight " "forward. The inability to configure non-SQL resource backends has been in " "Keystone since at least Ocata. This is eliminating some complexity and " "preventing the need for some really ugly back-port SQL migrations in favor " "of a better model. Resource is highly relational and should be SQL based." 
msgstr "" "SQL バックエンドが使用されている場合は、リソースバックエンドを SQL 以外のもの" "に構成することはできません。リソースのバックエンドは、現在、ドメイン/プロジェ" "クトへの外部キーの使用を可能にする SQL でなければなりません。これにより、プロ" "ジェクトの関係性の管理などがより簡単になります。 非 SQL リソースのバックエン" "ドを構成できないことは、少なくとも Ocata 以来、 Keystone にありました。 これ" "は、いくつかの複雑さを排除し、いくつかのとても酷いバックポート SQL マイグレー" "ションの必要性を防止し、より良いモデルを支持します。 リソースは非常にリレー" "ショナルであり、 SQL ベースでなければなりません。" msgid "" "The response's content type for creating request token or access token is " "changed to `application/x-www-form-urlencoded`, the old value `application/x-" "www-urlformencoded` is invalid and will no longer be used." msgstr "" "リクエストトークンまたはアクセストークンを作成するためのレスポンスのコンテン" "ツタイプが `application/x-www-form-urlencoded` に変更され、古い値の " "`application / x-www-urlformencoded` は無効になり、使用されなくなりました。" msgid "" "The rules are specified as a list of lists. The elements of the sub-lists " "must be strings and are intended to mirror the required authentication " "method names (e.g. ``password``, ``totp``, etc) as defined in the ``keystone." "conf`` file in the ``[auth] methods`` option." msgstr "" "ルールはリストのリストとして指定されます。 サブリストの要素は文字列でなければ" "ならず、 ``[auth] methods`` オプションの ``keystone.conf`` で定義されている必" "要な認証方法名(``password`` や ``totp`` など)です。" msgid "" "The token_formatter utility class has been moved from under fernet to the " "default token directory. This is to allow for the reuse of functionality " "with other token providers. Any deployments that are specifically using the " "fernet utils may be affected and will need to adjust accordingly." msgstr "" "token_formatter ユーザビリティクラスは、fernet 配下からデフォルトトークンディ" "レクトリーに移動しました。これは、他のトークンプロバイダーとともにこの機能を" "再利用できるようにするためです。fernet ユーティリティを使用している構成では、" "影響を受ける可能性があり、それに対応して調整する必要があります。" msgid "" "The trusts table now has an expires_at_int column that represents the " "expiration time as an integer instead of a datetime object. This will " "prevent rounding errors related to the way date objects are stored in some " "versions of MySQL. The expires_at column remains, but will be dropped in " "Rocky." 
msgstr "" "trusts テーブルに、有効期限を日時オブジェクトの代わりに整数で表す " "expires_at_int 列が追加されました。これは、MySQL の幾つかのバージョンで、日付" "オブジェクトの格納方法に関連する丸め誤差を防止します。expires_at 列は残ってい" "ますが、Rocky バージョンで削除される予定です。" msgid "" "The use of `sha512_crypt` is considered inadequate for password hashing in " "an application like Keystone. The use of bcrypt or scrypt is recommended to " "ensure protection against password cracking utilities if the hashes are " "exposed. This is due to Time-Complexity requirements for computing the " "hashes in light of modern hardware (CPU, GPU, ASIC, FPGA, etc). Keystone has " "moved to bcrypt as a default and no longer hashes new passwords (and " "password changes) with sha512_crypt. It is recommended passwords be changed " "after upgrade to Pike. The risk of password hash exposure is limited, but " "for the best possible protection against cracking the hash it is recommended " "passwords be changed after upgrade. The password change will then result in " "a more secure hash (bcrypt by default) being used to store the password in " "the DB." msgstr "" "`sha512_crypt` の使用は Keystone のようなアプリケーションでのパスワードハッシ" "ングには不十分と考えられます。 ハッシュが公開されている場合、パスワードクラッ" "キングユーティリティーからの保護を確実にするために、 bcrypt または scrypt の" "使用を推奨します。これは、最新のハードウェア(CPU、GPU、ASIC、FPGAなど)を焦" "点にしてハッシュを計算するための時間複雑性の要件によるものです。Keystone がデ" "フォルトを bcrypt に移行し、 sha512_crypt では新しいパスワード(およびパス" "ワード変更)をハッシュしなくなりました。 Pike へのアップグレード後にパスワー" "ドを変更することをお勧めします。パスワードハッシュ公開のリスクは限られていま" "すが、ハッシュをクラックさせないようにするために、アップグレード後にパスワー" "ドを変更することをお勧めします。 パスワードを変更すると、より安全なハッシュ" "(デフォルトでは bcrypt )が使用され、パスワードが DB に格納されます。" msgid "" "The use of admin_token filter is insecure compared to the use of a proper " "username/password. Historically the admin_token filter has been left enabled " "in Keystone after initialization due to the way CMS systems work. 
Moving to " "an out-of-band initialization using ``keystone-manage bootstrap`` will " "eliminate the security concerns around a static shared string that conveys " "admin access to keystone and therefore to the entire installation." msgstr "" "admin_token フィルタの使用は、適切なユーザ名/パスワードの使用と比較して安全で" "はありません。歴史的に、admin_token フィルタは、 CMS システムを動作させるため" "に、初期化後の Keystone では有効になったままです。 ``keystone-manage " "bootstrap`` を使ったアウトオブバンドの初期化に移行すると、 Keystone への、つ" "まりインストール全体への管理者アクセス権を譲渡する静的共有文字列に関するセ" "キュリティ上の懸念が排除されます。" msgid "" "Third-party extensions that extend the abstract class " "(``ShadowUsersDriverBase``) should be updated according to the new parameter " "names." msgstr "" "抽象クラス(``ShadowUsersDriverBase``)を継承するサードパーティ拡張は、新しい" "パラメータ名に従って更新する必要があります。" msgid "" "This release adds support for Application Credentials, a new way to allow " "applications and automated tooling to authenticate with keystone. Rather " "than storing a username and password in an application's config file, which " "can pose security risks, you can now create an application credential to " "allow an application to authenticate and acquire a preset scope and role " "assignments. This is especially useful for LDAP and federated users, who can " "now delegate their cloud management tasks to a keystone-specific resource, " "rather than share their externally managed credentials with keystone and " "risk a compromise of those external systems. Users can delegate a subset of " "their role assignments to an application credential, allowing them to " "strategically limit their application's access to the minimum needed. Unlike " "passwords, a user can have more than one active application credential, " "which means they can be rotated without causing downtime for the " "applications using them." 
msgstr "" "このリリースでは、アプリケーションや自動化ツールが Keystone で認証できる新し" "い手段、アプリケーション認証情報のサポートを追加しました。セキュリティリスク" "を発生する可能性がある、ユーザー名とパスワードをアプリケーションの設定ファイ" "ルに保存する方法ではなく、アプリケーションが事前に設定したスコープとロール割" "り当てを取得して認証できるように、アプリケーション認証情報を作成できます。こ" "れは、外部管理された認証情報を Keystone と共有して外部システムが妥協するリス" "クを冒すよりも、クラウド管理タスクを Keystone 固有のリソースに委譲できるの" "で、LDAP や統合ユーザーにとって特に有用です。ユーザーは、ロール割り当てのサブ" "セットをアプリケーション認証情報に委譲して、アプリケーションのアクセスを戦略" "的に必要最小限に制限できます。パスワードとは異なり、ユーザーは複数の有効なア" "プリケーション認証情報を持つことができ、これを使用するアプリケーションのダウ" "ンタイムを引き起こすことなく、これらをローテートできることを意味します。" msgid "To mark a user as exempt from the PCI password expiry policy::" msgstr "" "ユーザーを PCI パスワード有効期限ポリシーから免除されるようにマークするには::" msgid "To mark a user as exempt from the PCI reset policy::" msgstr "ユーザーを PCI リセットポリシーから免除されるようにマークするには::" msgid "To mark a user exempt from the MFA Rules::" msgstr "ユーザーを MFA ルールから免除されるようにマークするには::" msgid "To the properly written::" msgstr "正しくは::" msgid "To::" msgstr "変更後::" msgid "" "Token persistence driver/code (SQL) is deprecated with this patch since it " "is only used by the UUID token provider.." msgstr "" "トークン永続ドライバー/コード(SQL)は、 UUID トークンプロバイダーのみが使用" "するため、非推奨になりました。" msgid "Tokens can now be cached when issued." msgstr "トークンは発行された時にキャッシュされるようになりました。" msgid "" "UUID token provider ``[token] provider=uuid`` has been deprecated in favor " "of Fernet tokens ``[token] provider=fernet``. With Fernet tokens becoming " "the default UUID tokens can be slated for removal in the R release. This " "also deprecates token-bind support as it was never implemented for fernet." msgstr "" "UUID トークンプロバイダー ``[token] provider=uuid`` は、 Fernet トークン " "``[token] provider=fernet`` のために非推奨になりました。 Fernet トークンがデ" "フォルトになり、 UUID トークンは R リリースで削除予定です。また、トークンバイ" "ンドのサポートは、 fernet のために実装されたものではないので、非推奨になりま" "した。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" msgid "" "Use of ``$(tenant_id)s`` in the catalog endpoints is deprecated in favor of " "``$(project_id)s``." 
msgstr "" "カタログエンドポイントでの ``$(tenant_id)s`` の使用は非推奨となり、 ``" "$(project_id)s`` になりました。" msgid "" "Using LDAP as the resource backend, i.e for projects and domains, is now " "deprecated and will be removed in the Mitaka release." msgstr "" "プロジェクトやドメインなどのリソースバックエンドとしての LDAP の使用は非推奨" "となり、 Mitaka リリースで削除される予定です。" msgid "" "Using the full path to the driver class is deprecated in favor of using the " "entrypoint. In the Mitaka release, the entrypoint must be used." msgstr "" "エントリーポイントを使用するため、ドライバークラスへのフルパスの使用は非推奨" "となりました。 Mitaka リリースでは、エントリーポイントを使用する必要がありま" "す。" msgid "" "We have added the ``password_expires_at`` attribute to the user response " "object." msgstr "" "``password_expires_at`` 属性をユーザーのレスポンスオブジェクトに追加しまし" "た。" msgid "" "We now expose entrypoints for the ``keystone-manage`` command instead of a " "file." msgstr "" "ファイルの代わりに ``keystone-manage`` コマンドのエントリーポイントを公開しま" "した。" msgid "" "Write support for the LDAP has been removed in favor of read-only support. " "The following operations are no longer supported for LDAP:" msgstr "" "LDAP への書き込みのサポートは、読み込みのみのサポートのため、削除されました。" "以下の LDAP の操作はサポートさなくなりました。" msgid "" "[`Bug 1645487 `_] Added a " "new PCI-DSS feature that will require users to immediately change their " "password upon first use for new users and after an administrative password " "reset. The new feature can be enabled by setting [security_compliance] " "``change_password_upon_first_use`` to ``True``." msgstr "" "[`Bug 1645487 `_] 新しい" "ユーザーの最初の使用時、および管理者によるパスワードのリセット後に、ユーザー" "にすぐにパスワード変更を必要とさせるための新しい PCI-DSS 機能が追加されまし" "た。新しい機能を有効にするには、[security_compliance] " "``change_password_upon_first_use`` を ``True`` に設定します。" msgid "" "[`Bug 1649446 `_] The " "default policy for listing revocation events has changed. Previously, any " "authenticated user could list revocation events; it is now, by default, an " "admin or service user only function. This can be changed by modifying the " "policy file being used by keystone." 
msgstr "" "[`Bug 1649446 `_] 取り消し" "イベントをリストするデフォルトのポリシーが変更されました。以前は、すべての認" "証されたユーザーが取り消しイベントをリストできました。デフォルトでは、管理者" "またはサービスユーザー専用の機能に変更されました。 これは、 Keystone によって" "使用されているポリシーファイルを変更することによって変更できます。" msgid "" "[`Related to Bug 1649446 `_] The ``identity:list_revoke_events`` rule has been changed " "in both sample policy files, ``policy.json`` and ``policy.v3cloudsample." "json``. From::" msgstr "" "[`Related to Bug 1649446 `_] ``identity:list_revoke_events`` ルールは両方のサンプルポリ" "シーファイル ``policy.json``と ``policy.v3cloudsample.json`` で変更されまし" "た。変更前::" msgid "" "[`blueprint allow-expired `_] An `allow_expired` flag is added to the token validation " "call (``GET/HEAD /v3/auth/tokens``) that allows fetching a token that has " "expired. This allows for validating tokens in long running operations." msgstr "" "[`blueprint allow-expired `_] 有効期限が切れたトークンを取得できるトークン検証コール " "(``GET /HEAD/v3 /auth/tokens``) に `allow_expired` フラグが追加されまし" "た。 これにより、長時間実行されている操作のトークンの検証が可能になります。" msgid "" "[`blueprint allow-expired `_] To allow long running operations to complete services must " "be able to fetch expired tokens via the ``allow_expired`` flag. The length " "of time a token is retrievable for beyond its traditional expiry is managed " "by the ``[token] allow_expired_window`` option and so the data must be " "retrievable for this about of time. When using fernet tokens this means that " "the key rotation period must exceed this time so that older tokens are still " "decrytable. Ensure that you do not rotate fernet keys faster than ``[token] " "expiration`` + ``[token] allow_expired_window`` seconds." 
msgstr "" "[`blueprint allow-expired `_] サービスを完了するために長時間実行される操作を許可するに" "は、 ``allow_expired`` フラグを使って期限切れのトークンを取得できる必要があり" "ます。それまでの有効期限を超えてトークンを取得可能な時間は、 ``[token] " "allow_expired_window`` オプションで管理されるため、この時間の間、データを取得" "可能にする必要があります。 Fernet トークンを使用する場合、古いトークンがまだ" "解読可能であるためには、キーのローテート期間がこれ以上でなければならないこと" "を意味します。 ``[token] expiration`` + ``[token] allow_expired_window`` 秒よ" "り早く Fernet キーをローテートさせないようにしてください。" msgid "" "[`blueprint application-credentials `_] Users can now create Application " "Credentials, a new keystone resource that can provide an application with " "the means to get a token from keystone with a preset scope and role " "assignments. To authenticate with an application credential, an application " "can use the normal token API with the 'application_credential' auth method." msgstr "" "[`blueprint application-credentials `_] ユーザーは、事前に設定されたスコー" "プとロール割り当てを持つトークンをアプリケーションに提供するための新しい " "Keystone リソース、アプリケーション認証情報を作成できるようになりました。アプ" "リケーション認証情報で認証するには、アプリケーションは " "'application_credential' 認証メソッドとともに通常のトークン API を使用しま" "す。" msgid "" "[`blueprint bootstrap `_] keystone-manage now supports the bootstrap command on the CLI " "so that a keystone install can be initialized without the need of the " "admin_token filter in the paste-ini." msgstr "" "[`blueprint bootstrap `_] keystone-manage は CLI での bootstrap コマンドをサポートし、 " "paste-ini に admin_token フィルタを必要とせずに Keystone のインストールを初期" "化できるようになりました。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the PKI " "and PKIz token formats have been deprecated. They will be removed in the 'O' " "release. Due to this change, the `hash_algorithm` option in the `[token]` " "section of the configuration file has also been deprecated. Also due to this " "change, the ``keystone-manage pki_setup`` command has been deprecated as " "well." 
msgstr "" "[`blueprint deprecated-as-of-mitaka `_] Mitaka リリースでは、 PKI および " "PKIz トークンの形式は非推奨です。これらは 'O' リリースで削除されます。この変" "更により、設定ファイルの `[token]` セクションの `hash_algorithm` オプションも" "非推奨になりました。この変更のために、 ``keystone-manage pki_setup`` コマンド" "も同様に非推奨されました。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "auth plugin `keystone.auth.plugins.saml2.Saml2` has been deprecated. It is " "recommended to use `keystone.auth.plugins.mapped.Mapped` instead. The " "``saml2`` plugin will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] Mitaka リリースでは、認証プラグイ" "ン `keystone.auth.plugins.saml2.Saml2` は廃止されました。 代わりに、 " "`keystone.auth.plugins.mapped.Mapped` を使用することをお勧めします。 " "``saml2`` プラグインは 'O' リリースで削除されます。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, the " "simple_cert_extension is deprecated since it is only used in support of the " "PKI and PKIz token formats. It will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] Mitaka リリースでは、 " "simple_cert_extension は PKI および PKIz トークン形式のサポートでのみ使用され" "るため、非推奨となっています。これは 'O' リリースで削除されます。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] As of the Mitaka release, write " "support for the LDAP driver of the Identity backend has been deprecated. " "This includes the following operations: create user, create group, delete " "user, delete group, update user, update group, add user to group, and remove " "user from group. These operations will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] Mitaka リリースでは、認証バックエ" "ンドの LDAP ドライバーに対する書き込みサポートが非推奨になりました。これに" "は、ユーザーの作成、グループの作成、ユーザーの削除、グループの削除、ユーザー" "の更新、グループの更新、ユーザーのグループへの追加、およびグループからのユー" "ザーの削除の操作が含まれます。これらの操作は 'O' リリースで削除されます。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] Deprecate the ``enabled`` option " "from ``[endpoint_policy]``, it will be removed in the 'O' release, and the " "extension will always be enabled." 
msgstr "" "[`blueprint deprecated-as-of-mitaka `_] `` [endpoint_policy]`` の " "``enabled`` オプションを非推奨、 'O'リリースで削除予定とし、その拡張機能は常" "に有効になります。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] Deprecated all v2.0 APIs. The " "keystone team recommends using v3 APIs instead. Most v2.0 APIs will be " "removed in the 'Q' release. However, the authentication APIs and EC2 APIs " "are indefinitely deprecated and will not be removed in the 'Q' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] すべての v2.0 API を非推奨にしまし" "た。 Keystone チームは代わりに v3 API を使用することを推奨します。ほとんどの " "v2.0 API は 'Q' リリースで削除されます。 ただし、認証 API と EC2 API は無期限" "に非推奨であり、 'Q' リリースでは削除されません。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] The V8 Assignment driver " "interface is deprecated. Support for the V8 Assignment driver interface is " "planned to be removed in the 'O' release of OpenStack." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] V8 割り当てドライバーインタフェー" "スは非推奨です。 V8 割り当てドライバーインタフェースのサポートは、 OpenStack " "の 'O' リリースで削除される予定です。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] The V8 Role driver interface is " "deprecated. Support for the V8 Role driver interface is planned to be " "removed in the 'O' release of OpenStack." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] V8 ロールドライバーインタフェース" "は非推奨です。 V8 ロールドライバーインタフェースのサポートは、 OpenStack の " "'O' リリースで削除される予定です。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] The ``admin_token_auth`` filter " "must now be placed before the ``build_auth_context`` filter in `keystone-" "paste.ini`." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] ``admin_token_auth`` フィルタは、 " "`keystone-paste.ini` の ``build_auth_context`` フィルタの前に置く必要がありま" "す。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] The file ``httpd/keystone.py`` " "has been deprecated in favor of ``keystone-wsgi-admin`` and ``keystone-wsgi-" "public`` and may be removed in the 'O' release." 
msgstr "" "[`blueprint deprecated-as-of-mitaka `_] ``httpd/keystone.py`` ファイル" "は、 ``keystone-wsgi-admin`` と ``keystone-wsgi-public`` のために非推奨とな" "り、 'O' リリースで削除される予定です。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] The token memcache and " "memcache_pool persistence backends have been deprecated in favor of using " "Fernet tokens (which require no persistence)." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] トークン用の memcache と " "memcache_pool 永続性バックエンドは、永続性を必要としない Fernet トークンを使" "用するために非推奨になりました。" msgid "" "[`blueprint deprecated-as-of-mitaka `_] ``keystone.common.cache.backends." "memcache_pool``, ``keystone.common.cache.backends.mongo``, and ``keystone." "common.cache.backends.noop`` are deprecated in favor of oslo.cache backends. " "The keystone backends will be removed in the 'O' release." msgstr "" "[`blueprint deprecated-as-of-mitaka `_] ``keystone.common.cache.backends." "memcache_pool``、 ``keystone.common.cache.backends.mongo``、および " "``keystone.common.cache.backends.noop`` は、 oslo.cache バックエンドを使用す" "るため、非推奨になりました。これらの Keystone バックエンドは 'O' リリースで削" "除されます。" msgid "" "[`blueprint deprecated-as-of-newton `_] As of the Newton release, the " "class plugin `keystone.common.kvs.core.KeyValueStore` has been deprecated. " "It is recommended to use alternative backends instead. The ``KeyValueStore`` " "class will be removed in the 'P' release." msgstr "" "[`blueprint deprecated-as-of-newton `_] Newton リリースでは、クラスプラグ" "イン `keystone.common.kvs.core.KeyValueStore` は非推奨になりました。代わりの" "バックエンドを使用することをお勧めします。 ``KeyValueStore`` クラスは 'P' リ" "リースで削除されます。" msgid "" "[`blueprint deprecated-as-of-ocata `_] The catalog backend " "``endpoint_filter.sql`` has been deprecated in the `Ocata` release, it has " "been consolidated with the ``sql`` backend. It is recommended to replace the " "``endpoint_filter.sql`` catalog backend with the ``sql`` backend. The " "``endpoint_filter.sql`` backend will be removed in the `Pike` release." 
msgstr "" "[`blueprint deprecated-as-of-ocata `_] カタログバックエンド " "``endpoint_filter.sql`` は、 `Ocata` リリースで非推奨となり、 ``sql`` バック" "エンドと統合されました。 ``endpoint_filter.sql`` カタログバックエンドを " "``sql`` バックエンドに置き換えることをお勧めします。 ``endpoint_filter.sql`` " "バックエンドは `Pike` リリースで削除されます。" msgid "" "[`blueprint deprecated-as-of-ocata `_] Various KVS backends and config " "options have been deprecated and will be removed in the `Pike` release. This " "includes:" msgstr "" "[`blueprint deprecated-as-of-ocata `_] 様々な KVS バックエンドと設定オプ" "ションが非推奨となり、 `Pike` リリースで削除される予定です。 以下が含まれま" "す。" msgid "" "[`blueprint deprecated-as-of-pike `_] The v2.0 ``auth`` and ``ec2`` APIs were " "already maked as deprecated in the Mitaka release, although no removal " "release had yet been identified. These APIs will now be removed in the 'T' " "release. The v3 APIs should be used instead." msgstr "" "[`blueprint deprecated-as-of-pike `_] v2.0 の ``auth`` と ``ec2`` API は、Mitaka リリース" "で廃止されましたが、削除されるリリースはまだ特定されていませんでした。これらの API " "は 'T' リリースで削除される予定です。代わりに v3 API を使用する必要がありま" "す。" msgid "" "[`blueprint domain-config-as-stable `_] Deprecated ``keystone-manage " "domain_config_upload``. The keystone team recommends setting domain config " "options via the API instead. The ``domain_config_upload`` command line " "option may be removed in the 'P' release." msgstr "" "[`blueprint domain-config-as-stable `_] ``keystone-manage " "domain_config_upload`` は非推奨になりました。 Keystone チームは、代わりに " "API を介してドメイン設定オプションを設定することを推奨します。 " "``domain_config_upload`` コマンドラインオプションは 'P' リリースで削除される" "予定です。" msgid "" "[`blueprint domain-config-as-stable `_] The domain config via API is now " "marked as stable." msgstr "" "[`blueprint domain-config-as-stable `_] API 経由のドメイン設定は、安定な機" "能になりました。" msgid "" "[`blueprint domain-config-default `_] The Identity API now supports retrieving the " "default values for the configuration options that can be overriden via the " "domain specific configuration API." 
msgstr "" "[`blueprint domain-config-default `_] 認証 API は、ドメイン固有の設定 API でオー" "バーライドできる、設定オプションのデフォルト値の取得をサポートしました。" msgid "" "[`blueprint domain-specific-roles `_] Roles can now be optionally defined as " "domain specific. Domain specific roles are not referenced in policy files, " "rather they can be used to allow a domain to build their own private " "inference rules with implied roles. A domain specific role can be assigned " "to a domain or project within its domain, and any subset of global roles it " "implies will appear in a token scoped to the respective domain or project. " "The domain specific role itself, however, will not appear in the token." msgstr "" "[`blueprint domain-specific-roles `_] ロールをドメイン固有として定義できるようにな" "りました。ドメイン固有のロールは、ポリシーファイルでは参照されず、ドメインが" "推論ルールを持つ独自の暗黙ロールを構築できるようにするために使用されます。ド" "メイン固有のロールをドメインまたはドメイン内のプロジェクトに割り当てることが" "でき、グローバルなロールのサブセットを意味し、それぞれのドメインまたはプロ" "ジェクトにスコープされたトークンに表示されます。 ただし、ドメイン固有のロール" "自体はトークンには表示されません。" msgid "" "[`blueprint federation-group-ids-mapped-without-domain-reference `_] Enhanced the federation mapping engine to allow for " "group IDs to be referenced without a domain ID." msgstr "" "[`blueprint federation-group-ids-mapped-without-domain-reference `_] 統合マッピングエンジンを強化し、ドメイン ID なしでグルー" "プ ID を参照できるようにしました。" msgid "" "[`blueprint implied-roles `_] Keystone now supports creating implied roles. Role " "inference rules can now be added to indicate when the assignment of one role " "implies the assignment of another. The rules are of the form `prior_role` " "implies `implied_role`. At token generation time, user/group assignments of " "roles that have implied roles will be expanded to also include such roles in " "the token. The expansion of implied roles is controlled by the " "`prohibited_implied_role` option in the `[assignment]` section of `keystone." "conf`." 
msgstr "" "[`blueprint implied-roles `_] Keystone は暗黙のロールの作成をサポートするようになりまし" "た。ロール推論ルールを追加して、あるロールの割り当てが別のロールの割り当てを" "意味する場合を示すことができるようになりました。そのルールは `prior_role` の" "形式で `implied_role` を意味します。トークン生成時に、暗黙的なロールを持つ" "ロールのユーザー/グループの割り当てが展開され、そのロールもトークンに含められ" "ます。暗黙のロールの拡張は、 `keystone.conf` の `[assignment]` セクションの " "`prohibited_implied_role` オプションによって制御されます。" msgid "" "[`blueprint manage-migration `_] Upgrading keystone to a new version can now be " "undertaken as a rolling upgrade using the `--expand`, `--migrate` and `--" "contract` options of the `keystone-manage db_sync` command." msgstr "" "[`blueprint manage-migration `_] Keystone の新しいバージョンへのアップグレードは、 " "`keystone-manage db_sync` コマンドの `--expand`、` --migrate`、 `--contract` " "オプションを使用して、ローリングアップグレードとして実行できるようになりまし" "た。" msgid "" "[`blueprint move-extensions `_] If any extension migrations are run, for example: " "``keystone-manage db_sync --extension endpoint_policy`` an error will be " "returned. This is working as designed. To run these migrations simply run: " "``keystone-manage db_sync``. The complete list of affected extensions are: " "``oauth1``, ``federation``, ``endpoint_filter``, ``endpoint_policy``, and " "``revoke``." msgstr "" "[`blueprint move-extensions `_] ``keystone-manage db_sync --extension endpoint_policy`` " "のような拡張マイグレーションが実行された場合、エラーが返されます。これは設計" "どおりの動作です。これらの移行を実行するには、単に ``keystone-manage " "db_sync`` を実行します。影響を受ける拡張は、 ``oauth1`` 、 ``federation`` 、 " "``endpoint_filter`` 、``endpoint_policy`` 、および ``revoke`` です。" msgid "" "[`blueprint password-expires-validation `_] Token responses will now have " "a ``password_expires_at`` field in the ``user`` object, this can be " "expressed briefly as::" msgstr "" "[`blueprint password-expires-validation `_] トークンレスポンスは ``user`` " "オブジェクトに ``password_expires_at`` フィールドを持つようになりました。これ" "はつまり、" msgid "" "[`blueprint pci-dss-notifications `_] CADF notifications now extend to PCI-DSS " "events. A ``reason`` object is added to the notification. 
A ``reason`` " "object has both a ``reasonType`` (a short description of the reason) and " "``reasonCode`` (the HTTP return code). The following events will be impacted:" msgstr "" "[`blueprint pci-dss-notifications `_] CADF 通知は、 PCI-DSS イベントにも適用されま" "す。 ``reason`` オブジェクトが通知に追加されました。 ``reason`` オブジェクト" "には ``reasonType`` (理由の簡単な説明)と ``reasonCode`` (HTTP レスポンス" "コード)があります。以下のイベントが影響を受けます:" msgid "" "[`blueprint pci-dss-password-requirements-api `_] Added a new API (``/" "v3/domains/{domain_id}/config/security_compliance``) to retrieve regular " "expression requirements for passwords. Specifically, ``[security_compliance] " "password_regex`` and ``[security_compliance] password_regex_description`` " "will be returned. Note that these options are only meaningful if PCI support " "is enabled, via various ``[security_compliance]`` configuration options." msgstr "" "[`blueprint pci-dss-password-requirements-api `_] パスワードの正規表現" "要件を取得するための新しい API ( ``/v3/domains/{domain_id}/config/" "security_compliance`` )を追加しました。具体的には、 ``[security_compliance] " "password_regex`` と ``[security_compliance] password_regex_description`` が返" "されます。これらのオプションは、さまざまな ``[security_compliance]`` 設定オプ" "ションを使って PCI サポートが有効になっている場合にのみ有効であることに注意し" "てください。" msgid "" "[`blueprint pci-dss-query-password-expired-users `_] Added " "a ``password_expires_at`` query to ``/v3/users`` and ``/v3/groups/{group_id}/" "users``. The ``password_expires_at`` query is comprised of two parts, an " "``operator`` (valid choices listed below) and a ``timestamp`` (of form " "``YYYY-MM-DDTHH:mm:ssZ``). The APIs will filter the list of users based on " "the ``operator`` and ``timestamp`` given." 
msgstr "" "[`blueprint pci-dss-query-password-expired-users `_] ``/v3/" "users`` と ``/v3/groups/{group_id}/users`` に ``password_expires_at`` クエ" "リーを追加しました。 ``password_expires_at`` クエリーは ``operator`` (下記の" "有効な選択肢)と ``YYYY-MM-DDTHH:mm:ssZ`` 形式の ``timestamp`` の2つの部分" "で構成されています。 API は与えられた ``operator`` と ``timestamp`` に基づい" "てユーザーのリストをフィルタリングします。" msgid "" "[`blueprint per-user-auth-plugin-reqs `_] Per-user Multi-Factor-Auth " "rules (MFA Rules) have been implemented. These rules define which auth " "methods can be used (e.g. Password, TOTP) and provides the ability to " "require multiple auth forms to successfully get a token." msgstr "" "[`blueprint per-user-auth-plugin-reqs `_] ユーザー単位の多要素認証ルール" "( MFA ルール)が実装されました。 これらのルールは、どの認証方法を使用できる" "か(たとえば、パスワードや TOTP)を定義し、トークンを正常に取得するために複数" "の認証フォームを要求する機能を提供します。" msgid "" "[`blueprint project-tags `_] Projects have a new property called tags. These tags are " "simple strings that can be used to allow projects to be filtered/searched. " "Project tags will have the following properties:" msgstr "" "[`blueprint project-tags `_] プロジェクトには tags という新しいプロパティがあります。これ" "らのタグは、プロジェクトをフィルタリング/検索できるようにするための単純な文" "字列です。プロジェクトタグには、以下のプロパティがあります。" msgid "" "[`blueprint removed-as-of-mitaka `_] Notifications with event_type ``identity." "created.role_assignment`` and ``identity.deleted.role_assignment`` have been " "removed. The keystone team suggests listening for ``identity.role_assignment." "created`` and ``identity.role_assignment.deleted`` instead. This was " "deprecated in the Kilo release." msgstr "" "[`blueprint removed-as-of-mitaka `_] ``identity.created.role_assignment`` と " "``identity.deleted.role_assignment`` の event_type の通知は削除されました。 " "Keystone チームは代わりに、 ``identity.role_assignment.created`` と " "``identity.role_assignment.deleted`` で判断するよう提案しています。 これは " "Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] Removed Catalog KVS backend (``keystone." "catalog.backends.sql.Catalog``). 
This was deprecated in the Icehouse release." msgstr "" "[`blueprint removed-as-of-mitaka `_] カタログ KVS バックエンドを削除しました" "( ``keystone.catalog.backends.sql.Catalog`` )。 これは Icehouse リリースで" "非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] Removed Revoke KVS backend (``keystone.revoke." "backends.kvs.Revoke``). This was deprecated in the Juno release." msgstr "" "[`blueprint removed-as-of-mitaka `_] Revoke KVS バックエンドを削除しました" "( ``keystone.revoke.backends.kvs.Revoke``)。 これは Juno リリースで非推奨に" "なりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] Removed ``RequestBodySizeLimiter`` from " "keystone middleware. The keystone team suggests using ``oslo_middleware." "sizelimit.RequestBodySizeLimiter`` instead. This was deprecated in the Kilo " "release." msgstr "" "[`blueprint removed-as-of-mitaka `_] keystone middleware から " "``RequestBodySizeLimiter`` を削除しました。 Keystone チームは代わりに " "``oslo_middleware.sizelimit.RequestBodySizeLimiter`` を使用することを提案して" "います。これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] Removed ``check_role_for_trust`` from the " "trust controller, ensure policy files do not refer to this target. This was " "deprecated in the Kilo release." msgstr "" "[`blueprint removed-as-of-mitaka `_] トラストコントローラーから " "``check_role_for_trust`` を削除しました。ポリシーファイルがこのターゲットを参照" "していないことを確認してください。これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] Removed ``extras`` from token responses. " "These fields should not be necessary and a well-defined API makes this field " "redundant. This was deprecated in the Kilo release." msgstr "" "[`blueprint removed-as-of-mitaka `_] トークンレスポンスから ``extras`` を削除しまし" "た。 これらのフィールドは必要ではなく、明確な API によってこのフィールドは冗" "長になります。これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] The LDAP backend for Assignment has been " "removed. This was deprecated in the Kilo release." 
msgstr "" "[`blueprint removed-as-of-mitaka `_] Assignment の LDAP バックエンドが削除されまし" "た。これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] The LDAP backend for Resource has been " "removed. This was deprecated in the Kilo release." msgstr "" "[`blueprint removed-as-of-mitaka `_] Resource の LDAP バックエンドが削除されまし" "た。これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-mitaka `_] The LDAP backend for Role has been removed. " "This was deprecated in the Kilo release." msgstr "" "[`blueprint removed-as-of-mitaka `_] ロール の LDAP バックエンドが削除されました。" "これは Kilo リリースで非推奨になりました。" msgid "" "[`blueprint removed-as-of-newton `_] Removed ``[eventlet_server]`` and " "``[eventlet_server_ssl]`` sections from the `keystone.conf`." msgstr "" "[`blueprint removed-as-of-newton `_] ``[eventlet_server]`` と " "``[eventlet_server_ssl]`` セクションが `keystone.conf` から削除されました。" msgid "" "[`blueprint removed-as-of-newton `_] Removed support for generating SSL " "certificates." msgstr "" "[`blueprint removed-as-of-newton `_] SSL 証明書の生成のサポートは削除されました。" msgid "" "[`blueprint removed-as-of-newton `_] Removed support for running keystone under " "eventlet. It is recommended to run keystone in an HTTP server." msgstr "" "[`blueprint removed-as-of-newton `_] イベントレット配下での Keystone の実行のサポー" "トが削除されました。 HTTP サーバーでの Keystone の実行が推奨されています。" msgid "" "[`blueprint removed-as-of-newton `_] Removed the backend and route from ``keystone." "contrib.endpoint_policy``. The package has been moved to ``keystone." "endpoint_policy``. This was deprecated in the Liberty release." msgstr "" "[`blueprint removed-as-of-newton `_] ``keystone.contrib.endpoint_policy`` からバッ" "クエンドとルートを削除しました。このパッケージは ``keystone." "endpoint_policy`` に移動しました。これは Liberty リリースで非推奨になりまし" "た。" msgid "" "[`blueprint removed-as-of-newton `_] The ``revoke_by_expiration`` method in " "``keystone.revoke.core`` has been removed. This was deprecated in the Juno " "release." 
msgstr "" "[`blueprint removed-as-of-newton `_] ``keystone.revoke.core`` の " "``revoke_by_expiration`` メソッドを削除しました。 これは Juno リリースで非推" "奨になりました。" msgid "" "[`blueprint removed-as-of-pike `_] All key-value-store code, options, and " "documentation has been removed as of the Pike release. The removed code " "included ``keystone.common.kvs`` configuration options for the KVS code, " "unit tests, and the KVS token persistence driver ``keystone.token." "persistence.backends.kvs``. All associated documentation has been removed." msgstr "" "[`blueprint removed-as-of-pike `_] すべてのキーバリューストアのコード、オプション、" "およびドキュメントは、 Pike リリースで削除されました。削除されたコードには、" "KVS コードの ``keystone.common.kvs`` 設定オプション、単体テスト、KVS トークン" "永続性ドライバーの ``keystone.token.persistence.backends.kvs`` が含まれていま" "す。すべての関連文書が削除されました。" msgid "" "[`blueprint removed-as-of-pike `_] Direct import of drivers outside of their " "`keystone` namespace has been removed. Ex. identity drivers are loaded from " "the `keystone.identity` namespace and assignment drivers from the `keystone." "assignment` namespace. Loading drivers outside of their keystone namespaces " "was deprecated in the Liberty release." msgstr "" "[`blueprint removed-as-of-pike `_] `keystone` 名前空間の外のドライバーの直接的なインポー" "トを削除しました。例えば、認証ドライバーは `keystone.identity` 名前空間から、" "割り当てドライバーは `keystone.assignment` 名前空間からロードされます。 " "keystone 名前空間外のドライバーの読み込みは、Liberty リリースで非推奨になって" "いました。" msgid "" "[`blueprint removed-as-of-pike `_] The ``admin_token_auth`` filter has been " "removed from all sample pipelines, specifically, the following section has " "been removed from ``keystone-paste.ini``::" msgstr "" "[`blueprint removed-as-of-pike `_] ``admin_token_auth`` フィルタはすべてのサンプル" "パイプラインから削除されました。具体的には、以下のセクションが ``keystone-" "paste.ini`` から削除されました::" msgid "" "[`blueprint removed-as-of-pike `_] The ``keystone-manage pki_setup`` was added to " "aid developer setup by hiding the sometimes cryptic openssl commands. 
This " "is no longer needed since keystone no longer supports PKI tokens and can no " "longer serve SSL. This was deprecated in the Mitaka release." msgstr "" "[`blueprint removed-as-of-pike `_] しばしば謎めいた openssl コマンドを隠すことに" "よって、開発者向けセットアップを補助するために、``keystone-manage " "pki_setup`` が追加されていました。 Keystone はもはや PKI トークンをサポートし" "なくなり、 SSL を提供することができなくなったため、これはもう必要ありませ" "ん。 これは Mitaka リリースで廃止されました。" msgid "" "[`blueprint removed-as-of-pike `_] The ``keystone.common.ldap`` module was removed " "from the code tree. It was deprecated in the Newton release in favor of " "using ``keystone.identity.backends.ldap.common`` which has the same " "functionality." msgstr "" "[`blueprint removed-as-of-pike `_] ``keystone.common.ldap`` モジュールがコードツ" "リーから削除されました。 Newton リリースで非推奨となり、同じ機能を持つ " "``keystone.identity.backends.ldap.common`` の使用を推奨しています。" msgid "" "[`blueprint removed-as-of-queens `_] The ``admin_token_auth`` middleware is " "removed now. The related doc is removed as well." msgstr "" "[`blueprint removed-as-of-queens `_] ``admin_token_auth`` ミドルウェアを削除しまし" "た。関連する文書も削除しました。" msgid "" "[`blueprint shadow-mapping `_] The federated identity mapping engine now supports the " "ability to automatically provision ``projects`` for ``federated users``. A " "role assignment will automatically be created for the user on the specified " "project. If the project specified within the mapping does not exist, it will " "be automatically created in the ``domain`` associated with the ``identity " "provider``. This behavior can be triggered using a specific syntax within " "the ``local`` rules section of a mapping. 
For more information see: `mapping " "combinations `_" msgstr "" "[`blueprint shadow-mapping `_] 統合認証マッピングエンジンは、 ``統合ユーザー`` の ``プロ" "ジェクト`` を自動的に提供する機能をサポートします。ユーザーのロール割当は、指" "定されたプロジェクトに自動的に登録されます。マッピング内に指定されたプロジェ" "クトが存在しない場合、``認証プロバイダー`` に関連付けられた ``ドメイン`` に自" "動的に作成されます。この動作は、マッピングの `` local`` ルールセクション内の" "特定の構文を使用することで引き起こされます。詳細は、 `マッピングの組み合わ" "せ\n" " `_ を参照してください。" msgid "" "[`blueprint support-federated-attr `_] Added new filters to the `list " "user` API (``GET /v3/users``) to support querying federated identity " "attributes: ``idp_id``, ``protocol_id``, and ``unique_id``." msgstr "" "[`blueprint support-federated-attr `_] 統合認証の属性( ``idp_id``、 " "``protocol_id``、 ``unique_id`` )の問い合わせをサポートするために、 `list " "user` API (``GET /v3/users``) に新しいフィルターを追加しました。" msgid "" "[`blueprint totp-auth `_] Keystone now supports authenticating via Time-based One-time " "Password (TOTP). To enable this feature, add the ``totp`` auth plugin to the " "`methods` option in the `[auth]` section of `keystone.conf`. More " "information about using TOTP can be found in `keystone's developer " "documentation `_." msgstr "" "[`blueprint totp-auth `_] Keystone は Time-based One-Time Password(TOTP)による認証をサポート" "しました。 この機能を有効にするには、 ``totp`` 認証プラグインを `keystone." "conf` の `[auth]` セクションの `methods` オプションに追加してください。 TOTP " "の使用に関する詳細は、 `Keystone の開発者用ドキュメント `_ にあります。" msgid "" "[`blueprint unified-limit `_] Keystone now supports unified limits. Two resouces called " "``registered limit`` and ``limit`` are added and a batch of related APIs are " "supported as well. These APIs are experimental now. It means that they are " "not stable enough and may be changed without backward compatibility. Once " "unified limit feature are ready for consuming, the APIs will be marked as " "stable." 
msgstr "" "[`blueprint unified-limit `_] Keystone は、統一制限をサポートしました。``登録された制限``" "および``制限``と呼ばれる二つのリソースが追加され、関連する API のバッチもサ" "ポートされます。これらの API は、現在実験段階です。これは、それらが十分安定し" "ておらず、下位互換性なしに変更される可能性があることを意味します。統一制限機" "能が使用可能になると、API は安定したものとしてマークされます。" msgid "" "[`blueprint url-safe-naming `_] The names of projects and domains can optionally be " "ensured to be url safe, to support the future ability to specify projects " "using hierarchical naming." msgstr "" "[`blueprint url-safe-naming `_] 階層的な名前付けを使用してプロジェクトを指定するための将" "来の機能をサポートするために、オプションで、プロジェクトとドメインの名前を " "URL に安全に使用できるようにしました。" msgid "" "[`blueprint x509-ssl-client-cert-authn `_] Keystone now supports " "tokenless client SSL x.509 certificate authentication and authorization." msgstr "" "[`blueprint x509-ssl-client-cert-authn `_] Keystone は、トークンレスクライ" "アント SSL x.509 証明書の認証と承認をサポートしました。" msgid "" "[`bug 1017606 `_] The " "signature on the ``get_catalog`` and ``get_v3_catalog`` methods of " "``keystone.catalog.backends.base.CatalogDriverBase`` have been updated. " "Third-party extensions that extend the abstract class " "(``CatalogDriverBase``) should be updated according to the new parameter " "names. The method signatures have changed from::" msgstr "" "[`bug 1017606 `_] " "``keystone.catalog.backends.base.CatalogDriverBase`` の ``get_catalog`` メ" "ソッドと ``get_v3_catalog`` メソッドが変更されました。抽象クラス" "(``CatalogDriverBase``)を拡張するサードパーティー拡張は、新しいパラメータ名" "に従って変更する必要があります。変更前のメソッド::" msgid "" "[`bug 1291157 `_] Identity " "provider information is now validated in during token validation. If an " "identity provider is removed from a keystone service provider, tokens " "associated to that identity provider will be considered invalid." 
msgstr "" "[`bug 1291157 `_] トークン" "の検証中に、認証プロバイダー情報が検証されるようになりました。認証プロバイ" "ダーが Keystone サービスプロバイダーから削除された場合、その認証プロバイダー" "に関連付けられたトークンは無効とみなされます。" msgid "" "[`bug 1367113 `_] The " "\"get entity\" and \"list entities\" functionality for the KVS catalog " "backend has been reimplemented to use the data from the catalog template. " "Previously this would only act on temporary data that was created at " "runtime. The create, update and delete entity functionality now raises an " "exception." msgstr "" "[`bug 1367113 `_] カタログ" "テンプレートのデータを使用するために、KVS カタログバックエンドの「エンティ" "ティー取得」機能と「エンティティー一覧」機能が再実装されました。これまでは、" "実行時に作成された一時的なデータに対してのみ機能しました。 エンティティーの作" "成、更新、削除機能は、例外を発生するようになりました。" msgid "" "[`bug 1473042 `_] " "Keystone's S3 compatibility support can now authenticate using AWS Signature " "Version 4." msgstr "" "[`bug 1473042 `_] Keystone " "の S3 互換性サポートは、 AWS Signature Version 4 を使用して認証を行います。" msgid "" "[`bug 1473553 `_] The " "`keystone-paste.ini` must be updated to put the ``admin_token_auth`` " "middleware before ``build_auth_context``. See the sample `keystone-paste." "ini` for the correct `pipeline` value. Having ``admin_token_auth`` after " "``build_auth_context`` is deprecated and will not be supported in a future " "release." msgstr "" "[`bug 1473553 `_] " "`keystone-paste.ini` は、``build_auth_context`` の前に ``admin_token_auth`` " "ミドルウェアを置くように変更する必要があります。正しい `pipeline` の値につい" "ては、` keystone-paste.ini` サンプルを参照してください。" "``build_auth_context`` の後の ``admin_token_auth`` は非推奨になり、将来のリ" "リースではサポートされなくなります。" msgid "" "[`bug 1479569 `_] Names " "have been added to list role assignments (GET /role_assignments?" "include_names=True), rather than returning just the internal IDs of the " "objects the names are also returned." msgstr "" "[`bug 1479569 `_] ロールの" "割り当てのリストに名前が追加されました(GET /role_assignments?" 
"include_names=True)。オブジェクトの内部的な ID だけでなく、名前も返されま" "す。" msgid "" "[`bug 1480270 `_] " "Endpoints created when using v3 of the keystone REST API will now be " "included when listing endpoints via the v2.0 API." msgstr "" "[`bug 1480270 `_] v2.0 API " "を使用してエンドポイントをリストするときに、Keystone REST API の v3 を使用し" "て作成されたエンドポイントが含まれるようになりました。" msgid "" "[`bug 1489061 `_] Caching " "has been added to catalog retrieval on a per user ID and project ID basis. " "This affects both the v2 and v3 APIs. As a result this should provide a " "performance benefit to fernet-based deployments." msgstr "" "[`bug 1489061 `_] ユーザー " "ID およびプロジェクト ID ごとのカタログ検索にキャッシュが追加されました。これ" "は、v2 と v3 の両方の API に影響します。 結果として、これはFernet ベースのデ" "プロイでパフォーマンス上の利点を提供します。" msgid "" "[`bug 1490804 `_] Audit " "IDs are included in the token revocation list." msgstr "" "[`bug 1490804 `_] 監査 ID " "がトークン取消リストに含まれるようになりました。" msgid "" "[`bug 1490804 `_] " "[`CVE-2015-7546 `_] A bug is fixed where an attacker could avoid token " "revocation when the PKI or PKIZ token provider is used. The complete " "remediation for this vulnerability requires the corresponding fix in the " "keystonemiddleware project." msgstr "" "[`bug 1490804 `_] " "[`CVE-2015-7546 `_] PKI または PKIz トークンプロバイダーが使用されていると" "きに、攻撃者がトークンの取り消しを回避できるバグが修正されました。この脆弱性" "に対する完全な修復には、keystonemiddleware プロジェクトの対応する修正が必要で" "す。" msgid "" "[`bug 1500222 `_] Added " "information such as: user ID, project ID, and domain ID to log entries. As a " "side effect of this change, both the user's domain ID and project's domain " "ID are now included in the auth context." msgstr "" "[`bug 1500222 `_] ユーザー " "ID 、プロジェクト ID 、およびドメイン ID などの情報をログエントリーに追加しま" "した。この変更の副作用として、ユーザーのドメイン ID とプロジェクトのドメイン " "ID の両方が認証コンテキストに含まれるようになりました。" msgid "" "[`bug 1501698 `_] Support " "parameter `list_limit` when LDAP is used as identity backend." 
msgstr "" "[`bug 1501698 `_] 認証バッ" "クエンドに LDAP を使用した時の `list_limit` パラメーターがサポートされまし" "た。" msgid "" "[`bug 1515302 `_] Two new " "configuration options have been added to the `[ldap]` section. " "`user_enabled_emulation_use_group_config` and " "`project_enabled_emulation_use_group_config`, which allow deployers to " "choose if they want to override the default group LDAP schema option." msgstr "" "[`bug 1515302 `_] `[ldap]` " "セクションに2つの新しい設定オプションが追加されました。" "`user_enabled_emulation_use_group_config` と " "`project_enabled_emulation_use_group_config` を使用して、デプロイヤーがデフォ" "ルトのグループ LDAP スキーマオプションを上書きするかどうかを選択できます。" msgid "" "[`bug 1516469 `_] " "Endpoints filtered by endpoint_group project association will be included in " "the service catalog when a project scoped token is issued and " "``endpoint_filter.sql`` is used for the catalog driver." msgstr "" "[`bug 1516469 `_] " "endpoint_group のプロジェクト関連付けによってフィルタリングされたエンドポイン" "トは、プロジェクトスコープトークンが発行され、カタログドライバに " "``endpoint_filter.sql`` が使用されたときにサービスカタログに含まれます。" msgid "" "[`bug 1519210 `_] A user " "may now opt-out of notifications by specifying a list of event types using " "the `notification_opt_out` option in `keystone.conf`. These events are never " "sent to a messaging service." msgstr "" "[`bug 1519210 `_] " "`keystone.conf` の `notification_opt_out` オプションを使ってイベント種別のリ" "ストを指定することで、通知をオプトアウトすることができます。 これらのイベント" "は、メッセージングサービスには送信されません。" msgid "" "[`bug 1523369 `_] Deleting " "a project will now cause it to be removed as a default project for users. If " "caching is enabled the changes may not be visible until the user's cache " "entry expires." msgstr "" "[`bug 1523369 `_] プロジェ" "クトを削除すると、ユーザーのデフォルトプロジェクトからそのプロジェクトが削除" "されます。キャッシングが有効な場合、ユーザーのキャッシュエントリーが期限切れ" "になるまで変更内容が表示されないことがあります。" msgid "" "[`bug 1524030 `_] During " "token validation we have reduced the number of revocation events returned, " "only returning a subset of events relevant to the token. Thus, improving " "overall token validation performance." 
msgstr "" "[`bug 1524030 `_] トークン" "の検証中に返される取り消しイベントの数を減らし、トークンに関連するイベントの" "サブセットのみ返すようにしました。したがって、全体的なトークン検証のパフォー" "マンスが向上します。" msgid "" "[`bug 1524030 `_] " "Revocation records are no longer written to the ``revocation_event`` table " "when a domain or project is disabled. These records were only ever used " "during the token validation process. In favor of revocation events, the " "project or domain will be validated online when the token is validated. This " "results in less database bloat while maintaining security during token " "validation." msgstr "" "[`bug 1524030 `_] ドメイン" "またはプロジェクトが無効になっている場合、破棄レコードは " "``revocation_event`` テーブルに書き込まれなくなりました。これらのレコードは、" "トークンの検証プロセスのみで使われていました。破棄イベントのために、トークン" "が検証されるときに、プロジェクトまたはドメインはオンラインで検証されます。こ" "れにより、トークンの検証中にセキュリティを維持しながら、データベースの膨張を" "少なくできます。" msgid "" "[`bug 1524030 `_] The " "signature on the ``list_events`` method of ``keystone.revoke.backends.base." "RevokeDriverBase`` has been updated. Third-party extensions that extend the " "abstract class (``RevokeDriverBase``) should update their code according to " "the new parameter names. The method signature has changed from::" msgstr "" "[`bug 1524030 `_] " "``keystone.revoke.backends.base.RevokeDriverBase`` の ``list_events`` メソッ" "ドを変更しました。 抽象クラス(``RevokeDriverBase``)を拡張するサードパー" "ティー拡張は、新しいパラメーターに従ってコードを変更する必要があります。 メ" "ソッドの変更前::" msgid "" "[`bug 1525317 `_] Enable " "filtering of identity providers based on `id`, and `enabled` attributes." msgstr "" "[`bug 1525317 `_] 認証プロ" "バイダーのフィルタリングを `id` と `enabled` 属性を基にして有効化します。" msgid "" "[`bug 1526462 `_] Support " "for posixGroups with OpenDirectory and UNIX when using the LDAP identity " "driver." msgstr "" "[`bug 1526462 `_] LDAP 認証" "ドライバーを使用する場合、 OpenDirectory および UNIX での posixGroups をサ" "ポートしました。" msgid "" "[`bug 1527759 `_] Reverted " "the change that eliminates the ability to get a V2 token with a user or " "project that is not in the default domain. 
This change broke real-world " "deployments that utilized the ability to authenticate via V2 API with a user " "not in the default domain or with a project not in the default domain. The " "deployer is being convinced to update code to properly handle V3 auth but " "the fix broke expected and tested behavior." msgstr "" "[`bug 1527759 `_] 既定のド" "メインにないユーザーまたはプロジェクトで V2 トークンを取得する機能を削除する" "変更を元に戻しました。この変更は、既定のドメインにないユーザーまたはプロジェ" "クトで V2 API 経由で認証する機能を利用していた実際のデプロイを破壊しました。" "デプロイヤーは V3 の認証を適切に処理するためにコードを変更してくれると確信し" "ていますが、この修正は、期待され、テストされた動作を破壊したものでした。" msgid "" "[`bug 1535878 `_] " "Originally, to perform GET /projects/{project_id}, the provided policy files " "required a user to have at least project admin level of permission. They " "have been updated to allow it to be performed by any user who has a role on " "the project." msgstr "" "[`bug 1535878 `_] もとも" "と、 GET /projects/{project_id} を実行するために、提供されたポリシーファイル" "には、少なくともプロジェクトの管理者レベルの権限が必要でした。プロジェクトに" "役割を持つユーザーなら誰でも実行できるように更新しました。" msgid "" "[`bug 1541092 `_] Only " "database upgrades from Kilo and newer are supported." msgstr "" "[`bug 1541092 `_] データ" "ベースのアップグレードは Kilo 以降のみでサポートされます。" msgid "" "[`bug 1542417 `_] Added " "support for a `user_description_attribute` mapping to the LDAP driver " "configuration." msgstr "" "[`bug 1542417 `_] " "`user_description_attribute` の LDAP ドライバー設定へのマッピングがサポートさ" "れました。" msgid "" "[`bug 1543048 `_] [`bug " "1668503 `_] Keystone now " "supports multiple forms of password hashing. Notably bcrypt, scrypt, and " "pbkdf2_sha512. The options are now located in the `[identity]` section of " "the configuration file. To set the algorithm use `[identity] " "password_hash_algorithm`. To set the number of rounds (time-complexity, and " "memory-use in the case of scrypt) use `[identity] password_hash_rounds`. " "`scrypt` and `pbkdf2_sha512` have further tuning options available. Keystone " "now defaults to using `bcrypt` as the hashing algorithm. 
All passwords will " "continue to function with the old sha512_crypt hash, but new password hashes " "will be bcrypt." msgstr "" "[`bug 1543048 `_] [`bug " "1668503 `_] Keystone は複数" "の形式のパスワードハッシュをサポートするようになりました。特に bcrypt 、 " "scrypt 、および pbkdf2_sha512 。オプションは設定ファイルの `[identity]` セク" "ションにあります。 アルゴリズムを設定するには `[identity] " "password_hash_algorithm` を使います。ラウンド数(scrypt の場合、時間複雑度と" "メモリ使用量)を設定するには、 `[identity] password_hash_rounds` を使用しま" "す。 `scrypt` と `pbkdf2_sha512` にはさらにチューニングオプションがありま" "す。 Keystone はデフォルトのハッシュアルゴリズムとして `bcrypt` を使用するよ" "うになっています。 すべてのパスワードは古い\n" "sha512_crypt ハッシュで引き続き機能しますが、新しいパスワードハッシュは " "bcrypt になります。" msgid "" "[`bug 1547684 `_] A minor " "change to the ``policy.v3cloudsample.json`` sample file was performed so the " "sample file loads correctly. The ``cloud_admin`` rule has changed from::" msgstr "" "[`bug 1547684 `_] サンプル" "ファイルが正しく読み込まれるように、 ``policy.v3cloudsample.json`` サンプル" "ファイルを少し変更しました。 ``cloud_admin`` ルール変更前::" msgid "" "[`bug 1547684 `_] A typo " "in the ``policy.v3cloudsample.json`` sample file was causing `oslo.policy` " "to not load the file. See the ``upgrades`` section for more details." msgstr "" "[`bug 1547684 `_] ``policy." "v3cloudsample.json`` サンプルファイルのタイプミスにより `oslo.policy` がファ" "イルをロードしていませんでした。 詳細については、``アップグレード時の注意`` " "の節を参照してください。" msgid "" "[`bug 1555830 `_] Enable " "filtering of service providers based on `id`, and `enabled` attributes." msgstr "" "[`bug 1555830 `_] サービス" "プロバイダーのフィルタリングを `id` と `enabled` 属性を基にして有効化します。" msgid "" "[`bug 1561054 `_] If " "upgrading to Fernet tokens, you must have a key repository and key " "distribution mechanism in place, otherwise token validation may not work. " "Please see the upgrade section for more details." msgstr "" "[`bug 1561054 `_] Fernet " "トークンにアップグレードする場合は、キーリポジトリーとキー配布メカニズムを適" "切に配置する必要があります。そうしないと、トークンの検証が機能しない可能性が" "あります。詳細については、アップグレード時の注意のセクションを参照してくださ" "い。" msgid "" "[`bug 1561054 `_] The " "default token provider has switched from UUID to Fernet. 
Please note that " "Fernet requires a key repository to be in place prior to running Ocata, this " "can be done running ``keystone-manage fernet_setup``. Additionally, for " "multi-node deployments, it is imperative that a key distribution process be " "in use before upgrading. Once a key repository has been created it should be " "distributed to all keystone nodes in the deployment. This ensures that each " "keystone node will be able to validate tokens issued across the deployment. " "If you do not wish to switch token formats, you will need to explicitly set " "the token provider for each node in the deployment by setting ``[token] " "provider`` to ``uuid`` in ``keystone.conf``. Documentation can be found at " "`fernet-tokens `_." msgstr "" "[`bug 1561054 `_] デフォル" "トのトークンプロバイダーは、 UUID から Fernet に切り替えました。 Fernet は、" "Ocata を実行する前にキーリポジトリーが必要であることに注意してください。これ" "は ``keystone-manage fernet_setup`` を実行することで実現できます。さらに、マ" "ルチノード環境では、アップグレードする前にキー配布プロセスを使用することが不" "可欠です。キーリポジトリーを作成したら、そのリポジトリーをデプロイメントのす" "べての Keystone ノードに配布する必要があります。これにより、各 Keystone ノー" "ドはデプロイメント全体で発行されたトークンを検証できます。 トークン形式を切り" "替える必要がない場合は、 ``keystone.conf`` の ``[token] provider`` を " "``uuid`` に設定して、各ノードのトークンプロバイダーを明示的に設定する必要があ" "ります。 ドキュメントは `fernet-tokens ` _ にありま" "す。" msgid "" "[`bug 1563101 `_] The " "token provider driver interface has moved from ``keystone.token.provider." "Provider`` to ``keystone.token.providers.base.Provider``. If implementing a " "custom token provider, subclass from the new location." msgstr "" "[`bug 1563101 `_] トークン" "プロバイダードライバーインタフェースを ``keystone.token.provider.Provider`` " "から ``keystone.token.providers.base.Provider`` に移動しました。カスタムトー" "クンプロバイダーを実装する場合は、新しい場所からサブクラス化してください。" msgid "" "[`bug 1571878 `_] A valid " "``mapping_id`` is now required when creating or updating a federation " "protocol. If the ``mapping_id`` does not exist, a ``400 - Bad Request`` will " "be returned." 
msgstr "" "[`bug 1571878 `_] フェデ" "レーションプロトコルを作成または更新するときは、有効な ``mapping_id`` が必要" "になります。 ``mapping_id`` が存在しない場合、 ``400 - Bad Request`` が返され" "ます。" msgid "" "[`bug 1582585 `_] A new " "method ``get_domain_mapping_list`` was added to ``keystone.identity." "mapping_backends.base.MappingDriverBase``. Third-party extensions that " "extend the abstract class (``MappingDriverBase``) should implement this new " "method. The method has the following signature::" msgstr "" "[`bug 1582585 `_] 新しいメ" "ソッド ``get_domain_mapping_list`` が ``keystone.identity.mapping_backends." "base.MappingDriverBase`` に追加されました。抽象クラス" "(``MappingDriverBase``)を拡張するサードパーティー拡張は、この新しいメソッド" "を実装する必要があります。このメソッドは次のように実装されています::" msgid "" "[`bug 1590587 `_] When " "assigning Domain Specific Roles, the domain of the role and the domain of " "the project must match. This is now validated and the REST call will return " "a 403 Forbidden." msgstr "" "[`bug 1590587 `_] ドメイン" "固有のロールを割り当てるときは、ロールのドメインとプロジェクトのドメインが一" "致する必要があります。現在、これが検証されており、 REST 呼び出しは 403 " "Forbidden を返します。" msgid "" "[`bug 1594482 `_] When " "using list_limit config option, the GET /services?name={service_name} API " "was first truncating the list and afterwards filtering by name. The API was " "fixed to first filter by name and only afterwards truncate the result list " "to the desired limit." msgstr "" "[`bug 1594482 `_] " "list_limit 設定オプションを使用すると、 GET /services?name={service_name} " "API が最初にリストを切り捨ててから、名前でフィルタリングしていました。 API は" "最初に名前でフィルタリングするように修正され、その後、結果リストを目的の制限" "で切り捨てます。" msgid "" "[`bug 1611102 `_] The " "methods ``list_endpoints_for_policy()`` and ``get_policy_for_endpoint()`` " "have been removed from the ``keystone.endpoint_policy.backends.base." "EndpointPolicyDriverBase`` abstract class, they were unused." msgstr "" "[`bug 1611102 `_] " "``list_endpoints_for_policy()`` と ``get_policy_for_endpoint()`` メソッド" "は使用されていないため、 ``keystone.endpoint_policy.backends.base." 
"EndpointPolicyDriverBase`` 抽象クラスから削除されました。" msgid "" "[`bug 1613466 `_] " "Credentials update to ec2 type originally accepted credentials with no " "project ID set, this would lead to an error when trying to use such " "credential. This behavior has been blocked, so creating a non-ec2 credential " "with no project ID and updating it to ec2 without providing a project ID " "will fail with a `400 Bad Request` error." msgstr "" "[`bug 1613466 `_] プロジェ" "クト ID が設定されていない資格情報が ec2 タイプの資格情報に更新されると、その" "資格情報を使用するときにエラーになります。この動作はブロックされており、プロ" "ジェクト ID を持たない ec2 以外の認証情報を作成して、それをプロジェクト ID を" "指定せずに ec2 に更新すると、 `400 Bad Request` エラーが発生します。" msgid "" "[`bug 1615014 `_] " "Migration order is now strictly enforced. The ensure upgrade process is done " "in the order it is officially documented and support, starting with " "`expand`, then `migrate`, and finishing with `contract`." msgstr "" "[`bug 1615014 `_] 移行指示" "が厳密に強制されるようになりました。確実なアップグレードプロセスは、公式に文" "書化されている順序で実行され、 `expand` で開始、次に `migrate` となり、 " "`contract` での終了をサポートしています。" msgid "" "[`bug 1616424 `_] Provide " "better exception messages when creating OAuth request tokens and OAuth " "access tokens via the ``/v3/OS-OAUTH1/request_token`` and ``/v3/OS-OAUTH1/" "access_token`` APIs, respectively." msgstr "" "[`bug 1616424 `_] ``/v3/OS-" "OAUTH1/request_token`` と ``/ v3/OS-OAUTH1/access_token`` API を使って、 " "OAuth リクエストトークンと OAuth アクセストークンを作成する際の例外メッセージ" "が改善されました。" msgid "" "[`bug 1616424 `_] Python " "build-in exception was raised if create request token or access token " "request from client with invalid request parameters, invalid signature for " "example. The implementation is hardened by showing proper exception and " "displaying the failure reasons if existent." 
msgstr "" "[`bug 1616424 `_] Python の" "ビルドインの例外は、無効なリクエストパラメータを持つクライアントからの要求" "トークンまたはアクセストークンの作成要求、例えば無効な署名などの場合に発生し" "ました。適切な例外を表示し、存在する場合は失敗の理由を表示することによって、" "実装が強化されます。" msgid "" "[`bug 1622310 `_] A new " "method ``delete_trusts_for_project`` has been added to ``keystone.trust." "backends.base.TrustDriverBase``. Third-party extensions that extend the " "abstract class (``TrustDriverBase``) should be updated according to the new " "parameter names. The signature for the new method is::" msgstr "" "[`bug 1622310 `_] 新しいメ" "ソッド ``delete_trusts_for_project`` が ``keystone.trust.backends.base." "TrustDriverBase`` に追加されました。抽象クラス(``TrustDriverBase``)を拡張す" "るサードパーティー拡張は、新しいパラメータに従って変更する必要があります。 新" "しいメソッドは次のように実装されています::" msgid "" "[`bug 1622310 `_] Trusts " "will now be invalidated if: the project to which the trust is scoped, or the " "user (trustor or trustee) for which the delegation is assigned, has been " "deleted." msgstr "" "[`bug 1622310 `_] 信頼がス" "コープされているプロジェクト、または委任が割り当てられているユーザー(委託者" "または受託者)が削除された場合、信頼は無効になります。" msgid "" "[`bug 1636950 `_] New " "option ``[ldap] connection_timeout`` allows a deployer to set a " "``OPT_NETWORK_TIMEOUT`` value to use with the LDAP server. This allows the " "LDAP server to return a ``SERVER_DOWN`` exception, if the LDAP URL is " "incorrect or if there is a connection failure. By default, the value for " "``[ldap] connection_timeout`` is -1, meaning it is disabled. Set a positive " "value (in seconds) to enable the option." msgstr "" "[`bug 1636950 `_] 新しいオ" "プション ``[ldap] connection_timeout`` は、デプロイヤーが LDAP サーバーで使用" "するための ``OPT_NETWORK_TIMEOUT`` 値を設定できるようにします。これにより、" "LDAP URLが正しくない場合や接続に失敗した場合、 LDAP サーバーは " "``SERVER_DOWN`` 例外を返すことができます。デフォルトでは、 ``[ldap] " "connection_timeout`` の値は -1 であり、無効になっています。オプションを有効に" "するには、正の値(秒単位)を設定します。" msgid "" "[`bug 1638603 `_] Add " "support for nested groups in Active Directory. A new boolean option ``[ldap] " "group_ad_nesting`` has been added, it defaults to ``False``. 
Enable the " "option is using Active Directory with nested groups. This option will impact " "the ``list_users_in_group``, ``list_groups_for_user``, and " "``check_user_in_group`` operations." msgstr "" "[`bug 1638603 `_] Active " "Directory のネストされたグループのサポートを追加しました。新しいブール値のオ" "プション ``[ldap] group_ad_nesting`` を追加しました。デフォルトは ``False`` " "です。有効にすると、ネストされたグループを持つ Active Directory を使用できま" "す。このオプションは、 ``list_users_in_group``、 ``list_groups_for_user``、お" "よび ``check_user_in_group`` 操作に影響します。" msgid "" "[`bug 1638603 `_] Support " "nested groups in Active Directory. A new boolean option ``[ldap] " "group_ad_nesting`` has been added, it defaults to ``False``. Enable the " "option is using Active Directory with nested groups. This option will impact " "the ``list_users_in_group``, ``list_groups_for_user``, and " "``check_user_in_group`` operations." msgstr "" "[`bug 1638603 `_] Active " "Directory のネストされたグループをサポートします。新しいブール値のオプション " "``[ldap] group_ad_nesting`` が追加され、デフォルトは ``False`` です。このオプ" "ションは、ネストされたグループのある Active Directory を使用可能にします。こ" "のオプションは、 ``list_users_in_group``、 ``list_groups_for_user``、および " "``check_user_in_group`` 操作に影響します。" msgid "" "[`bug 1641645 `_] RBAC " "protection was removed from the `Self-service change user password` API (``/" "v3/user/$user_id/password``), meaning, a user can now change their password " "without a token specified in the ``X-Auth-Token`` header. This change will " "allow a user, with an expired password, to update their password without the " "need of an administrator." msgstr "" "[`bug 1641645 `_] RBAC保護" "が `Self-service change user password` API (``/v3/user/$ user_id/" "password``)から削除されました。つまり、ユーザーは ``X- Auth-Token` ヘッダー" "に指定されているトークンを使用せずにパスワードを変更できるようになりました。" "この変更により、期限切れのパスワードを持つユーザーは、管理者の介入なしにパス" "ワードを更新できます。" msgid "" "[`bug 1641654 `_] The " "``healthcheck`` middleware from `oslo.middleware` has been added to the " "keystone application pipelines by default. 
The following section has been " "added to ``keystone-paste.ini``::" msgstr "" "[`bug 1641654 `_] `oslo." "middleware` の ``healthcheck`` ミドルウェアは、デフォルトで KeyStone のアプリ" "ケーションパイプラインに追加されています。 次のセクションが ``keystone-paste." "ini`` に追加されました。" msgid "" "[`bug 1641654 `_] The " "``healthcheck`` middleware from `oslo.middleware` has been added to the " "keystone application pipelines by default. This middleware provides a common " "method to check the health of keystone. Refer to the example paste provided " "in ``keystone-paste.ini`` to see how to include the ``healthcheck`` " "middleware." msgstr "" "[`bug 1641654 `_] `oslo." "middleware` の ``healthcheck`` ミドルウェアは、デフォルトで Keystone のアプリ" "ケーションパイプラインに追加されています。 このミドルウェアは、Keystone の健" "全性をチェックする一般的な方法を提供します。 ``keystone-paste.ini`` に用意さ" "れている例を参照して、 ``healthcheck`` ミドルウェアを含める方法を見てくださ" "い。" msgid "" "[`bug 1641660 `_] The " "default value for ``[DEFAULT] notification_format`` has been changed from " "``basic`` to ``cadf``. The CADF notifications have more information about " "the user that initiated the request." msgstr "" "[`bug 1641660 `_] " "``[DEFAULT] notification_format`` のデフォルト値は ``basic`` から ``cadf`` に" "変更されました。 CADF 通知には、要求を開始したユーザーに関する詳細情報があり" "ます。" msgid "" "[`bug 1641660 `_] The " "default value for ``[DEFAULT] notification_opt_out`` has been changed to " "include: ``identity.authenticate.success``, ``identity.authenticate." "pending`` and ``identity.authenticate.failed``. If a deployment relies on " "these notifications, then override the default setting." msgstr "" "[`bug 1641660 `_] " "``[DEFAULT] notification_opt_out`` のデフォルト値は、 ``identity." "authenticate.success``、 ``identity.authenticate.pending``、 ``identity." "authenticate.failed`` を含むように変更されました。 デプロイメントがこれらの通" "知に依存している場合は、デフォルト設定を書き換えてください。" msgid "" "[`bug 1641816 `_] The " "``[token] cache_on_issue`` option is now enabled by default. This option has " "no effect unless global caching and token caching are enabled." 
msgstr "" "[`bug 1641816 `_] デフォル" "トで ``[token] cache_on_issue`` オプションが有効になりました。このオプション" "は、グローバルキャッシングとトークンキャッシングが有効になっていない限り無効" "です。" msgid "" "[`bug 1642348 `_] Added " "new option ``[security_compliance] lockout_ignored_user_ids`` to allow " "deployers to specify users that are exempt from PCI lockout rules." msgstr "" "[`bug 1642348 `_] デプロイ" "ヤーが PCI ロックアウトルールから除外されているユーザーを指定できるようにする" "新しいオプション ``[security_compliance] lockout_ignored_user_ids`` が追加さ" "れました。" msgid "" "[`bug 1642457 `_] Handle " "disk write and IO failures when rotating keys for Fernet tokens. Rather than " "creating empty keys, properly catch and log errors when unable to write to " "disk." msgstr "" "[`bug 1642457 `_] Fernet " "トークンのキーをローテーションするときの、ディスク書き込みと I/O 失敗を処理し" "ます。空のキーを作成するのではなく、ディスクに書き込めないときにエラーを適切" "にキャッチしてログに記録します。" msgid "" "[`bug 1642687 `_] The " "signature on the ``create_federated_user`` method of ``keystone.identity." "shadow_backends.base.ShadowUsersDriverBase`` has been updated." msgstr "" "[`bug 1642687 `_] " "``keystone.identity.shadow_backends.base.ShadowUsersDriverBase`` の " "``create_federated_user`` メソッドにおける署名が変更されました。" msgid "" "[`bug 1642687 `_] Upon a " "successful upgrade, all existing ``identity providers`` will now be " "associated with a automatically created domain. Each ``identity provider`` " "that existed prior to the `Ocata` release will now have a ``domain_id`` " "field. The new domain will have an ``id`` (random UUID), a ``name`` (that " "will match the ``identity provider`` ID , and be ``enabled`` by default." 
msgstr "" "[`bug 1642687 `_] アップグ" "レードが成功すると、すべての既存の ``identity provider`` が自動的に作成された" "ドメインに関連付けられます。 `Ocata` リリースより前に存在していた``identity " "provider`` には ``domain_id`` フィールドが追加されました。新しいドメインは " "``id`` (ランダムな UUID) および ``identity provider`` ID と一致する " "``name`` を持ち、デフォルトでは ``enabled`` になります。" msgid "" "[`bug 1642687 `_] Users " "that authenticate with an ``identity provider`` will now have a " "``domain_id`` attribute, that is associated with the ``identity provider``." msgstr "" "[`bug 1642687 `_] " "``identity provider`` で認証するユーザーは、 ``domain_id`` 属性を持つようにな" "りました。これは、 ``identity provider`` と結びついています。" msgid "" "[`bug 1642687 `_] When " "registering an ``identity provider`` via the OS-FEDERATION API, it is now " "recommended to include a ``domain_id`` to associate with the ``identity " "provider`` in the request. Federated users that authenticate with the " "``identity provider`` will now be associated with the ``domain_id`` " "specified. If no ``domain_id`` is specified, then a domain will be " "automatically created." msgstr "" "[`bug 1642687 `_] OS-" "FEDERATION API を介して ``identity provider`` を登録する際には、要求内の " "``identity provider`` に関連付けるために ``domain_id`` を含むことが推奨されて" "います。 ``identity provider`` で認証された統合されたユーザーは、指定された " "``domain_id`` に関連付けられます。 ``domain_id`` を指定しないと、自動的にドメ" "インが作成されます。" msgid "" "[`bug 1642692 `_] When a " "`federation protocol` is deleted, all users that authenticated with the " "`federation protocol` will also be deleted." msgstr "" "[`bug 1642692 `_] 1つの" "`federation protocol` が削除されると、`federation protocol` で認証されたすべ" "てのユーザーも削除されます。" msgid "" "[`bug 1649138 `_] When " "using LDAP as an identity backend, the initial bind will now occur upon " "creation of a connection object, i.e. early on when performing LDAP queries, " "no matter whether the bind is authenticated or anonymous, so that any " "connection errors can be handled correctly and early." 
msgstr "" "[`bug 1649138 `_] 認証バッ" "クエンドとして LDAP を使用する場合、初期のバインドは接続オブジェクトの作成時" "に発生します。つまり、LDAP クエリーの実行の早期の段階で、バインドが認証されて" "いるか匿名であるかにかかわらず、接続エラーを正しく、早期に処理できます。" msgid "" "[`bug 1650676 `_] " "Authentication plugins now required ``AuthContext`` objects to be used. This " "has added security features to ensure information such as the ``user_id`` " "does not change between authentication methods being processed by the " "server. The ``keystone.controllers.Auth.authenticate`` method now requires " "the argument ``auth_context`` to be an actual ``AuthContext`` object." msgstr "" "[`bug 1650676 `_] 認証プラ" "グインは、 ``AuthContext`` オブジェクトを使用する必要があります。これにより、" "サーバーが処理する認証方法の間で ``user_id`` などの情報が変更されないようにす" "るためのセキュリティ機能が追加されました。 ``keystone.controllers.Auth." "authenticate`` メソッドは、引数 ``auth_context`` を実際の ``AuthContext`` オ" "ブジェクトにする必要があります。" msgid "" "[`bug 1651989 `_] Due to " "``bug 1547684``, when using the ``policy.v3cloudsample.json`` sample file, a " "domain admin token was being treated as a cloud admin. Since the " "``is_admin_project`` functionality only supports project-scoped tokens, we " "automatically set any domain scoped token to have the property " "``is_admin_project`` to ``False``." msgstr "" "[`bug 1651989 `_] ``bug " "1547684`` によって、``policy.v3cloudsample.json`` サンプルファイルを使用した" "時に、ドメイン管理者トークンがクラウド管理者として扱われれていました。" "``is_admin_project`` 機能がプロジェクトにスコープされたトークンのみをサポート" "しているので、 ``is_admin_project`` プロパティを持つすべてのドメインにスコー" "プされたトークンに ``False``に自動的に設定します。" msgid "" "[`bug 1656076 `_] The " "various plugins under ``keystone.controllers.Auth.authenticate`` now require " "``AuthContext`` objects to be returned." msgstr "" "[`bug 1656076 `_] " "``keystone.controllers.Auth.authenticate`` 配下の様々なプラグインは、 " "``AuthContext`` オブジェクトを返す必要があります。" msgid "" "[`bug 1659730 `_] The " "signature on the ``authenticate`` method of ``keystone.auth.plugins.base." "AuthMethodHandler`` has been updated. 
Third-party extensions that extend the " "abstract class (``AuthMethodHandler``) should update their code according to " "the new parameter names. The method signature has changed from::" msgstr "" "[`bug 1659730 `_] " "``keystone.auth.plugins.base.AuthMethodHandler`` の ``authenticate`` メソッド" "にある署名が変更されました。抽象クラス(``AuthMethodHandler``)を継承したサー" "ドパーティー製の拡張は、新しいパラメーター名にしたがって、そのコードを変更す" "る必要があります。メソッドの署名は以下から変更されました。" msgid "" "[`bug 1659995 `_] New " "options have been made available via the user create and update API (``POST/" "PATCH /v3/users``) call, the options will allow an admin to mark users as " "exempt from certain PCI requirements via an API." msgstr "" "[`bug 1659995 `_] 新しいオ" "プションは、ユーザー作成更新 API (``POST/PATCH /v3/users``)呼び出しを使用し" "て、管理者がユーザーを特定の PCI 要件から免除されるようにマークすることを可能" "にします。" msgid "" "[`bug 1659995 `_] The " "config option ``[security_compliance] password_expires_ignore_user_ids`` has " "been deprecated in favor of using the option value set, available via the " "user create and update API call" msgstr "" "[`bug 1659995 `_] 設定オプ" "ション ``[security_compliance] password_expires_ignore_user_ids`` は非推奨と" "なり、オプションの値セットの使用を推奨しています。これは、ユーザー作成および" "更新 API 呼び出しで利用できます。" msgid "" "[`bug 1670382 `_] The ldap " "config group_members_are_ids has been added to the whitelisted options " "allowing it to now be used in the domain config API and `keystone-manage " "domain_config_upload`" msgstr "" "[`bug 1670382 `_] LDAP 設定" "のgroup_members_are_ids がホワイトリストされたオプションに追加され、ドメイン" "設定 API と `keystone-manage domain_config_upload` で使用できるようになりまし" "た。" msgid "" "[`bug 1674415 `_] Fixed " "issue with translation of keystone error messages which was not happening in " "case of any error messages from identity API with locale being set." 
msgstr "" "[`bug 1674415 `_] ロケール" "が設定されている 認証 API から、どんなエラーメッセージも発生しないという、 " "Keystone エラーメッセージの翻訳に関する問題を修正しました。" msgid "" "[`bug 1676497 `_] `bindep` " "now correctly reports the `openssl-devel` binary dependency for rpm distros " "instead of `libssl-dev`." msgstr "" "[`bug 1676497 `_] `bindep` " "は、 `libssl-dev` の代わりに rpm ディストリビューションの `openssl-devel` バ" "イナリー依存関係を正しく報告するようになりました。" msgid "" "[`bug 1684994 `_] This " "catches the ldap.INVALID_CREDENTIALS exception thrown when trying to connect " "to an LDAP backend with an invalid username or password, and emits a message " "back to the user instead of the default 500 error message." msgstr "" "[`bug 1684994 `_] 無効な" "ユーザー名、またはパスワードを使用して LDAP バックエンドに接続しようとしたと" "きにスローされるldap.INVALID_CREDENTIALS 例外がキャッチされ、デフォルトの " "500 エラーメッセージではなくユーザーにメッセージが返されます。" msgid "" "[`bug 1687593 `_] Ensure " "that the URL used to make the request when creating OAUTH1 request tokens is " "also the URL that verifies the request token." msgstr "" "[`bug 1687593 `_] OAUTH1 要" "求トークンを作成するときの要求を行うために使用される URL が、要求トークンを検" "証する URL でもあることを確実にしました。" msgid "" "[`bug 1688188 `_] When " "creating an IdP, if a domain was generated for it and a conflict was raised " "while effectively creating the IdP in the database, the auto-generated " "domain is now cleaned up." msgstr "" "[`bug 1688188 `_] IdP を作" "成するときに、そのためのドメインが生成され、データベースに IdP を効果的に作成" "して競合が発生した場合、自動生成ドメインがクリーンアップされるようになりまし" "た。" msgid "" "[`bug 1689616 `_] " "Significant improvements have been made when performing a token flush on " "massive data sets." msgstr "" "[`bug 1689616 `_] 膨大な" "データセットに対してトークンフラッシュを実行するときの、重要な改善が行われま" "した。" msgid "" "[`bug 1696574 `_] All GET " "APIs within keystone now have support for HEAD, if not already implemented. " "All new HEAD APIs have the same response codes and headers as their GET " "counterparts. This aids in client-side processing, especially caching." 
msgstr "" "[`bug 1696574 `_] Keystone " "内のすべての\n" "GET API は、まだ実装されていない場合は HEAD をサポートします。すべての新しい " "HEAD API は、GET のものと同じレスポンスコードとヘッダーを持っています。 これ" "は、クライアント側の処理、特にキャッシングに役立ちます。" msgid "" "[`bug 1700852 `_] Keystone " "now supports caching of the `GET|HEAD /v3/users/{user_id}/projects` API in " "an effort to improve performance." msgstr "" "[`bug 1700852 `_] Keystone " "は、パフォーマンスを向上させるために、 `GET|HEAD /v3/users/{user_id}/" "projects` API のキャッシュをサポートしました。" msgid "" "[`bug 1701324 `_] Token " "bodies now contain only unique roles in the authentication response." msgstr "" "[`bug 1701324 `_] トークン" "本体は、認証応答に固有のロールのみを含むようになりました。" msgid "" "[`bug 1702211 `_] Password " "`created_at` field under some versions/deployments of MySQL would lose sub-" "second precision. This means that it was possible for passwords to be " "returned out-of-order when changed within one second (especially common in " "testing). This change stores password `created_at` and `expires_at` as an " "integer instead of as a DATETIME data-type." msgstr "" "[`bug 1702211 `_] MySQL の" "いくつかのバージョン/デプロイメントでは、パスワードの `created_at` フィールド" "は 1 秒未満の精度を失います。 これは、 1 秒以内にパスワードが変更されたときに" "パスワードが乱れる可能性があることを意味します(特にテストでは一般的です)。" "この変更は、パスワードの `created_at` と `expires_at` を DATETIME データ型で" "はなく整数として保存します。" msgid "" "[`bug 1703369 `_] There " "was a typo for the identity:get_identity_provider rule in the default " "``policy.json`` file in previous releases. The default value for that rule " "was the same as the default value for the default rule (restricted to admin) " "so this typo was not readily apparent. Anyone customizing this rule should " "review their settings and confirm that they did not copy that typo. More " "context regarding the purpose of this backport can be found in the bug " "report." 
msgstr "" "[`bug 1703369 `_] 以前のリ" "リースのデフォルトの ``policy.json`` ファイルには、identity:" "get_identity_provider ルールにタイプミスがありました。そのルールのデフォルト" "値はデフォルトルールのデフォルト値(adminに制限されています)と同じで、このタ" "イプミスが容易には判明しませんでした。このルールをカスタマイズしたら、設定を" "確認して、そのタイプミスをコピーしていないことを確認する必要があります。この" "バックポートの目的に関する詳細は、バグレポートに記載されています。" msgid "" "[`bug 1703369 `_] There " "was a typo for the identity:get_identity_provider rule in the default " "``policy.json`` file in previous releases. The default value for that rule " "was the same as the default value for the default rule (restricted to admin) " "so this typo was not readily apparent. Anyone customizing this rule should " "review their settings and confirm that they did not copy that typo. " "Particularly given that the default rule is being removed in Pike with the " "move of policy into code." msgstr "" "[`bug 1703369 `_] 以前のリ" "リースのデフォルトの ``policy.json`` ファイルには、identity:" "get_identity_provider ルールにタイプミスがありました。そのルールのデフォルト" "値はデフォルトルールのデフォルト値(adminに制限されています)と同じで、このタ" "イプミスが容易には判明しませんでした。このルールをカスタマイズしたら、設定を" "確認して、そのタイプミスをコピーしていないことを確認する必要があります。特に" "ポリシーのコードへの移行と伴に、 Pike ではデフォルトルールが削除されているこ" "とに注意してください。" msgid "" "[`bug 1704205 `_] All " "users and groups are required to have a name. Prior to this fix, Keystone " "was not properly enforcing this for LDAP users and groups. Keystone will now " "ignore users and groups that do not have a value for the LDAP attribute " "which Keystone has been configured to use for that entity's name." msgstr "" "[`bug 1704205 `_] すべての" "ユーザーとグループには名前が必要です。この修正に先立って、 Keystone は LDAP " "ユーザーとグループに対してこれを適切に実施していませんでした。 Keystone は、" "そのエンティティの名前に使用するように設定されている LDAP 属性の値を持たない" "ユーザーおよびグループを無視します。" msgid "" "[`bug 1705485 `_] A " "`previous change `_ removed policy " "from the self-service password API. Since a user is required to authenticate " "to change their password, protection via policy didn't necessarily make " "sense. 
This change removes the default policy from code, since it is no " "longer required or used by the service. Note that administrative password " "resets for users are still protected via policy through a separate endpoint." msgstr "" "[`bug 1705485 `_] 前回の変" "更`_ ではセルフサービスパスワード " "API からポリシーが削除されました。 ユーザーはパスワードを変更するために認証す" "る必要があるため、ポリシーによる保護は必ずしも意味をなさないものです。この変" "更により、デフォルトのポリシーはコードから削除されます。サービスで必要とされ" "なくなったためです。ユーザーの管理パスワードのリセットは、ポリシーによって別" "のエンドポイントを介して保護されます。" msgid "" "[`bug 1705485 `_] The " "`change_password` protection policy can be removed from file-based policies. " "This policy is no longer used to protect the self-service password change " "API since the logic was moved into code. Note that the administrative " "password reset functionality is still protected via policy on the " "`update_user` API." msgstr "" "[`bug 1705485 `_] " "`change_password` 保護ポリシーは、ファイルベースのポリシーから削除することが" "できます。このポリシーは、ロジックがコードに移行されたため、セルフサービスパ" "スワード変更 API を保護するために使用されなくなりました。管理パスワードのリ" "セット機能は `update_user` API のポリシーで保護されています。" msgid "" "[`bug 1718747 `_] As part " "of solving a regression in the identity SQL backend that prevented domains " "containing users from being deleted, a notification callback was altered so " "that users would only be deleted if the identity backend is SQL. If you have " "a custom identity backend that is not read-only, deleting a domain in " "keystone will not delete the users in your backend unless your driver has an " "is_sql property that evaluates to true." msgstr "" "[`bug 1718747 `_] ユーザー" "を含むドメインが削除されないようにする認証 SQL バックエンドのリグレッションを" "解決する一環として、認証バックエンドが SQL の場合にのみユーザーが削除されるよ" "うに通知コールバックが変更されました。読み取り専用ではないカスタム認証バック" "エンドを使用している場合、そのドライバーで is_sql プロパティが true と評価さ" "れない限り、Keystone でドメインを削除してもバックエンドのユーザーは削除されま" "せん。" msgid "" "[`bug 1718747 `_] Fixes a " "regression where deleting a domain with users in it caues a server error. " "This bugfix restores the previous behavior of deleting the users namespaced " "in the domain. 
This only applies when using the SQL identity backend." msgstr "" "[`bug 1718747 `_] ドメイン" "を含まれるユーザーごと削除するときにサーバーエラーが発生するというリグレッ" "ションが修正されています。このバグ修正は、ドメインの名前空間内に置かれている" "ユーザーを削除するという以前の動作を復元します。これは、SQL 認証バックエンド" "を使用する場合にのみ適用されます。" msgid "" "[`bug 1727099 `_] When " "users try to changes their password, the total number which includes the new " "password should not be greater or equal to the " "``unique_last_password_count`` config options. But the help and error " "messages for this scenario are not described clearly. Now the messges are " "updated to be more clear." msgstr "" "[`bug 1727099 `_] ユーザー" "がパスワードを変更しようとするとき、新しいパスワードは、新しいパスワードを含" "んで、設定オプション ``unique_last_password_count`` 個以内で一意でなければな" "りません。 しかし、ヘルプとエラーメッセージは明確は説明していませんでした。" "メッセージは更新され、より明確になりました。" msgid "" "[`bug 1727726 `_] All " "users and groups are required to have a name. Prior to this fix, Keystone " "was allowing LDAP users and groups whose name has only empty white spaces. " "Keystone will now ignore users and groups that do have only white spaces as " "value for the LDAP attribute which Keystone has been configured to use for " "that entity's name." msgstr "" "[`bug 1727726 `_] すべての" "ユーザーとグループには名前が必要です。この修正以前は、 Keystone は名前に空白" "だけがある LDAP ユーザーとグループを許可していました。Keystone は、使用するよ" "う設定されたエンティティ名の LDAP 属性が、値として空白のみを持つユーザーおよ" "びグループを無視するようになりました。" msgid "" "[`bug 1728690 `_] The " "``keystone-manage bootstrap`` command will only create the admin role and " "will no longer create a default member role. Please create any additional " "roles you need after running ``bootstrap`` by using the ``openstack role " "create`` command." 
msgstr "" "[`bug 1728690 `_] " "``keystone-manage bootstrap`` コマンドは admin ロールを作成するだけで、もはや" "デフォルトのメンバーロールを作成しません。`` bootstrap`` を実行した後、" "``openstack role create`` コマンドを使って必要なロールを追加してください。" msgid "" "[`bug 1728690 `_] The " "``member_role_id`` and ``member_role_name`` config options were used to " "create a default member role for keystone v2 role assignments, but with the " "removal of the v2 API it is no longer necessary to create this default role. " "This option is deprecated and will be removed in the S release. If you are " "depending on having a predictable role name and ID for this member role you " "will need to update your tooling." msgstr "" "[`bug 1728690 `_] " "``member_role_id`` と ``member_role_name`` 設定オプションは、Keystone v2 の" "ロール割り当てのためのデフォルトのメンバーロールを作成するために使用されまし" "たが、v2 API の削除により、このデフォルトロールを作成する必要はありません。こ" "のオプションは非推奨となり、S リリースで削除予定です。想定しているロール名と " "ID に依存している場合、ユーザーはツールを更新する必要があります。" msgid "" "[`bug 1733754 `_] Keystone " "didn't validate the OS-TRUST:trust key of the authentication request is " "actually a dictionary. This results in a 500 Internal Server Error when it " "should really be a 400 Bad Request." msgstr "" "[`bug 1733754 `_] Keystone " "は認証リクエストの OS-TRUST:trust 鍵が辞書型かを検証しませんでした。これによ" "り、実際には 400 Bad Request が発生すべき場合に、500 Internal Server Error が" "発生します。" msgid "" "[`bug 1734244 `_] Return a " "400 status code instead of a 500 when creating a trust with extra attributes " "in the roles parameter." msgstr "" "[`bug 1734244 `_] roles パ" "ラメーターに余分な属性を含んで信頼を作成するとき、500 ではなく 400 のステータ" "スコードを返します。" msgid "" "[`bug 1736875 `_] Add " "schema check to return a 400 status code instead of a 500 when authorize a " "request token with non-id attributes in the `roles` parameter." msgstr "" "[`bug 1736875 `_] `roles` " "パラメータに非 id 属性を持つリクエストトークンを認可するとき、スキーマチェッ" "クを追加して 500 ではなく 400 のステータスコードを返します。" msgid "" "[`bug 1738895 `_] Fixed " "the bug that federated users can't be listed by `name` filter. 
Now when list " "users by `name`, Keystone will query both local user backend and shadow user " "backend." msgstr "" "[`bug 1738895 `_] `name` " "フィルタで統合ユーザーをリストアップできないバグを修正しました。ユーザーを " "`name` でリストするとき、Keystone はローカルユーザーバックエンドとシャドウ" "ユーザーバックエンドの両方に問い合わせるようになりました。" msgid "" "[`bug 1740951 `_] A new " "method was added that made it so oslo.policy sample generation scripts can " "be used with keystone. The ``oslopolicy-policy-generator`` script will now " "generate a policy file containing overrides and defaults registered in code." msgstr "" "[`bug 1740951 `_] oslo." "policy のサンプル生成スクリプトを Keystone で使用できるようにする新しい方法が" "追加されました。 ``oslopolicy-policy-generator`` は、上書きとコードに登録され" "ているデフォルトを含むポリシーファイルを生成します。" msgid "" "[`bug 1747694 `_] The " "trust API reference declared support for ``page`` and ``per_page`` query " "parameters, when the actual trust API didn't support them. The API reference " "has been updated accordingly." msgstr "" "[`bug 1747694 `_] trust " "API リファレンスは ``page`` と ``per_page`` クエリパラメータのサポートを定義" "していますが、実際の trust API はサポートしていませんでした。API リファレンス" "を実装通りに更新しました。" msgid "" "[`bug 96869 `_] A pair of " "configuration options have been added to the ``[resource]`` section to " "specify a special ``admin`` project: ``admin_project_domain_name`` and " "``admin_project_name``. If these are defined, any scoped token issued for " "that project will have an additional identifier ``is_admin_project`` added " "to the token. This identifier can then be checked by the policy rules in the " "policy files of the services when evaluating access control policy for an " "API. Keystone does not yet support the ability for a project acting as a " "domain to be the admin project. That will be added once the rest of the " "code for projects acting as domains is merged." 
msgstr "" "[`bug 96869 `_] " "``[resource]`` セクションに、特別な ``admin`` プロジェクト " "``admin_project_domain_name`` と ``admin_project_name`` を指定するための、一" "対の設定オプションが追加されました。これらが定義されている場合、そのプロジェ" "クトに対して発行されたスコープ付きトークンは、トークンに追加された追加の識別" "子 ``is_admin_project`` を持ちます。この識別子は、 API がアクセス制御ポリシー" "を評価するときに、サービスのポリシーファイル内のポリシールールによってチェッ" "クできます。 Keystone は、まだドメインとして動作しているプロジェクトが管理プ" "ロジェクトになる機能をサポートしていません。 これは、ドメインとして動作してい" "るプロジェクトのための残りのコードがマージされると追加されます。" msgid "" "[`bug 968696 `_] The work " "to introduce `system-scope `_ in addition to associating `scope types `_ to operations with ``oslo.policy`` will give project developers the " "ability to fix `bug 968696 `_." msgstr "" "[`bug 968696 `_] `system-" "scope `_ を導入" "する作業、および ``oslo.policy`` による `スコープ種別 `_ の操作への割り当てとともに、プロジェクト開発者が `バグ 968696 " "`_ を修正できるようにしまし" "た。" msgid "" "`[DEFAULT] crypt_strength` is deprecated in favor of `[identity] " "password_hash_rounds`. Note that `[DEFAULT] crypt_strength` is still used " "when `[identity] rolling_upgrade_password_hash_compat` is set to `True`." msgstr "" "`[DEFAULT] crypt_strength` は、 `[identity] password_hash_rounds` のために 非" "推奨となりました。 `[identity] rolling_upgrade_password_hash_compat` が ` " "True` に設定されているとき、 `[DEFAULT] crypt_strength` は引き続き使用される" "ことに注意してください。" msgid "" "`[`blueprint policy-in-code `_] Keystone now supports the ability to register default " "policies in code. This makes policy file maintenance easier by allowing " "duplicated default policies to be removed from the policy file. The only " "policies that should exist within a deployment's policy file after Pike " "should be policy overrides. Note that there is no longer a default value for " "the default rule. That rule is only checked when the more specific rule " "cannot be found, and with policy in code all rules should be found in code " "even if they are not in the policy file. 
To generate sample policy files " "from default values, prune default policies from existing policy files, or " "familiarize yourself with general policy usage, please see the `usage " "documentation `_ provided in oslo.policy." msgstr "" "`[`blueprint policy-in-code `_] Keystone は、コードでデフォルトポリシーを登録する機能をサ" "ポートしました。 これにより、重複したデフォルトポリシーをポリシーファイルから" "削除することで、ポリシーファイルのメンテナンスが容易になります。Pike 以後のデ" "プロイにあるポリシーファイル内に存在する唯一のポリシーは、ポリシーを上書きさ" "れる必要があります。 デフォルトルールのデフォルト値はもはや存在しないことに注" "意してください。 このルールは、さらに指定されたルールが見つからない場合にのみ" "チェックされ、コードによるのポリシーでは、すべてのルールがポリシーファイル内" "になくてもコード内に見つかるようにする必要があります。デフォルト値からサンプ" "ルポリシーファイルを生成したり、既存のポリシーファイルからデフォルトポリシー" "を削除したり、ポリシーの一般的な使い方を理解するには、oslo.policy で提供され" "ている `usage documentation `_ を参照してください。 " msgid "``add user to group``" msgstr "``ユーザーのグループへの追加``" msgid "``create group``" msgstr "``グループの作成``" msgid "``create user``" msgstr "``ユーザーの作成``" msgid "``delete group``" msgstr "``グループの削除``" msgid "``delete user``" msgstr "``ユーザーの削除``" msgid "``issue_v2_token``" msgstr "``issue_v2_token``" msgid "``issue_v3_token``" msgstr "``issue_v3_token``" msgid "" "``keystone-manage db_sync`` will no longer create the Default domain. This " "domain is used as the domain for any users created using the legacy v2.0 " "API. A default domain is created by ``keystone-manage bootstrap`` and when a " "user or project is created using the legacy v2.0 API." 
msgstr "" "``keystone-manage db_sync`` は、デフォルトドメインを作成しません。 このドメイ" "ンは、従来の v2.0 API を使用して作成されたすべてのユーザーのドメインとして使" "用されます。 デフォルトのドメインは、 ``keystone-manage bootstrap`` 、あるい" "はユーザーやプロジェクトが従来の v2.0 API を使って作成されたときに作成されま" "す。" msgid "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgstr "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgid "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgstr "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgid "``keystone.token.persistence.backends.kvs.Token``" msgstr "``keystone.token.persistence.backends.kvs.Token``" msgid "``keystone/common/cache/backends/memcache_pool``" msgstr "``keystone/common/cache/backends/memcache_pool``" msgid "``keystone/common/cache/backends/mongo``" msgstr "``keystone/common/cache/backends/mongo``" msgid "``keystone/common/cache/backends/noop``" msgstr "``keystone/common/cache/backends/noop``" msgid "``keystone/contrib/admin_crud``" msgstr "``keystone/contrib/admin_crud``" msgid "``keystone/contrib/endpoint_filter``" msgstr "``keystone/contrib/endpoint_filter``" msgid "``keystone/contrib/federation``" msgstr "``keystone/contrib/federation``" msgid "``keystone/contrib/oauth1``" msgstr "``keystone/contrib/oauth1``" msgid "``keystone/contrib/revoke``" msgstr "``keystone/contrib/revoke``" msgid "``keystone/contrib/simple_cert``" msgstr "``keystone/contrib/simple_cert``" msgid "``keystone/contrib/user_crud``" msgstr "``keystone/contrib/user_crud``" msgid "" "``openstack_user_domain`` and ``openstack_project_domain`` attributes were " "added to SAML assertion in order to map user and project domains, " "respectively." 
msgstr "" "ユーザーとプロジェクトのドメインをそれぞれマッピングするために、 " "``openstack_user_domain`` と ``openstack_project_domain`` 属性が SAML アサー" "ションに追加されました。" msgid "``remove user from group``" msgstr "``ユーザーのグループからの削除``" msgid "``update group``" msgstr "``グループの更新``" msgid "``update user``" msgstr "``ユーザーの更新``" msgid "``validate_non_persistent_token``" msgstr "``validate_non_persistent_token``" msgid "``validate_v2_token``" msgstr "``validate_v2_token``" msgid "``validate_v3_token``" msgstr "``validate_v3_token``" msgid "all config options under ``[kvs]`` in `keystone.conf`" msgstr "`keystone.conf` の ``[kvs]`` にあるすべての設定オプション" msgid "and will return a list of mappings for a given domain ID." msgstr "指定されたドメイン ID のマッピングの一覧が返されます。" msgid "eq - password expires at the timestamp" msgstr "eq - パスワードはそのタイムスタンプで有効期限が切れます" msgid "gt - password expires after the timestamp" msgstr "gt - パスワードはそのタイムスタンプより後に有効期限が切れます" msgid "gte - password expires at or after the timestamp" msgstr "gte - パスワードはそのタイムスタンプ以降に有効期限が切れます" msgid "lt - password expires before the timestamp" msgstr "lt - パスワードはそのタイムスタンプより前に有効期限が切れます" msgid "lte - password expires at or before timestamp" msgstr "lte - パスワードはそのタイムスタンプ以降に有効期限が切れます" msgid "neq - password expires not at the timestamp" msgstr "neq - パスワードはそのタイムスタンプではないときに有効期限が切れます" msgid "" "stats_monitoring and stats_reporting paste filters have been removed, so " "references to it must be removed from the ``keystone-paste.ini`` " "configuration file." 
msgstr "" "stats_monitoring と stats_reporting の貼り付けフィルタは削除されているので、 " "``keystone-paste.ini`` 設定ファイルから参照を削除する必要があります。" msgid "the config option ``[memcached] servers`` in `keystone.conf`" msgstr "`keystone.conf` の ``[memcached] servers`` の設定オプション" msgid "to::" msgstr "変更後::" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.4301152 keystone-26.0.0/releasenotes/source/locale/ko_KR/0000775000175000017500000000000000000000000021720 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000023505 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/locale/ko_KR/LC_MESSAGES/releasenotes.po0000664000175000017500000001164500000000000026545 0ustar00zuulzuul00000000000000# Sungjin Kang , 2017. #zanata # Ian Y. Choi , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: Keystone Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-05-16 18:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-14 07:39+0000\n" "Last-Translator: Ian Y. Choi \n" "Language-Team: Korean (South Korea)\n" "Language: ko_KR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "'/' and ',' are not allowed to be in a tag" msgstr "'/' 및 ',' 는 태그 내에서 허용하지 않습니다" msgid "" "**Experimental** - Domain specific configuration options can be stored in " "SQL instead of configuration files, using the new REST APIs." msgstr "" "**실험적 기능** - 도메인에 특화된 구성 옵션을 구성 파일 대신 SQL에 새로운 " "REST API를 사용하여 저장 가능합니다." msgid "" "**Experimental** - Keystone now supports tokenless authorization with X.509 " "SSL client certificate." 
msgstr "" "**실험적 기능** - Keystone이 이제 X.509 SSL 클라이언트 인증서를 사용한 토큰" "이 없는 인증 방식 (tokenless authorization)을 지원합니다." msgid "10.0.0" msgstr "10.0.0" msgid "10.0.1" msgstr "10.0.1" msgid "10.0.3" msgstr "10.0.3" msgid "11.0.0" msgstr "11.0.0" msgid "11.0.1" msgstr "11.0.1" msgid "11.0.3" msgstr "11.0.3" msgid "12.0.0" msgstr "12.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.1.0" msgstr "8.1.0" msgid "9.0.0" msgstr "9.0.0" msgid "9.2.0" msgstr "9.2.0" msgid "Bug Fixes" msgstr "버그 수정" msgid "Critical Issues" msgstr "치명적인 이슈" msgid "Current Series Release Notes" msgstr "현재 시리즈 릴리즈 노트" msgid "Deprecation Notes" msgstr "지원 종료된 기능 노트" msgid "" "For additional details see: `event notifications `_" msgstr "" "추가적으로 자세한 사항을 확인하려면 : `event notifications `_" msgid "Keystone Release Notes" msgstr "Keystone 릴리즈 노트" msgid "Liberty Series Release Notes" msgstr "Liberty 시리즈 릴리즈 노트" msgid "Mitaka Series Release Notes" msgstr "Mitaka 시리즈 릴리즈 노트" msgid "New Features" msgstr "새로운 기능" msgid "Newton Series Release Notes" msgstr "Newton 시리즈 릴리즈 노트" msgid "Ocata Series Release Notes" msgstr "Ocata 시리즈 릴리즈 노트" msgid "Other Notes" msgstr "기타 기능" msgid "Pike Series Release Notes" msgstr "Pike 시리즈 릴리즈 노트" msgid "Queens Series Release Notes" msgstr "Queens 시리즈 릴리즈 노트" msgid "Security Issues" msgstr "보안 이슈" msgid "To::" msgstr "To::" msgid "Upgrade Notes" msgstr "업그레이드 노트" msgid "``add user to group``" msgstr "``add user to group``" msgid "``create group``" msgstr "``create group``" msgid "``create user``" msgstr "``create user``" msgid "``delete group``" msgstr "``delete group``" msgid "``delete user``" msgstr "``delete user``" msgid "``issue_v2_token``" msgstr "``issue_v2_token``" msgid "``issue_v3_token``" msgstr "``issue_v3_token``" msgid "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgstr "``keystone.common.kvs.backends.inmemdb.MemoryBackend``" msgid "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgstr "``keystone.common.kvs.backends.memcached.MemcachedBackend``" msgid 
"``keystone.token.persistence.backends.kvs.Token``" msgstr "``keystone.token.persistence.backends.kvs.Token``" msgid "``keystone/common/cache/backends/memcache_pool``" msgstr "``keystone/common/cache/backends/memcache_pool``" msgid "``keystone/common/cache/backends/mongo``" msgstr "``keystone/common/cache/backends/mongo``" msgid "``keystone/common/cache/backends/noop``" msgstr "``keystone/common/cache/backends/noop``" msgid "``keystone/contrib/admin_crud``" msgstr "``keystone/contrib/admin_crud``" msgid "``keystone/contrib/endpoint_filter``" msgstr "``keystone/contrib/endpoint_filter``" msgid "``keystone/contrib/federation``" msgstr "``keystone/contrib/federation``" msgid "``keystone/contrib/oauth1``" msgstr "``keystone/contrib/oauth1``" msgid "``keystone/contrib/revoke``" msgstr "``keystone/contrib/revoke``" msgid "``keystone/contrib/simple_cert``" msgstr "``keystone/contrib/simple_cert``" msgid "``keystone/contrib/user_crud``" msgstr "``keystone/contrib/user_crud``" msgid "``remove user from group``" msgstr "``remove user from group``" msgid "``update group``" msgstr "``update group``" msgid "``update user``" msgstr "``update user``" msgid "``validate_non_persistent_token``" msgstr "``validate_non_persistent_token``" msgid "``validate_v2_token``" msgstr "``validate_v2_token``" msgid "``validate_v3_token``" msgstr "``validate_v3_token``" msgid "to::" msgstr "to::" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000021600000000000021453 0ustar00zuulzuul00000000000000============================= Mitaka Series Release Notes ============================= .. 
release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/newton.rst0000664000175000017500000000023200000000000021515 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000021270 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/pike.rst0000664000175000017500000000124200000000000021135 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike :ignore-notes: bp-allow-expired-f5d845b9601bc1ef.yaml, bp-shadow-mapping-06fc7c71a401d707.yaml, bp-support-federated-attr-94084d4073f50280.yaml, integrate-osprofiler-ad0e16a542b12899.yaml, bug-1561054-dbe88b552a936a05.yaml, bug-1642687-5497fb56fe86806d.yaml, bug-1642687-c7ab1c9be152db20.yaml, bug-1642687-5497fb56fe86806d.yaml, bug-1659995-f3e716de743b7291.yaml, bug-1636950-8fa1a47fce440977.yaml, bug-1659995-f3e716de743b7291.yaml, bug-1652012-b3aea7c0d5affdb6.yaml ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021503 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000021330 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000021323 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000021327 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000122600000000000022336 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. 
release-notes:: :ignore-notes: bug-1806762-c3bfc71cb9bb94f3.yaml, bug-1817313-c11481e6eed29ec2.yaml, bug-1806762-2092fee9f6c87dc3.yaml, bug-1805369-ed98d3fcfafb5c43.yaml, scope-and-default-roles-a733c235731bb558.yaml, bug-1806762-09f414995924db23.yaml, bug-1801873-0eb9a5ec3e801190.yaml, bug-1750669-dfce859550126f03.yaml, bug-1750673-b53f74944d767ae9.yaml, bug-1748027-decc2e11154b97cf.yaml, bug-1750660-e2a360ddd6790fc4.yaml, bug-1819036-e2d24655c70d0aad.yaml, bug-1805406-252b45d443af20b3.yaml, ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021532 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/victoria.rst0000664000175000017500000000022000000000000022020 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: unmaintained/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000021400000000000021636 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: unmaintained/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/xena.rst0000664000175000017500000000020000000000000021131 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. 
release-notes:: :branch: unmaintained/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/yoga.rst0000664000175000017500000000020000000000000021135 0ustar00zuulzuul00000000000000========================= Yoga Series Release Notes ========================= .. release-notes:: :branch: unmaintained/yoga ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/releasenotes/source/zed.rst0000664000175000017500000000017400000000000020772 0ustar00zuulzuul00000000000000======================== Zed Series Release Notes ======================== .. release-notes:: :branch: unmaintained/zed ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/reno.yaml0000664000175000017500000000023000000000000015305 0ustar00zuulzuul00000000000000--- # Ignore the kilo-eol tag because that branch does not work with reno # and contains no release notes. 
closed_branch_tag_re: "(.+)(?=2.0.0 # Apache-2.0 WebOb>=1.7.1 # MIT Flask!=0.11,>=1.0.2 # BSD Flask-RESTful>=0.3.5 # BSD cryptography>=2.7 # BSD/Apache-2.0 SQLAlchemy>=1.4.0 # MIT stevedore>=1.20.0 # Apache-2.0 passlib>=1.7.0 # BSD python-keystoneclient>=3.8.0 # Apache-2.0 keystonemiddleware>=7.0.0 # Apache-2.0 bcrypt>=3.1.3 # Apache-2.0 scrypt>=0.8.0 # BSD oslo.cache>=1.26.0 # Apache-2.0 oslo.config>=6.8.0 # Apache-2.0 oslo.context>=2.22.0 # Apache-2.0 oslo.messaging>=5.29.0 # Apache-2.0 oslo.db>=6.0.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.44.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=3.10.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=3.33.0 # Apache-2.0 oauthlib>=0.6.2 # BSD pysaml2>=5.0.0 PyJWT>=1.6.1 # MIT dogpile.cache>=1.0.2 # BSD jsonschema>=3.2.0 # MIT pycadf!=2.0.0,>=1.1.0 # Apache-2.0 msgpack>=0.5.0 # Apache-2.0 osprofiler>=1.4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6221123 keystone-26.0.0/setup.cfg0000664000175000017500000001144000000000000015304 0ustar00zuulzuul00000000000000[metadata] name = keystone summary = OpenStack Identity description_file = README.rst author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/keystone/latest python_requires = >=3.8 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 [files] data_files = etc/keystone 
= etc/sso_callback_template.html packages = keystone [extras] ldap = python-ldap>=3.0.0 # PSF ldappool>=2.3.1 # MPL memcache = python-memcached>=1.56 # PSF [entry_points] console_scripts = keystone-manage = keystone.cmd.manage:main keystone-status = keystone.cmd.status:main wsgi_scripts = keystone-wsgi-admin = keystone.server.wsgi:initialize_admin_application keystone-wsgi-public = keystone.server.wsgi:initialize_public_application keystone.assignment = sql = keystone.assignment.backends.sql:Assignment keystone.auth.application_credential = default = keystone.auth.plugins.application_credential:ApplicationCredential keystone.auth.external = default = keystone.auth.plugins.external:DefaultDomain DefaultDomain = keystone.auth.plugins.external:DefaultDomain Domain = keystone.auth.plugins.external:Domain keystone.auth.kerberos = default = keystone.auth.plugins.external:KerberosDomain keystone.auth.oauth1 = default = keystone.auth.plugins.oauth1:OAuth keystone.auth.openid = default = keystone.auth.plugins.mapped:Mapped keystone.auth.password = default = keystone.auth.plugins.password:Password keystone.auth.saml2 = default = keystone.auth.plugins.mapped:Mapped keystone.auth.token = default = keystone.auth.plugins.token:Token keystone.auth.totp = default = keystone.auth.plugins.totp:TOTP keystone.auth.x509 = default = keystone.auth.plugins.mapped:Mapped keystone.auth.mapped = default = keystone.auth.plugins.mapped:Mapped keystone.catalog = sql = keystone.catalog.backends.sql:Catalog templated = keystone.catalog.backends.templated:Catalog keystone.credential = sql = keystone.credential.backends.sql:Credential keystone.credential.provider = fernet = keystone.credential.providers.fernet:Provider keystone.identity = ldap = keystone.identity.backends.ldap:Identity sql = keystone.identity.backends.sql:Identity keystone.identity.id_generator = sha256 = keystone.identity.id_generators.sha256:Generator keystone.identity.id_mapping = sql = 
keystone.identity.mapping_backends.sql:Mapping keystone.identity.shadow_users = sql = keystone.identity.shadow_backends.sql:ShadowUsers keystone.policy = rules = keystone.policy.backends.rules:Policy sql = keystone.policy.backends.sql:Policy keystone.resource = sql = keystone.resource.backends.sql:Resource keystone.resource.domain_config = sql = keystone.resource.config_backends.sql:DomainConfig keystone.role = sql = keystone.assignment.role_backends.sql:Role keystone.token.provider = fernet = keystone.token.providers.fernet:Provider jws = keystone.token.providers.jws:Provider keystone.receipt.provider = fernet = keystone.receipt.providers.fernet:Provider keystone.trust = sql = keystone.trust.backends.sql:Trust keystone.unified_limit = sql = keystone.limit.backends.sql:UnifiedLimit keystone.endpoint_filter = sql = keystone.catalog.backends.sql:Catalog keystone.endpoint_policy = sql = keystone.endpoint_policy.backends.sql:EndpointPolicy keystone.federation = sql = keystone.federation.backends.sql:Federation keystone.oauth1 = sql = keystone.oauth1.backends.sql:OAuth1 keystone.revoke = sql = keystone.revoke.backends.sql:Revoke keystone.application_credential = sql = keystone.application_credential.backends.sql:ApplicationCredential keystone.unified_limit.model = flat = keystone.limit.models.flat:FlatModel strict_two_level = keystone.limit.models.strict_two_level:StrictTwoLevelModel oslo.config.opts = keystone = keystone.conf.opts:list_opts oslo.config.opts.defaults = keystone = keystone.conf:set_external_opts_defaults oslo.policy.policies = keystone = keystone.common.policies:list_rules oslo.policy.enforcer = keystone = keystone.common.rbac_enforcer.policy:get_enforcer keystone.server_middleware = cors = oslo_middleware:CORS sizelimit = oslo_middleware:RequestBodySizeLimiter http_proxy_to_wsgi = oslo_middleware:HTTPProxyToWSGI osprofiler = osprofiler.web:WsgiMiddleware request_id = oslo_middleware:RequestId debug = oslo_middleware:Debug [egg_info] tag_build = tag_date 
= 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/setup.py0000664000175000017500000000126000000000000015174 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/test-requirements.txt0000664000175000017500000000112700000000000017725 0ustar00zuulzuul00000000000000hacking flake8-docstrings bashate~=2.1.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 freezegun>=0.3.6 # Apache-2.0 # Include drivers for opportunistic testing. oslo.db[fixtures,mysql,postgresql]>=6.0.0 # Apache-2.0 # computes code coverage percentages coverage!=4.4,>=4.0 # Apache-2.0 # fixture stubbing fixtures>=3.0.0 # Apache-2.0/BSD # xml parsing lxml>=4.5.0 # BSD oslotest>=3.2.0 # Apache-2.0 # test wsgi apps without starting an http server WebTest>=2.0.27 # MIT testtools>=2.2.0 # MIT tempest>=17.1.0 # Apache-2.0 # Functional tests. 
requests>=2.14.2 # Apache-2.0 bandit>=1.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1727867786.6181123 keystone-26.0.0/tools/0000775000175000017500000000000000000000000014623 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/tools/cover.sh0000664000175000017500000000465700000000000016311 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2015: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Code from https://github.com/openstack/rally/blob/master/tests/ci/cover.sh ALLOWED_EXTRA_MISSING=0 show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } # Stash uncommitted changes, checkout master and save coverage report uncommited=$(git status --porcelain | grep -v "^??") [[ -n $uncommited ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t keystone_coverageXXXXXXX) find . -type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" coverage report > $baseline_report cat $baseline_report baseline_missing=$(awk 'END { print $3 }' $baseline_report) # Checkout back and unstash uncommitted changes (if any) git checkout - [[ -n $uncommited ]] && git stash pop > /dev/null # Generate and save coverage report current_report=$(mktemp -t keystone_coverageXXXXXXX) find . 
-type f -name "*.pyc" -delete && python setup.py testr --coverage --testr-args="$*" coverage report > $current_report current_missing=$(awk 'END { print $3 }' $current_report) # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ $allowed_missing -ge $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "I believe you can cover all your code with 100% coverage!" else echo "Thank you! You are awesome! Keep writing unit tests! :)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we should keep our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/tools/fast8.sh0000775000175000017500000000120500000000000016205 0ustar00zuulzuul00000000000000#!/bin/bash NUM_COMMITS=${FAST8_NUM_COMMITS:-1} if [[ $NUM_COMMITS = "smart" ]]; then # Run on all commits not submitted yet # (sort of -- only checks vs. "master" since this is easy) NUM_COMMITS=$(git cherry master | wc -l) fi echo "Checking last $NUM_COMMITS commits." cd $(dirname "$0")/.. 
CHANGED=$(git diff --name-only HEAD~${NUM_COMMITS} | tr '\n' ' ') # Skip files that don't exist # (have been git rm'd) CHECK="" for FILE in $CHANGED; do if [ -f "$FILE" ]; then CHECK="$CHECK $FILE" fi done diff -u --from-file /dev/null $CHECK | flake8 --diff --ignore=D100,D101,D102,D103,D104,E305,E402,W503,W504,W605 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/tools/sample_data.sh0000775000175000017500000002116000000000000017434 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Sample initial data for Keystone using python-openstackclient # # This script is based on the original DevStack keystone_data.sh script. # # It demonstrates how to bootstrap Keystone with an administrative user # using the `keystone-manage bootstrap` command. # # Disable creation of endpoints by setting DISABLE_ENDPOINTS environment variable. # Use this with the Catalog Templated backend. # # Project User Roles # ------------------------------------------------------- # demo admin admin # service glance service # service nova service # service cinder service # service swift service # service neutron service # By default, passwords used are those in the OpenStack Install and Deploy Manual. # One can override these (publicly known, and hence, insecure) passwords by setting the appropriate # environment variables. 
A common default password for all the services can be used by # setting the "SERVICE_PASSWORD" environment variable. # Test to verify that the openstackclient is installed, if not exit type openstack >/dev/null 2>&1 || { echo >&2 "openstackclient is not installed. Please install it to use this script. Aborting." exit 1 } ADMIN_PASSWORD=${ADMIN_PASSWORD:-secret} NOVA_PASSWORD=${NOVA_PASSWORD:-${SERVICE_PASSWORD:-nova}} GLANCE_PASSWORD=${GLANCE_PASSWORD:-${SERVICE_PASSWORD:-glance}} CINDER_PASSWORD=${CINDER_PASSWORD:-${SERVICE_PASSWORD:-cinder}} SWIFT_PASSWORD=${SWIFT_PASSWORD:-${SERVICE_PASSWORD:-swiftpass}} NEUTRON_PASSWORD=${NEUTRON_PASSWORD:-${SERVICE_PASSWORD:-neutron}} CONTROLLER_PUBLIC_ADDRESS=${CONTROLLER_PUBLIC_ADDRESS:-localhost} CONTROLLER_ADMIN_ADDRESS=${CONTROLLER_ADMIN_ADDRESS:-localhost} CONTROLLER_INTERNAL_ADDRESS=${CONTROLLER_INTERNAL_ADDRESS:-localhost} KEYSTONE_PORT=${KEYSTONE_PORT:-5000} TOOLS_DIR=$(cd $(dirname "$0") && pwd) KEYSTONE_CONF=${KEYSTONE_CONF:-/etc/keystone/keystone.conf} if [[ ! 
-r "$KEYSTONE_CONF" ]]; then if [[ -r "$TOOLS_DIR/../etc/keystone.conf" ]]; then # assume git checkout KEYSTONE_CONF="$TOOLS_DIR/../etc/keystone.conf" else KEYSTONE_CONF="" fi fi export OS_USERNAME=admin export OS_PASSWORD=$ADMIN_PASSWORD export OS_PROJECT_NAME=admin export OS_USER_DOMAIN_ID=default export OS_PROJECT_DOMAIN_ID=default export OS_IDENTITY_API_VERSION=3 export OS_AUTH_URL=http://$CONTROLLER_PUBLIC_ADDRESS:${KEYSTONE_PORT}/v3 export OS_BOOTSTRAP_PASSWORD=$ADMIN_PASSWORD export OS_BOOTSTRAP_REGION_ID=RegionOne export OS_BOOTSTRAP_ADMIN_URL="http://$CONTROLLER_PUBLIC_ADDRESS:${KEYSTONE_PORT}/v3" export OS_BOOTSTRAP_PUBLIC_URL="http://$CONTROLLER_ADMIN_ADDRESS:${KEYSTONE_PORT}/v3" export OS_BOOTSTRAP_INTERNAL_URL="http://$CONTROLLER_INTERNAL_ADDRESS:${KEYSTONE_PORT}/v3" keystone-manage bootstrap # # Default tenant # openstack project create demo \ --description "Default Tenant" # # Service tenant # openstack role create service openstack project create service \ --description "Service Tenant" openstack user create glance --project service\ --password "${GLANCE_PASSWORD}" openstack role add --user glance \ --project service \ service openstack user create nova --project service\ --password "${NOVA_PASSWORD}" openstack role add --user nova \ --project service \ service openstack user create cinder --project service \ --password "${CINDER_PASSWORD}" openstack role add --user cinder \ --project service \ service openstack user create swift --project service \ --password "${SWIFT_PASSWORD}" \ openstack role add --user swift \ --project service \ service openstack user create neutron --project service \ --password "${NEUTRON_PASSWORD}" \ openstack role add --user neutron \ --project service \ service # # Nova service # openstack service create --name=nova_legacy \ --description="Nova Compute Service (Legacy 2.0)" \ compute_legacy if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ compute public 
"http://$CONTROLLER_PUBLIC_ADDRESS:8774/v2/\$(project_id)s" openstack endpoint create --region RegionOne \ compute admin "http://$CONTROLLER_ADMIN_ADDRESS:8774/v2/\$(project_id)s" openstack endpoint create --region RegionOne \ compute internal "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2/\$(project_id)s" fi openstack service create --name=nova \ --description="Nova Compute Service" \ compute if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ compute public "http://$CONTROLLER_PUBLIC_ADDRESS:8774/v2.1" openstack endpoint create --region RegionOne \ compute admin "http://$CONTROLLER_ADMIN_ADDRESS:8774/v2.1" openstack endpoint create --region RegionOne \ compute internal "http://$CONTROLLER_INTERNAL_ADDRESS:8774/v2.1" fi # # Volume service # openstack service create --name=cinderv2 \ --description="Cinder Volume Service V2" \ volumev2 if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ volume public "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v2/\$(project_id)s" openstack endpoint create --region RegionOne \ volume admin "http://$CONTROLLER_ADMIN_ADDRESS:8776/v2/\$(project_id)s" openstack endpoint create --region RegionOne \ volume internal "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v2/\$(project_id)s" fi openstack service create --name=cinderv3 \ --description="Cinder Volume Service V3" \ volumev3 if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ volume public "http://$CONTROLLER_PUBLIC_ADDRESS:8776/v3/\$(project_id)s" openstack endpoint create --region RegionOne \ volume admin "http://$CONTROLLER_ADMIN_ADDRESS:8776/v3/\$(project_id)s" openstack endpoint create --region RegionOne \ volume internal "http://$CONTROLLER_INTERNAL_ADDRESS:8776/v3/\$(project_id)s" fi # # Image service # openstack service create --name=glance \ --description="Glance Image Service" \ image if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ image public 
"http://$CONTROLLER_PUBLIC_ADDRESS:9292" openstack endpoint create --region RegionOne \ image admin "http://$CONTROLLER_ADMIN_ADDRESS:9292" openstack endpoint create --region RegionOne \ image internal "http://$CONTROLLER_INTERNAL_ADDRESS:9292" fi # # Swift service # openstack service create --name=swift \ --description="Swift Object Storage Service" \ object-store if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ object-store public "http://$CONTROLLER_PUBLIC_ADDRESS:8080/v1/AUTH_\$(project_id)s" openstack endpoint create --region RegionOne \ object-store admin "http://$CONTROLLER_ADMIN_ADDRESS:8080/v1" openstack endpoint create --region RegionOne \ object-store internal "http://$CONTROLLER_INTERNAL_ADDRESS:8080/v1/AUTH_\$(project_id)s" fi # # Neutron service # openstack service create --name=neutron \ --description="Neutron Network Service" \ network if [[ -z "$DISABLE_ENDPOINTS" ]]; then openstack endpoint create --region RegionOne \ network public "http://$CONTROLLER_PUBLIC_ADDRESS:9696" openstack endpoint create --region RegionOne \ network admin "http://$CONTROLLER_ADMIN_ADDRESS:9696" openstack endpoint create --region RegionOne \ network internal "http://$CONTROLLER_INTERNAL_ADDRESS:9696" fi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/tools/test-setup.sh0000775000175000017500000000413600000000000017303 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. 
DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Bump the max_connections limit sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " SET GLOBAL max_connections = 1024;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW. DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1727867754.0 keystone-26.0.0/tox.ini0000664000175000017500000001433600000000000015005 0ustar00zuulzuul00000000000000[tox] minversion = 3.18.0 envlist = py3,pep8,api-ref,docs,genconfig,genpolicy,releasenotes,protection ignore_basepython_conflict = true [testenv] basepython = 
python3 usedevelop = True setenv = PYTHONDONTWRITEBYTECODE=1 # TODO(stephenfin): Remove once we bump our upper-constraint to SQLAlchemy 2.0 SQLALCHEMY_WARN_20=1 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt .[ldap,memcache] commands = stestr run {posargs} allowlist_externals = bash passenv = http_proxy,HTTP_PROXY,https_proxy,HTTPS_PROXY,no_proxy,NO_PROXY,PBR_VERSION [testenv:pep8] deps = {[testenv]deps} pre-commit commands = pre-commit run --all-files --show-diff-on-failure # Run bash8 during pep8 runs to ensure violations are caught by # the check and gate queues bashate devstack/plugin.sh [testenv:fast8] deps = {[testenv:pep8]deps} commands = {toxinidir}/tools/fast8.sh passenv = FAST8_NUM_COMMITS allowlist_externals = {toxinidir}/tools/fast8.sh [testenv:bandit] # NOTE(browne): This is required for the integration test job of the bandit # project. Please do not remove. deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/requirements.txt commands = bandit -r keystone -x 'keystone/tests/*' [testenv:cover] # Also do not run test_coverage_ext tests while gathering coverage as those # tests conflict with coverage. 
setenv = {[testenv]setenv} PYTHON=coverage run --source keystone --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml [testenv:patch_cover] commands = bash tools/cover.sh [testenv:venv] commands = {posargs} [testenv:debug] commands = oslo_debug_helper {posargs} passenv = KSTEST_* [testenv:functional] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/test-requirements.txt setenv = OS_TEST_PATH=./keystone/tests/functional commands = stestr run {posargs} stestr slowest passenv = KSTEST_* [flake8] application-import-names = keystone import-order-style = pep8 filename = *.py,keystone-manage show-source = true enable-extensions = H203,H904 # D100: Missing docstring in public module # D101: Missing docstring in public class # D102: Missing docstring in public method # D103: Missing docstring in public function # D104: Missing docstring in public package # D106: Missing docstring in public nested class # D107: Missing docstring in __init__ # D203: 1 blank line required before class docstring (deprecated in pep257) # D401: First line should be in imperative mood; try rephrasing # E402: module level import not at top of file # H211: Use assert{Is,IsNot}instance # H214: Use assertIn/NotIn(A, B) rather than assertTrue/False(A in/not in B) when checking collection contents. 
# W503: line break before binary operator # W504: line break after binary operator ignore = D100,D101,D102,D103,D104,D106,D107,E203,D203,D401,E402,H211,H214,W503,W504 exclude = .venv,.git,.tox,build,dist,*lib/python*,*egg,tools,vendor,.update-venv,*.ini,*.po,*.pot max-complexity = 24 per-file-ignores = # URL lines too long keystone/common/password_hashing.py: E501 keystone/api/auth.py: E501 keystone/api/users.py: E501 keystone/federation/utils.py: E501 keystone/assignment/core.py: E501 keystone/common/policies/endpoint_group.py: E501 keystone/exception.py: E501 keystone/resource/core.py: E501 keystone/tests/protection/v3/test_credentials.py: E501 keystone/tests/protection/v3/test_grants.py: E501 keystone/tests/protection/v3/test_groups.py: E501 keystone/tests/unit/assignment/test_backends.py: E501 keystone/tests/unit/ksfixtures/__init__.py: H301,F401,E501 keystone/tests/unit/test_associate_project_endpoint_extension.py: E501 keystone/tests/unit/test_backend_ldap.py: E501 keystone/tests/unit/test_cli.py: E501 keystone/tests/unit/test_versions.py: E501 keystone/tests/unit/test_v3_filters.py: E501 keystone/token/providers/jws/core.py: E501 [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} -r{toxinidir}/doc/requirements.txt .[ldap,memcache] commands= bash -c "rm -rf doc/build" bash -c "rm -rf doc/source/api" sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html # FIXME(gyee): we need to pre-create the doc/build/pdf/_static directory as a # workaround because sphinx_feature_classification.support_matrix extension # is operating under the assumption that the _static directory already exist # and trying to copy support-matrix.css into it. 
We need to remove # the workaround after this patch has merged: # https://review.opendev.org/#/c/679860 [testenv:pdf-docs] deps = {[testenv:docs]deps} allowlist_externals = make mkdir rm commands = rm -rf doc/build/pdf mkdir -p doc/build/pdf/_static sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:releasenotes] deps = {[testenv:docs]deps} commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:api-ref] deps = {[testenv:docs]deps} commands = bash -c "rm -rf api-ref/build" sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=config-generator/keystone.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file config-generator/keystone-policy-generator.conf [hacking] import_exceptions = keystone.i18n [flake8:local-plugins] extension = K001 = checks:CheckForMutableDefaultArgs K002 = checks:block_comments_begin_with_a_space K005 = checks:CheckForTranslationIssues K008 = checks:dict_constructor_with_sequence_copy paths = ./keystone/tests/hacking [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. deps = bindep commands = bindep test [testenv:protection] commands = stestr run --test-path=./keystone/tests/protection {posargs}